| code: stringlengths 2–1.05M | repo_name: stringlengths 5–104 | path: stringlengths 4–251 | language: stringclasses, 1 value | license: stringclasses, 15 values | size: int32, 2–1.05M |
|---|---|---|---|---|---|
from entity_reader import EntityReader
import textract
from dataset_importer.utils import HandleDatasetImportException
class DocXReader(EntityReader):
@staticmethod
def get_features(**kwargs):
directory = kwargs['directory']
for file_path in DocXReader.get_file_list(directory, 'docx'):
try:
features = DocXReader.get_meta_features(file_path=file_path)
features['text'] = textract.process(file_path).decode('utf8')
features['_texta_id'] = file_path
yield features
except Exception as e:
HandleDatasetImportException(kwargs, e, file_path=file_path)
@staticmethod
def count_total_documents(**kwargs):
directory = kwargs['directory']
return DocXReader.count_documents(root_directory=directory, extension='docx')
| texta-tk/texta | dataset_importer/document_reader/readers/entity/docx_reader.py | Python | gpl-3.0 | 870 |
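A minimal consumption sketch for the reader above. The import path and the directory are assumptions for illustration; only the `get_features`/`count_total_documents` calls come from the file itself.

```python
# Hypothetical usage sketch; the import path and directory are assumptions.
from docx_reader import DocXReader  # assumed import path

total = DocXReader.count_total_documents(directory='/data/docx_corpus')
print('documents found:', total)

for features in DocXReader.get_features(directory='/data/docx_corpus'):
    # each yielded dict carries the extracted text plus the meta features
    print(features['_texta_id'], len(features['text']))
```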
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue as Queue
import time
from ansible.errors import *
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.role import hash_params
from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['StrategyBase']
# FIXME: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
'''
A simple object to make passing the various plugin loaders to
the forked processes over the queue easier.
'''
def __init__(self):
self.basedirs = _basedirs[:]
self.filter_loader = filter_loader
self.lookup_loader = lookup_loader
self.module_loader = module_loader
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm.get_notified_handlers()
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = getattr(tqm._options, 'step', False)
self._diff = getattr(tqm._options, 'diff', False)
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
def run(self, iterator, play_context, result=True):
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = self._tqm._failed_hosts.keys()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
self._display.debug("running handlers")
result &= self.run_handlers(iterator, play_context)
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set(failed_hosts).union(self._tqm._failed_hosts.keys())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
# send the stats callback
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
if len(unreachable_hosts) > 0:
return 3
elif len(failed_hosts) > 0:
return 2
elif not result:
return 1
else:
return 0
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
new_vars = vars.copy()
new_vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
new_vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
return new_vars
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
self._display.debug("entering _queue_task() for %s/%s" % (host, task))
# and then queue the new task
self._display.debug("%s - putting task (%s) in queue" % (host, task))
try:
self._display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
(worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
self._cur_worker += 1
if self._cur_worker >= len(self._workers):
self._cur_worker = 0
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj()
main_q.put((host, task, self._loader.get_basedir(), task_vars, play_context, shared_loader_obj), block=False)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
self._display.debug("got an error while queuing: %s" % e)
return
self._display.debug("exiting _queue_task() for %s/%s" % (host, task))
def _process_pending_results(self, iterator):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
while not self._final_q.empty() and not self._tqm._terminated:
try:
result = self._final_q.get(block=False)
self._display.debug("got result from result worker: %s" % ([unicode(x) for x in result],))
# all host status messages contain 2 entries: (msg, task_result)
if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
task_result = result[1]
host = task_result._host
task = task_result._task
if result[0] == 'host_task_failed' or task_result.is_failed():
if not task.ignore_errors:
self._display.debug("marking %s as failed" % host.name)
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
else:
self._tqm._stats.increment('ok', host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
elif result[0] == 'host_unreachable':
self._tqm._unreachable_hosts[host.name] = True
self._tqm._stats.increment('dark', host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif result[0] == 'host_task_skipped':
self._tqm._stats.increment('skipped', host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
elif result[0] == 'host_task_ok':
self._tqm._stats.increment('ok', host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', host.name)
self._tqm.send_callback('v2_runner_on_ok', task_result)
if self._diff and 'diff' in task_result._result:
self._tqm.send_callback('v2_on_file_diff', task_result)
self._pending_results -= 1
if host.name in self._blocked_hosts:
del self._blocked_hosts[host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iterator._play.ROLE_CACHE[task_result._task._role._role_name].iteritems():
if role_obj._uuid == task_result._task._role._uuid:
role_obj._had_task_run[host.name] = True
ret_results.append(task_result)
elif result[0] == 'add_host':
task_result = result[1]
new_host_info = task_result.get('add_host', dict())
self._add_host(new_host_info)
elif result[0] == 'add_group':
task = result[1]
self._add_group(task, iterator)
elif result[0] == 'notify_handler':
task_result = result[1]
handler_name = result[2]
original_task = iterator.get_original_task(task_result._host, task_result._task)
if handler_name not in self._notified_handlers:
self._notified_handlers[handler_name] = []
if task_result._host not in self._notified_handlers[handler_name]:
self._notified_handlers[handler_name].append(task_result._host)
elif result[0] == 'register_host_var':
# essentially the same as 'set_host_var' below, however we
# never follow the delegate_to value for registered vars
host = result[1]
var_name = result[2]
var_value = result[3]
self._variable_manager.set_host_variable(host, var_name, var_value)
elif result[0] in ('set_host_var', 'set_host_facts'):
host = result[1]
task = result[2]
item = result[3]
if task.delegate_to is not None:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
if item is not None:
task_vars['item'] = item
templar = Templar(loader=self._loader, variables=task_vars)
host_name = templar.template(task.delegate_to)
target_host = self._inventory.get_host(host_name)
if target_host is None:
target_host = Host(name=host_name)
else:
target_host = host
if result[0] == 'set_host_var':
var_name = result[4]
var_value = result[5]
self._variable_manager.set_host_variable(target_host, var_name, var_value)
elif result[0] == 'set_host_facts':
facts = result[4]
self._variable_manager.set_host_facts(target_host, facts)
else:
raise AnsibleError("unknown result message received: %s" % result[0])
except Queue.Empty:
pass
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
self._display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
results = self._process_pending_results(iterator)
ret_results.extend(results)
time.sleep(0.01)
self._display.debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info):
'''
Helper function to add a new host to inventory based on a task result.
'''
host_name = host_info.get('host_name')
# Check if host in cache, add if not
if host_name in self._inventory._hosts_cache:
new_host = self._inventory._hosts_cache[host_name]
else:
new_host = Host(name=host_name)
self._inventory._hosts_cache[host_name] = new_host
allgroup = self._inventory.get_group('all')
allgroup.add_host(new_host)
# Set/update the vars for this host
# FIXME: probably should have a set vars method for the host?
new_vars = host_info.get('host_vars', dict())
new_host.vars.update(new_vars)
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if not self._inventory.get_group(group_name):
new_group = Group(group_name)
self._inventory.add_group(new_group)
new_group.vars = self._inventory.get_group_variables(group_name)
else:
new_group = self._inventory.get_group(group_name)
new_group.add_host(new_host)
# add this host to the group cache
if self._inventory._groups_list is not None:
if group_name in self._inventory._groups_list:
if new_host.name not in self._inventory._groups_list[group_name]:
self._inventory._groups_list[group_name].append(new_host.name)
# clear pattern caching completely since it's unpredictable what
# patterns may have referenced the group
# FIXME: is this still required?
self._inventory.clear_pattern_cache()
def _add_group(self, task, iterator):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
groups = {}
changed = False
for host in self._inventory.get_hosts():
original_task = iterator.get_original_task(host, task)
all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=original_task)
templar = Templar(loader=self._loader, variables=all_vars)
group_name = templar.template(original_task.args.get('key'))
if task.evaluate_conditional(templar=templar, all_vars=all_vars):
if group_name not in groups:
groups[group_name] = []
groups[group_name].append(host)
for group_name, hosts in groups.iteritems():
new_group = self._inventory.get_group(group_name)
if not new_group:
# create the new group and add it to inventory
new_group = Group(name=group_name)
self._inventory.add_group(new_group)
# and add the group to the proper hierarchy
allgroup = self._inventory.get_group('all')
allgroup.add_child_group(new_group)
changed = True
for host in hosts:
if group_name not in host.get_groups():
new_group.add_host(host)
changed = True
return changed
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
except AnsibleError, e:
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
return []
if not isinstance(data, list):
raise AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds)
block_list = load_list_of_blocks(
data,
play=included_file._task._block._play,
parent_block=included_file._task._block,
task_include=included_file._task,
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader
)
# set the vars for this task from those specified as params to the include
for b in block_list:
temp_vars = b._task_include.vars.copy()
temp_vars.update(included_file._args.copy())
b._task_include.vars = temp_vars
return block_list
def run_handlers(self, iterator, play_context):
'''
Runs handlers on those hosts which have been notified.
'''
result = True
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
handler_name = handler.get_name()
if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
# FIXME: need to use iterator.get_failed_hosts() instead?
#if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
host_results = []
for host in self._notified_handlers[handler_name]:
if not handler.has_triggered(host) and (host.name not in self._tqm._failed_hosts or play_context.force_handlers):
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
self._queue_task(host, handler, task_vars, play_context)
#handler.flag_for_host(host)
results = self._process_pending_results(iterator)
host_results.extend(results)
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
# wipe the notification list
self._notified_handlers[handler_name] = []
try:
included_files = IncludedFile.process_include_results(
host_results,
self._tqm,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError, e:
return False
if len(included_files) > 0:
for included_file in included_files:
try:
new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks:
for task in block.block:
if task.name in self._notified_handlers:
for host in included_file._hosts:
if host.name not in self._notified_handlers[task.name]:
self._notified_handlers[task.name].append(host)
else:
self._notified_handlers[task.name] = included_file._hosts[:]
# and add the new blocks to the list of handler blocks
handler_block.block.extend(block.block)
#iterator._play.handlers.extend(new_blocks)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._display.warning(str(e))
continue
self._display.debug("done running handlers, result is: %s" % result)
return result
def _take_step(self, task, host=None):
ret=False
if host:
msg = u'Perform task: %s on %s (y/n/c): ' % (task, host)
else:
msg = u'Perform task: %s (y/n/c): ' % task
resp = self._display.prompt(msg)
if resp.lower() in ['y','yes']:
self._display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
self._display.debug("User ran task and cancled step mode")
self._step = False
ret = True
else:
self._display.debug("User skipped task")
self._display.banner(msg)
return ret
| sharifmamun/ansible | lib/ansible/plugins/strategies/__init__.py | Python | gpl-3.0 | 23,205 |
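The `_queue_task` method above hands work to worker slots with a simple wrap-around index. A standalone sketch of that round-robin selection, with the worker entries stubbed out for illustration:

```python
# Minimal sketch of the round-robin worker selection used in _queue_task;
# the worker entries are stand-ins, only the index arithmetic mirrors the code.
workers = ['worker-%d' % i for i in range(4)]
cur_worker = 0

def next_worker():
    global cur_worker
    worker = workers[cur_worker]
    cur_worker += 1
    if cur_worker >= len(workers):
        cur_worker = 0
    return worker

print([next_worker() for _ in range(6)])
# ['worker-0', 'worker-1', 'worker-2', 'worker-3', 'worker-0', 'worker-1']
```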
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_postgresqlserver
version_added: "2.5"
short_description: Manage PostgreSQL Server instance.
description:
- Create, update and delete instance of PostgreSQL Server.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
name:
description:
- The name of the server.
required: True
sku:
description:
- The SKU (pricing tier) of the server.
suboptions:
name:
description:
- The name of the sku, typically, a letter + Number code, e.g. P3.
tier:
description:
- The tier of the particular SKU, e.g. Basic.
choices: ['basic', 'standard']
capacity:
description:
- "The scale up/out capacity, representing server's compute units."
size:
description:
- The size code, to be interpreted by resource as appropriate.
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
storage_mb:
description:
- The maximum storage allowed for a server.
version:
description:
- Server version.
choices: ['9.5', '9.6']
enforce_ssl:
description:
- Enable SSL enforcement.
type: bool
default: False
admin_username:
description:
- "The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation)."
admin_password:
description:
- The password of the administrator login.
create_mode:
description:
- Create mode of PostgreSQL Server.
default: Default
state:
description:
- Assert the state of the PostgreSQL server. Use 'present' to create or update a server and 'absent' to delete it.
default: present
choices:
- present
- absent
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) PostgreSQL Server
azure_rm_postgresqlserver:
resource_group: TestGroup
name: testserver
sku:
name: PGSQLS100
tier: Basic
capacity: 100
location: eastus
storage_mb: 1024
enforce_ssl: True
admin_username: cloudsa
admin_password: password
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/12345678-1234-1234-1234-123412341234/resourceGroups/samplerg/providers/Microsoft.DBforPostgreSQL/servers/mysqlsrv1b6dd89593
version:
description:
- 'Server version. Possible values include: C(9.5), C(9.6)'
returned: always
type: str
sample: 9.6
state:
description:
- 'A state of a server that is visible to user. Possible values include: C(Ready), C(Dropping), C(Disabled)'
returned: always
type: str
sample: Ready
fully_qualified_domain_name:
description:
- The fully qualified domain name of a server.
returned: always
type: str
sample: postgresqlsrv1b6dd89593.postgresql.database.azure.com
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMServers(AzureRMModuleBase):
"""Configuration class for an Azure RM PostgreSQL Server resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
sku=dict(
type='dict'
),
location=dict(
type='str'
),
storage_mb=dict(
type='int'
),
version=dict(
type='str',
choices=['9.5', '9.6']
),
enforce_ssl=dict(
type='bool',
default=False
),
create_mode=dict(
type='str',
default='Default'
),
admin_username=dict(
type='str'
),
admin_password=dict(
type='str',
no_log=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.name = None
self.parameters = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMServers, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "sku":
ev = kwargs[key]
if 'tier' in ev:
if ev['tier'] == 'basic':
ev['tier'] = 'Basic'
elif ev['tier'] == 'standard':
ev['tier'] = 'Standard'
self.parameters["sku"] = ev
elif key == "location":
self.parameters["location"] = kwargs[key]
elif key == "storage_mb":
self.parameters.setdefault("properties", {})["storage_mb"] = kwargs[key]
elif key == "version":
self.parameters.setdefault("properties", {})["version"] = kwargs[key]
elif key == "enforce_ssl":
self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
elif key == "create_mode":
self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
elif key == "admin_username":
self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
elif key == "admin_password":
self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
if "location" not in self.parameters:
self.parameters["location"] = resource_group.location
old_response = self.get_postgresqlserver()
if not old_response:
self.log("PostgreSQL Server instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("PostgreSQL Server instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if PostgreSQL Server instance has to be deleted or may be updated")
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the PostgreSQL Server instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_postgresqlserver()
if not old_response:
self.results['changed'] = True
else:
self.results['changed'] = old_response.__ne__(response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("PostgreSQL Server instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_postgresqlserver()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_postgresqlserver():
time.sleep(20)
else:
self.log("PostgreSQL Server instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["version"] = response["version"]
self.results["state"] = response["user_visible_state"]
self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
return self.results
def create_update_postgresqlserver(self):
'''
Creates or updates PostgreSQL Server with the specified configuration.
:return: deserialized PostgreSQL Server instance state dictionary
'''
self.log("Creating / Updating the PostgreSQL Server instance {0}".format(self.name))
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.servers.create(resource_group_name=self.resource_group,
server_name=self.name,
parameters=self.parameters)
else:
response = self.mgmt_client.servers.update(resource_group_name=self.resource_group,
server_name=self.name,
parameters=self.parameters)
if isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the PostgreSQL Server instance.')
self.fail("Error creating the PostgreSQL Server instance: {0}".format(str(exc)))
return response.as_dict()
def delete_postgresqlserver(self):
'''
Deletes specified PostgreSQL Server instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the PostgreSQL Server instance {0}".format(self.name))
try:
response = self.mgmt_client.servers.delete(resource_group_name=self.resource_group,
server_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the PostgreSQL Server instance.')
self.fail("Error deleting the PostgreSQL Server instance: {0}".format(str(e)))
return True
def get_postgresqlserver(self):
'''
Gets the properties of the specified PostgreSQL Server.
:return: deserialized PostgreSQL Server instance state dictionary
'''
self.log("Checking if the PostgreSQL Server instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.servers.get(resource_group_name=self.resource_group,
server_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("PostgreSQL Server instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the PostgreSQL Server instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMServers()
if __name__ == '__main__':
main()
| konstruktoid/ansible-upstream | lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py | Python | gpl-3.0 | 13,248 |
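The `exec_module` flow above boils down to a small decision over the requested `state` and whether the server already exists. A condensed, illustrative reduction of that logic (the `decide` helper is not part of the module):

```python
# Illustrative reduction of the create/update/delete decision in exec_module.
class Actions:
    NoAction, Create, Update, Delete = range(4)

def decide(state, exists):
    """state is 'present' or 'absent'; exists mirrors get_postgresqlserver()."""
    if not exists:
        return Actions.Create if state == 'present' else Actions.NoAction
    return Actions.Delete if state == 'absent' else Actions.Update

assert decide('present', exists=False) == Actions.Create
assert decide('present', exists=True) == Actions.Update
assert decide('absent', exists=True) == Actions.Delete
assert decide('absent', exists=False) == Actions.NoAction
```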
"""
Test some SBValue APIs.
"""
import os, time
import re
import unittest2
import lldb, lldbutil
from lldbtest import *
class ValueAPITestCase(TestBase):
mydir = os.path.join("python_api", "value")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
@python_api_test
@dsym_test
def test_with_dsym(self):
"""Exercise some SBValue APIs."""
d = {'EXE': self.exe_name}
self.buildDsym(dictionary=d)
self.setTearDownCleanup(dictionary=d)
self.value_api(self.exe_name)
@python_api_test
@dwarf_test
def test_with_dwarf(self):
"""Exercise some SBValue APIs."""
d = {'EXE': self.exe_name}
self.buildDwarf(dictionary=d)
self.setTearDownCleanup(dictionary=d)
self.value_api(self.exe_name)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# We'll use the test method name as the exe_name.
self.exe_name = self.testMethodName
# Find the line number to break at.
self.line = line_number('main.c', '// Break at this line')
def value_api(self, exe_name):
"""Exercise some SBValue APIs."""
exe = os.path.join(os.getcwd(), exe_name)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation('main.c', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
# Get Frame #0.
self.assertTrue(process.GetState() == lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint condition")
frame0 = thread.GetFrameAtIndex(0)
# Get global variable 'days_of_week'.
list = target.FindGlobalVariables('days_of_week', 1)
days_of_week = list.GetValueAtIndex(0)
self.assertTrue(days_of_week, VALID_VARIABLE)
self.assertTrue(days_of_week.GetNumChildren() == 7, VALID_VARIABLE)
self.DebugSBValue(days_of_week)
# Get global variable 'weekdays'.
list = target.FindGlobalVariables('weekdays', 1)
weekdays = list.GetValueAtIndex(0)
self.assertTrue(weekdays, VALID_VARIABLE)
self.assertTrue(weekdays.GetNumChildren() == 5, VALID_VARIABLE)
self.DebugSBValue(weekdays)
# Get global variable 'g_table'.
list = target.FindGlobalVariables('g_table', 1)
g_table = list.GetValueAtIndex(0)
self.assertTrue(g_table, VALID_VARIABLE)
self.assertTrue(g_table.GetNumChildren() == 2, VALID_VARIABLE)
self.DebugSBValue(g_table)
fmt = lldbutil.BasicFormatter()
cvf = lldbutil.ChildVisitingFormatter(indent_child=2)
rdf = lldbutil.RecursiveDecentFormatter(indent_child=2)
if self.TraceOn():
print fmt.format(days_of_week)
print cvf.format(days_of_week)
print cvf.format(weekdays)
print rdf.format(g_table)
# Get variable 'my_int_ptr'.
value = frame0.FindVariable('my_int_ptr')
self.assertTrue(value, VALID_VARIABLE)
self.DebugSBValue(value)
# Get what 'my_int_ptr' points to.
pointed = value.GetChildAtIndex(0)
self.assertTrue(pointed, VALID_VARIABLE)
self.DebugSBValue(pointed)
# While we are at it, verify that 'my_int_ptr' points to 'g_my_int'.
symbol = target.ResolveLoadAddress(int(pointed.GetLocation(), 0)).GetSymbol()
self.assertTrue(symbol)
self.expect(symbol.GetName(), exe=False,
startstr = 'g_my_int')
# Get variable 'str_ptr'.
value = frame0.FindVariable('str_ptr')
self.assertTrue(value, VALID_VARIABLE)
self.DebugSBValue(value)
# SBValue::TypeIsPointerType() should return true.
self.assertTrue(value.TypeIsPointerType())
# Verify the SBValue::GetByteSize() API is working correctly.
arch = self.getArchitecture()
if arch == 'i386':
self.assertTrue(value.GetByteSize() == 4)
elif arch == 'x86_64':
self.assertTrue(value.GetByteSize() == 8)
# Get child at index 5 => 'Friday'.
child = value.GetChildAtIndex(5, lldb.eNoDynamicValues, True)
self.assertTrue(child, VALID_VARIABLE)
self.DebugSBValue(child)
self.expect(child.GetSummary(), exe=False,
substrs = ['Friday'])
# Now try to get at the same variable using GetValueForExpressionPath().
# These two SBValue objects should have the same value.
val2 = value.GetValueForExpressionPath('[5]')
self.assertTrue(val2, VALID_VARIABLE)
self.DebugSBValue(val2)
self.assertTrue(child.GetValue() == val2.GetValue() and
child.GetSummary() == val2.GetSummary())
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| s20121035/rk3288_android5.1_repo | external/lldb/test/python_api/value/TestValueAPI.py | Python | gpl-3.0 | 5,371 |
import numpy as np
from mathutils import Matrix, Vector
import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level
from sverchok.utils.curve import SvCurve, SvNormalTrack
class SvCurveZeroTwistFrameNode(bpy.types.Node, SverchCustomTreeNode):
"""
Triggers: Curve Zero-Twist Frame
Tooltip: Calculate Zero-Twist Perpendicular frame for curve
"""
bl_idname = 'SvExCurveZeroTwistFrameNode'
bl_label = 'Curve Zero-Twist Frame'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_CURVE_FRAME'
resolution : IntProperty(
name = "Resolution",
min = 10, default = 50,
update = updateNode)
t_value : FloatProperty(
name = "T",
default = 0.5,
update = updateNode)
join : BoolProperty(
name = "Join",
description = "If enabled, join generated lists of matrices; otherwise, output separate list of matrices for each curve",
default = True,
update = updateNode)
def update_sockets(self, context):
self.outputs['CumulativeTorsion'].hide_safe = self.algorithm != 'FRENET'
updateNode(self, context)
algorithms = [
('FRENET', "Integrate torsion", "Subtract torsion integral from Frenet matrices", 0),
('TRACK', "Track normal", "Try to maintain constant normal direction by tracking it along the curve", 1)
]
algorithm : EnumProperty(
name = "Algorithm",
items = algorithms,
default = 'FRENET',
update = update_sockets)
def draw_buttons(self, context, layout):
layout.prop(self, 'algorithm', text='')
layout.prop(self, 'join', toggle=True)
def sv_init(self, context):
self.inputs.new('SvCurveSocket', "Curve")
self.inputs.new('SvStringsSocket', "Resolution").prop_name = 'resolution'
self.inputs.new('SvStringsSocket', "T").prop_name = 't_value'
self.outputs.new('SvStringsSocket', "CumulativeTorsion")
self.outputs.new('SvMatrixSocket', 'Matrix')
def process(self):
if not any(socket.is_linked for socket in self.outputs):
return
curve_s = self.inputs['Curve'].sv_get()
ts_s = self.inputs['T'].sv_get()
resolution_s = self.inputs['Resolution'].sv_get()
curve_s = ensure_nesting_level(curve_s, 2, data_types=(SvCurve,))
resolution_s = ensure_nesting_level(resolution_s, 2)
ts_s = ensure_nesting_level(ts_s, 3)
torsion_out = []
matrix_out = []
for curves, resolution_i, ts_i in zip_long_repeat(curve_s, resolution_s, ts_s):
for curve, resolution, ts in zip_long_repeat(curves, resolution_i, ts_i):
ts = np.array(ts)
if self.algorithm == 'FRENET':
curve.pre_calc_torsion_integral(resolution)
new_torsion, new_matrices = curve.zero_torsion_frame_array(ts)
new_torsion = new_torsion.tolist()
else: # TRACK
tracker = SvNormalTrack(curve, resolution)
matrices_np = tracker.evaluate_array(ts)
points = curve.evaluate_array(ts)
new_matrices = []
for m, point in zip(matrices_np, points):
matrix = Matrix(m.tolist()).to_4x4()
matrix.translation = Vector(point)
new_matrices.append(matrix)
new_torsion = []
torsion_out.append(new_torsion)
if self.join:
matrix_out.extend(new_matrices)
else:
matrix_out.append(new_matrices)
self.outputs['CumulativeTorsion'].sv_set(torsion_out)
self.outputs['Matrix'].sv_set(matrix_out)
def register():
bpy.utils.register_class(SvCurveZeroTwistFrameNode)
def unregister():
bpy.utils.unregister_class(SvCurveZeroTwistFrameNode)
| DolphinDream/sverchok | nodes/curve/zero_twist_frame.py | Python | gpl-3.0 | 4,460 |
# -*- coding: utf-8 -*-
import system_tests
class FujiTags(metaclass=system_tests.CaseMeta):
filesAndExpectedOutput = [
("FujiTagsDRangeAutoRating1.jpg",
"""Exif.Fujifilm.ShadowTone SLong 1 0
Exif.Fujifilm.HighlightTone SLong 1 0
Exif.Fujifilm.Rating Long 1 1
Exif.Fujifilm.DRangePriority Short 1 Auto
Exif.Fujifilm.DRangePriorityAuto Short 1 Weak
""")
,
("FujiTagsDRangeWeakRating2.jpg",
"""Exif.Fujifilm.ShadowTone SLong 1 0
Exif.Fujifilm.HighlightTone SLong 1 0
Exif.Fujifilm.Rating Long 1 2
Exif.Fujifilm.DRangePriority Short 1 Fixed
Exif.Fujifilm.DRangePriorityFixed Short 1 Weak
""")
,
("FujiTagsDRangeStrongRating3.jpg",
"""Exif.Fujifilm.ShadowTone SLong 1 0
Exif.Fujifilm.HighlightTone SLong 1 0
Exif.Fujifilm.Rating Long 1 3
Exif.Fujifilm.DRangePriority Short 1 Fixed
Exif.Fujifilm.DRangePriorityFixed Short 1 Strong
"""),
("FujiTagsSTone0HTone0Rating4.jpg",
"""Exif.Fujifilm.ShadowTone SLong 1 0
Exif.Fujifilm.HighlightTone SLong 1 0
Exif.Fujifilm.Rating Long 1 4
"""),
("FujiTagsSTone1HTone-1Rating5.jpg",
"""Exif.Fujifilm.ShadowTone SLong 1 +1
Exif.Fujifilm.HighlightTone SLong 1 -1
Exif.Fujifilm.Rating Long 1 5
"""),
("FujiTagsSTone4HTone-2.jpg",
"""Exif.Fujifilm.ShadowTone SLong 1 +4
Exif.Fujifilm.HighlightTone SLong 1 -2
Exif.Fujifilm.Rating Long 1 0
"""),
("FujiTagsSTone-2HTone4.jpg",
"""Exif.Fujifilm.ShadowTone SLong 1 -2
Exif.Fujifilm.HighlightTone SLong 1 +4
Exif.Fujifilm.Rating Long 1 0
""")
]
tags = ["Exif.Fujifilm.ShadowTone",
"Exif.Fujifilm.HighlightTone",
"Exif.Fujifilm.Rating",
"Exif.Fujifilm.DRangePriority",
"Exif.Fujifilm.DRangePriorityAuto",
"Exif.Fujifilm.DRangePriorityFixed"]
tagcmd = "$exiv2 -K " + " -K ".join(tags)
commands = ["$tagcmd $data_path/" + f for f, _ in filesAndExpectedOutput]
stdout = [e for _, e in filesAndExpectedOutput]
stderr = [""] * len(filesAndExpectedOutput)
retval = [0] * len(filesAndExpectedOutput)
| AlienCowEatCake/ImageViewer | src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/github/test_fuji_tags.py | Python | gpl-3.0 | 2,665 |
# Your search indexes here.
# See: http://django-haystack.readthedocs.org/en/latest/searchindex_api.html
| labhackercd/colab-edemocracia-plugin | src/colab_edemocracia/search_indexes.py | Python | gpl-3.0 | 107 |
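Since the file above is only a placeholder pointing at the django-haystack documentation, here is a minimal index skeleton of the kind that comment refers to. The `Discussion` model and its import path are purely illustrative assumptions.

```python
# Hypothetical haystack index; the model and its import path are assumptions.
from haystack import indexes

from colab_edemocracia.models import Discussion  # assumed model


class DiscussionIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    title = indexes.CharField(model_attr='title')

    def get_model(self):
        return Discussion

    def index_queryset(self, using=None):
        return self.get_model().objects.all()
```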
import copy
import datetime
import json
import logging
import os
import re
import subprocess
import sys
import time
import uuid
import warnings
from elastalert.alerts import Alerter, BasicMatchString, DateTimeEncoder
from elastalert.util import EAException
from elastalert.util import elastalert_logger
from elastalert.util import lookup_es_key
from elastalert.util import pretty_ts
from elastalert.util import resolve_string
from elastalert.util import ts_now
from elastalert.util import ts_to_dt
class ExecAlerter(Alerter):
required_options = set(['command'])
def __init__(self, *args):
super(ExecAlerter, self).__init__(*args)
self.last_command = []
self.shell = False
if isinstance(self.rule['command'], str):
self.shell = True
if '%' in self.rule['command']:
logging.warning('Warning! You could be vulnerable to shell injection!')
self.rule['command'] = [self.rule['command']]
self.new_style_string_format = False
if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']:
self.new_style_string_format = True
def alert(self, matches):
# Format the command and arguments
try:
command = [resolve_string(command_arg, matches[0]) for command_arg in self.rule['command']]
self.last_command = command
except KeyError as e:
raise EAException("Error formatting command: %s" % (e))
# Run command and pipe data
try:
subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)
match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
input_string = self.rule['name'] + ":||:" + match_json
stdout, stderr = subp.communicate(input=input_string.encode())
if self.rule.get("fail_on_non_zero_exit", False) and subp.wait():
raise EAException("Non-zero exit code while running command %s" % (' '.join(command)))
except OSError as e:
raise EAException("Error while running command %s: %s" % (' '.join(command), e))
def get_info(self):
return {'type': 'command',
'command': ' '.join(self.last_command)}
| thomashaw/SecGen | modules/utilities/unix/logging/elastalert/files/exec_alerter.py | Python | gpl-3.0 | 2,223 |
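The alerter above writes `<rule name>:||:<JSON matches>` to the spawned command's stdin. A tiny, hypothetical receiving script showing how such a command could parse that payload:

```python
# Hypothetical downstream command for ExecAlerter: it parses the
# "<rule name>:||:<json matches>" payload the alerter pipes to stdin.
import json
import sys

raw = sys.stdin.read()
rule_name, _, match_json = raw.partition(':||:')
matches = json.loads(match_json)
print('rule %s fired with %d match(es)' % (rule_name, len(matches)))
```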
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from ZODB.fstools import prev_txn, TxnHeader
from ZODB.serialize import ObjectReader, get_refs
from persistent.TimeStamp import TimeStamp
from ZODB.FileStorage.FileStorage import FileIterator
from time import gmtime, strftime
from datetime import timedelta
from optparse import OptionParser
import cStringIO, cPickle
import optparse, getopt
import sys
import datetime
import os
class Nonce(object): pass
class Reader(ObjectReader):
def __init__(self):
self.identity = None
def _get_unpickler(self, pickle):
file = cStringIO.StringIO(pickle)
unpickler = cPickle.Unpickler(file)
unpickler.persistent_load = self._persistent_load
def find_global(modulename, name):
self.identity ="%s.%s"%(modulename, name)
return Nonce
unpickler.find_global = find_global
return unpickler
def getIdentity(self, pickle ):
self.identity = None
unpickler = self._get_unpickler( pickle )
unpickler.load()
return self.identity
def getObject(self, pickle):
unpickler = self._get_unpickler( pickle )
ob = unpickler.load()
return ob
def pretty_size( size ):
if size < 1024:
return "%sB"%(size)
kb = size / 1024.0
if kb < 1024.0:
return '%0.1fKb'%kb
mb = kb / 1024.0
if mb < 1024.0:
return '%0.1fMb'%mb
else:
gb = mb/1024.0
return '%0.1fGb'%gb
def run(path, days, notPacked):
f = open(path, "rb")
f.seek(0, 2)
size= os.path.getsize(path)
now = datetime.date.today()
notPackedDays = []
for day in range(notPacked):
notPackedDays.append(str(now - timedelta(days=day+1)))
#day->size
stats = {}
th = prev_txn(f)
bool = True
while bool:
ts = TimeStamp(th.tid)
then = datetime.date(int(ts.year()), int(ts.month()), int(ts.day()))
delta = timedelta(days=int(days))
if(now - then < delta):
dateT = strftime("%Y-%m-%d", [int(ts.year()), int(ts.month()), int(ts.day()),1,1,1,1,1,1] )
try:
stats[dateT] = stats[dateT] + th.length
except KeyError:
stats[dateT] = th.length
else:
bool = False
th = th.prev_txn()
f.close()
total = 0
totalPacked = 0
daysPacked = 0
for (d,s) in sorted(stats.items(), key=lambda (k,v): v, reverse=True):
print d,"size:", pretty_size(s),
date = str(d)
if(date in notPackedDays or date == str(now)):
print "(not yet packed)"
else:
totalPacked = totalPacked + s
daysPacked = daysPacked + 1
print
total = total + s
if int(totalPacked):
average = totalPacked/int(daysPacked)
else:
average = 0
print "\n-- ALREADY PACKED DAYS--"
print "The amount of data added in", daysPacked, "days is", pretty_size(totalPacked)
print "Average", pretty_size(average), "per day"
print "Following this trend, the size of the database will be:"
print "\t",pretty_size(average*365+size)," in 1 year"
print "\t",pretty_size(average*365*2+size)," in 2 years"
print "\t",pretty_size(average*365*10+size)," in 10 years"
print "\n-- ALL DAYS --"
print "The amount of data added in", days, "days is", pretty_size(total)
if int(total):
print "Average", pretty_size(total/int(days)), "per day"
else:
print "Average 0bytes per day"
def main():
usage = "usage: %prog [options] filename"
parser = OptionParser(usage=usage)
parser.add_option("-d", "--days", dest="days", action="store",
help="show to amound of data added the last 'd' days")
parser.add_option("-n", "--notPacked", dest="np", action="store", default = 2,
help="not packed days (starting from yesterday")
(options, args) = parser.parse_args()
days = 0
if options.days:
days = options.days
else:
print "You have to enter the number of days, see --help for details"
return 2
path = args[0]
run(path, days, int(options.np))
if __name__ == "__main__":
main()
| XeCycle/indico | bin/utils/zodb/sizeIncreasing_stats.py | Python | gpl-3.0 | 4,918 |
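A brief usage note implied by `main()` above (the datastore path is illustrative), plus a quick sanity check of the `pretty_size` helper, assuming the script is importable as a module:

```python
# Example invocation (the Data.fs path is illustrative):
#   python sizeIncreasing_stats.py --days 30 --notPacked 2 /path/to/Data.fs

# Sanity check of the pretty_size helper defined above.
from sizeIncreasing_stats import pretty_size  # assumes the script is on the path

assert pretty_size(512) == '512B'
assert pretty_size(2048) == '2.0Kb'
assert pretty_size(5 * 1024 ** 2) == '5.0Mb'
```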
# coding=utf-8
import os.path as op
from .. import nodes
def remove_dead(nodelist):
""" Remove nodes that no longer exist.
"""
paths = [node["path"] for node in nodelist]
for path in paths:
if not op.exists(path):
nodes.remove(nodelist, path)
return nodelist
def register(pipes_dics):
pipes_dics["remove_dead"] = {
"func": remove_dead,
"args": [],
"desc": "Remove nodes that no longer exist.",
}
| termoshtt/DataProcessor | lib/dataprocessor/pipes/remove_dead.py | Python | gpl-3.0 | 473 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel
from rna_prop_ui import PropertyPanel
class LAMP_MT_sunsky_presets(Menu):
bl_label = "Sun & Sky Presets"
preset_subdir = "sunsky"
preset_operator = "script.execute_preset"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
draw = Menu.draw_preset
class DataButtonsPanel():
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
engine = context.scene.render.engine
return context.lamp and (engine in cls.COMPAT_ENGINES)
class DATA_PT_context_lamp(DataButtonsPanel, Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
ob = context.object
lamp = context.lamp
space = context.space_data
split = layout.split(percentage=0.65)
texture_count = len(lamp.texture_slots.keys())
if ob:
split.template_ID(ob, "data")
elif lamp:
split.template_ID(space, "pin_id")
if texture_count != 0:
split.label(text=str(texture_count), icon='TEXTURE')
class DATA_PT_preview(DataButtonsPanel, Panel):
bl_label = "Preview"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
self.layout.template_preview(context.lamp)
class DATA_PT_lamp(DataButtonsPanel, Panel):
bl_label = "Lamp"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
lamp = context.lamp
layout.prop(lamp, "type", expand=True)
split = layout.split()
col = split.column()
sub = col.column()
sub.prop(lamp, "color", text="")
sub.prop(lamp, "energy")
if lamp.type in {'POINT', 'SPOT'}:
sub.label(text="Falloff:")
sub.prop(lamp, "falloff_type", text="")
sub.prop(lamp, "distance")
if lamp.falloff_type == 'LINEAR_QUADRATIC_WEIGHTED':
col.label(text="Attenuation Factors:")
sub = col.column(align=True)
sub.prop(lamp, "linear_attenuation", slider=True, text="Linear")
sub.prop(lamp, "quadratic_attenuation", slider=True, text="Quadratic")
col.prop(lamp, "use_sphere")
if lamp.type == 'AREA':
col.prop(lamp, "distance")
col.prop(lamp, "gamma")
col = split.column()
col.prop(lamp, "use_negative")
col.prop(lamp, "use_own_layer", text="This Layer Only")
col.prop(lamp, "use_specular")
col.prop(lamp, "use_diffuse")
class DATA_PT_sunsky(DataButtonsPanel, Panel):
bl_label = "Sky & Atmosphere"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type == 'SUN') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp.sky
row = layout.row(align=True)
row.prop(lamp, "use_sky")
row.menu("LAMP_MT_sunsky_presets", text=bpy.types.LAMP_MT_sunsky_presets.bl_label)
row.operator("lamp.sunsky_preset_add", text="", icon='ZOOMIN')
row.operator("lamp.sunsky_preset_add", text="", icon='ZOOMOUT').remove_active = True
row = layout.row()
row.active = lamp.use_sky or lamp.use_atmosphere
row.prop(lamp, "atmosphere_turbidity", text="Turbidity")
split = layout.split()
col = split.column()
col.active = lamp.use_sky
col.label(text="Blending:")
sub = col.column()
sub.prop(lamp, "sky_blend_type", text="")
sub.prop(lamp, "sky_blend", text="Factor")
col.label(text="Color Space:")
sub = col.column()
sub.row().prop(lamp, "sky_color_space", expand=True)
sub.prop(lamp, "sky_exposure", text="Exposure")
col = split.column()
col.active = lamp.use_sky
col.label(text="Horizon:")
sub = col.column()
sub.prop(lamp, "horizon_brightness", text="Brightness")
sub.prop(lamp, "spread", text="Spread")
col.label(text="Sun:")
sub = col.column()
sub.prop(lamp, "sun_brightness", text="Brightness")
sub.prop(lamp, "sun_size", text="Size")
sub.prop(lamp, "backscattered_light", slider=True, text="Back Light")
layout.separator()
layout.prop(lamp, "use_atmosphere")
split = layout.split()
col = split.column()
col.active = lamp.use_atmosphere
col.label(text="Intensity:")
col.prop(lamp, "sun_intensity", text="Sun")
col.prop(lamp, "atmosphere_distance_factor", text="Distance")
col = split.column()
col.active = lamp.use_atmosphere
col.label(text="Scattering:")
sub = col.column(align=True)
sub.prop(lamp, "atmosphere_inscattering", slider=True, text="Inscattering")
sub.prop(lamp, "atmosphere_extinction", slider=True, text="Extinction")
class DATA_PT_shadow(DataButtonsPanel, Panel):
bl_label = "Shadow"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type in {'POINT', 'SUN', 'SPOT', 'AREA'}) and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp
layout.prop(lamp, "shadow_method", expand=True)
if lamp.shadow_method == 'NOSHADOW' and lamp.type == 'AREA':
split = layout.split()
col = split.column()
col.label(text="Form factor sampling:")
sub = col.row(align=True)
if lamp.shape == 'SQUARE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples")
elif lamp.shape == 'RECTANGLE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples X")
sub.prop(lamp, "shadow_ray_samples_y", text="Samples Y")
if lamp.shadow_method != 'NOSHADOW':
split = layout.split()
col = split.column()
col.prop(lamp, "shadow_color", text="")
col = split.column()
col.prop(lamp, "use_shadow_layer", text="This Layer Only")
col.prop(lamp, "use_only_shadow")
if lamp.shadow_method == 'RAY_SHADOW':
split = layout.split()
col = split.column()
col.label(text="Sampling:")
if lamp.type in {'POINT', 'SUN', 'SPOT'}:
sub = col.row()
sub.prop(lamp, "shadow_ray_samples", text="Samples")
sub.prop(lamp, "shadow_soft_size", text="Soft Size")
elif lamp.type == 'AREA':
sub = col.row(align=True)
if lamp.shape == 'SQUARE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples")
elif lamp.shape == 'RECTANGLE':
sub.prop(lamp, "shadow_ray_samples_x", text="Samples X")
sub.prop(lamp, "shadow_ray_samples_y", text="Samples Y")
col.row().prop(lamp, "shadow_ray_sample_method", expand=True)
if lamp.shadow_ray_sample_method == 'ADAPTIVE_QMC':
layout.prop(lamp, "shadow_adaptive_threshold", text="Threshold")
if lamp.type == 'AREA' and lamp.shadow_ray_sample_method == 'CONSTANT_JITTERED':
row = layout.row()
row.prop(lamp, "use_umbra")
row.prop(lamp, "use_dither")
row.prop(lamp, "use_jitter")
elif lamp.shadow_method == 'BUFFER_SHADOW':
col = layout.column()
col.label(text="Buffer Type:")
col.row().prop(lamp, "shadow_buffer_type", expand=True)
if lamp.shadow_buffer_type in {'REGULAR', 'HALFWAY', 'DEEP'}:
split = layout.split()
col = split.column()
col.label(text="Filter Type:")
col.prop(lamp, "shadow_filter_type", text="")
sub = col.column(align=True)
sub.prop(lamp, "shadow_buffer_soft", text="Soft")
sub.prop(lamp, "shadow_buffer_bias", text="Bias")
col = split.column()
col.label(text="Sample Buffers:")
col.prop(lamp, "shadow_sample_buffers", text="")
sub = col.column(align=True)
sub.prop(lamp, "shadow_buffer_size", text="Size")
sub.prop(lamp, "shadow_buffer_samples", text="Samples")
if lamp.shadow_buffer_type == 'DEEP':
col.prop(lamp, "compression_threshold")
elif lamp.shadow_buffer_type == 'IRREGULAR':
layout.prop(lamp, "shadow_buffer_bias", text="Bias")
split = layout.split()
col = split.column()
col.prop(lamp, "use_auto_clip_start", text="Autoclip Start")
sub = col.column()
sub.active = not lamp.use_auto_clip_start
sub.prop(lamp, "shadow_buffer_clip_start", text="Clip Start")
col = split.column()
col.prop(lamp, "use_auto_clip_end", text="Autoclip End")
sub = col.column()
sub.active = not lamp.use_auto_clip_end
sub.prop(lamp, "shadow_buffer_clip_end", text=" Clip End")
class DATA_PT_area(DataButtonsPanel, Panel):
bl_label = "Area Shape"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type == 'AREA') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp
col = layout.column()
col.row().prop(lamp, "shape", expand=True)
sub = col.row(align=True)
if lamp.shape == 'SQUARE':
sub.prop(lamp, "size")
elif lamp.shape == 'RECTANGLE':
sub.prop(lamp, "size", text="Size X")
sub.prop(lamp, "size_y", text="Size Y")
class DATA_PT_spot(DataButtonsPanel, Panel):
bl_label = "Spot Shape"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type == 'SPOT') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
lamp = context.lamp
split = layout.split()
col = split.column()
sub = col.column()
sub.prop(lamp, "spot_size", text="Size")
sub.prop(lamp, "spot_blend", text="Blend", slider=True)
col.prop(lamp, "use_square")
col.prop(lamp, "show_cone")
col = split.column()
col.prop(lamp, "use_halo")
sub = col.column(align=True)
sub.active = lamp.use_halo
sub.prop(lamp, "halo_intensity", text="Intensity")
if lamp.shadow_method == 'BUFFER_SHADOW':
sub.prop(lamp, "halo_step", text="Step")
class DATA_PT_falloff_curve(DataButtonsPanel, Panel):
bl_label = "Falloff Curve"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
lamp = context.lamp
engine = context.scene.render.engine
return (lamp and lamp.type in {'POINT', 'SPOT'} and lamp.falloff_type == 'CUSTOM_CURVE') and (engine in cls.COMPAT_ENGINES)
def draw(self, context):
lamp = context.lamp
self.layout.template_curve_mapping(lamp, "falloff_curve")
class DATA_PT_custom_props_lamp(DataButtonsPanel, PropertyPanel, Panel):
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
_context_path = "object.data"
_property_type = bpy.types.Lamp
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
| cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/startup/bl_ui/properties_data_lamp.py | Python | gpl-3.0 | 12,966 |
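Every panel above follows the same recipe: share the `PROPERTIES`/`WINDOW`/`data` placement through the `DataButtonsPanel` mixin, gate visibility with `poll`, and build the UI in `draw`. A stripped-down, hypothetical panel in that style (2.6x-era API; the class name and the property shown are illustrative):

```python
# Hypothetical minimal panel following the DataButtonsPanel pattern above.
import bpy
from bpy.types import Panel


class DATA_PT_lamp_example(Panel):
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"
    bl_label = "Example"
    COMPAT_ENGINES = {'BLENDER_RENDER'}

    @classmethod
    def poll(cls, context):
        engine = context.scene.render.engine
        return context.lamp is not None and engine in cls.COMPAT_ENGINES

    def draw(self, context):
        layout = self.layout
        layout.prop(context.lamp, "energy")


if __name__ == "__main__":  # only for live edit, mirroring the file above
    bpy.utils.register_class(DATA_PT_lamp_example)
```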
# -*- coding: utf-8 -*-
from __future__ import division
__version__ = "0.4.9"
#
# This file is part of the open-source MarkWrite application.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Need to import pyTables module before pyqt imports pyh5 or error occurs when
# opening an iohub datastore file.
import tables
import pyqtgraph
import sys, os
from appdirs import AppDirs
from file_io import readPickle, writePickle
appdirs = AppDirs("MarkWrite")
default_settings_file_name = u'default_settings.pkl'
current_settings_file_name = u'current_settings.pkl'
current_settings_path = appdirs.user_config_dir
usersettings = readPickle(current_settings_path, current_settings_file_name)
from pyqtgraph.Qt import QtGui
app = QtGui.QApplication(sys.argv)
from gui.projectsettings import ProjectSettingsDialog
_ = ProjectSettingsDialog(savedstate=usersettings)
from gui.projectsettings import SETTINGS
writePickle(current_settings_path, current_settings_file_name, SETTINGS)
default_file_path = os.path.join(current_settings_path,default_settings_file_name)
if not os.path.exists(default_file_path):
writePickle(current_settings_path, default_settings_file_name, SETTINGS)
|
isolver/OpenHandWrite
|
src/markwrite/markwrite/__init__.py
|
Python
|
gpl-3.0
| 1,763
|
# Copyright (c) 2013 AnsibleWorks, Inc.
#
# This file is part of Ansible Commander.
#
# Ansible Commander is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Ansible Commander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible Commander. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from lib.main.models import *
from django.contrib.auth.models import User
from lib.main.serializers import *
from lib.main.rbac import *
from django.core.exceptions import PermissionDenied
from rest_framework import mixins
from rest_framework import generics
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework import status
import exceptions
import datetime
import json as python_json
# FIXME: machinery for auto-adding audit trail logs to all CREATE/EDITS
class BaseList(generics.ListCreateAPIView):
def list_permissions_check(self, request, obj=None):
''' determines some early yes/no access decisions, pre-filtering '''
if request.method == 'GET':
return True
if request.method == 'POST':
if self.__class__.model in [ User ]:
ok = request.user.is_superuser or (request.user.admin_of_organizations.count() > 0)
if not ok:
raise PermissionDenied()
return True
else:
# audit all of these to check ownership/readability of subobjects
if not self.__class__.model.can_user_add(request.user, self.request.DATA):
raise PermissionDenied()
return True
raise exceptions.NotImplementedError
def get_queryset(self):
base = self._get_queryset()
model = self.__class__.model
qs = None
if model == User:
qs = base.filter(is_active=True)
elif model in [ Tag, AuditTrail ]:
qs = base
else:
qs = self._get_queryset().filter(active=True)
order = self.request.QUERY_PARAMS.get('order', None)
if order:
qs = qs.order_by(order)
return qs
class BaseSubList(BaseList):
''' used for subcollections with an overriden post '''
def list_permissions_check(self, request, obj=None):
''' determines some early yes/no access decisions, pre-filtering '''
if request.method == 'GET':
return True
if request.method == 'POST':
# the can_user_attach methods will be called below
return True
raise exceptions.NotImplementedError
def post(self, request, *args, **kwargs):
postable = getattr(self.__class__, 'postable', False)
if not postable:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
parent_id = kwargs['pk']
sub_id = request.DATA.get('id', None)
main = self.__class__.parent_model.objects.get(pk=parent_id)
severable = getattr(self.__class__, 'severable', True)
subs = None
if sub_id:
subs = self.__class__.model.objects.filter(pk=sub_id)
else:
if 'disassociate' in request.DATA:
raise PermissionDenied() # ID is required to disassociate
else:
# this is a little tricky and a little manual
# the object ID was not specified, so it probably doesn't exist in the DB yet.
                # we want to see if we can create it. The URL may choose to inject its primary key into the object
# because we are posting to a subcollection. Use all the normal access control mechanisms.
inject_primary_key = getattr(self.__class__, 'inject_primary_key_on_post_as', None)
if inject_primary_key is not None:
# add the key to the post data using the pk from the URL
request.DATA[inject_primary_key] = kwargs['pk']
# attempt to deserialize the object
ser = self.__class__.serializer_class(data=request.DATA)
if not ser.is_valid():
return Response(status=status.HTTP_400_BAD_REQUEST, data=ser.errors)
# ask the usual access control settings
if not self.__class__.model.can_user_add(request.user, ser.init_data):
raise PermissionDenied()
                # save the object through the serializer, then reload and return the saved object deserialized
obj = ser.save()
ser = self.__class__.serializer_class(obj)
# now make sure we could have already attached the two together. If we could not have, raise an exception
# such that the transaction does not commit.
if main == obj:
# no attaching to yourself
raise PermissionDenied()
if self.__class__.parent_model != User:
if not obj.__class__.can_user_read(request.user, obj):
raise PermissionDenied()
if not self.__class__.parent_model.can_user_attach(request.user, main, obj, self.__class__.relationship, request.DATA):
raise PermissionDenied()
else:
if not UserHelper.can_user_read(request.user, obj):
raise PermissionDenied()
# FIXME: should generalize this
if not UserHelper.can_user_attach(request.user, main, obj, self.__class__.relationship, request.DATA):
raise PermissionDenied()
return Response(status=status.HTTP_201_CREATED, data=ser.data)
else:
# view didn't specify a way to get the pk from the URL, so not even trying
return Response(status=status.HTTP_400_BAD_REQUEST, data=python_json.dumps(dict(msg='object cannot be created')))
# we didn't have to create the object, so this is just associating the two objects together now...
# (or disassociating them)
if len(subs) != 1:
return Response(status=status.HTTP_400_BAD_REQUEST)
sub = subs[0]
relationship = getattr(main, self.__class__.relationship)
if not 'disassociate' in request.DATA:
if not request.user.is_superuser:
if type(main) != User:
if not self.__class__.parent_model.can_user_attach(request.user, main, sub, self.__class__.relationship, request.DATA):
raise PermissionDenied()
else:
if not UserHelper.can_user_attach(request.user, main, sub, self.__class__.relationship, request.DATA):
raise PermissionDenied()
if sub in relationship.all():
return Response(status=status.HTTP_409_CONFLICT)
relationship.add(sub)
else:
if not request.user.is_superuser:
if type(main) != User:
if not self.__class__.parent_model.can_user_unattach(request.user, main, sub, self.__class__.relationship):
raise PermissionDenied()
else:
if not UserHelper.can_user_unattach(request.user, main, sub, self.__class__.relationship):
raise PermissionDenied()
if severable:
relationship.remove(sub)
else:
# resource is just a ForeignKey, can't remove it from the set, just set it inactive
sub.name = "_deleted_%s_%s" % (str(datetime.time()), sub.name)
sub.active = False
sub.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class BaseDetail(generics.RetrieveUpdateDestroyAPIView):
def pre_save(self, obj):
if type(obj) not in [ User, Tag, AuditTrail ]:
obj.created_by = self.request.user
def destroy(self, request, *args, **kwargs):
        # somewhat lame that delete has to call its own permissions check
obj = self.model.objects.get(pk=kwargs['pk'])
if getattr(obj, 'active', True) == False:
raise Http404()
if getattr(obj, 'is_active', True) == False:
raise Http404()
if not request.user.is_superuser and not self.delete_permissions_check(request, obj):
raise PermissionDenied()
if isinstance(obj, PrimordialModel):
obj.name = "_deleted_%s_%s" % (str(datetime.time()), obj.name)
obj.active = False
obj.save()
elif type(obj) == User:
obj.username = "_deleted_%s_%s" % (str(datetime.time()), obj.username)
obj.is_active = False
obj.save()
else:
raise Exception("InternalError: destroy() not implemented yet for %s" % obj)
return HttpResponse(status=204)
def delete_permissions_check(self, request, obj):
if isinstance(obj, PrimordialModel):
return self.__class__.model.can_user_delete(request.user, obj)
elif isinstance(obj, User):
return UserHelper.can_user_delete(request.user, obj)
raise PermissionDenied()
def item_permissions_check(self, request, obj):
if request.method == 'GET':
if type(obj) == User:
return UserHelper.can_user_read(request.user, obj)
else:
return self.__class__.model.can_user_read(request.user, obj)
elif request.method in [ 'PUT' ]:
if type(obj) == User:
return UserHelper.can_user_administrate(request.user, obj, request.DATA)
else:
return self.__class__.model.can_user_administrate(request.user, obj, request.DATA)
return False
def put(self, request, *args, **kwargs):
self.put_filter(request, *args, **kwargs)
return super(BaseDetail, self).put(request, *args, **kwargs)
def put_filter(self, request, *args, **kwargs):
''' scrub any fields the user cannot/should not put, based on user context. This runs after read-only serialization filtering '''
pass
class VariableBaseDetail(BaseDetail):
'''
an object that is always 1 to 1 with the foreign key of another object
    and does not have its own key, such as HostVariableDetail
'''
def destroy(self, request, *args, **kwargs):
raise PermissionDenied()
def delete_permissions_check(self, request, obj):
raise PermissionDenied()
def item_permissions_check(self, request, obj):
through_obj = self.__class__.parent_model.objects.get(pk = self.request.args['pk'])
if request.method == 'GET':
return self.__class__.parent_model.can_user_read(request.user, through_obj)
elif request.method in [ 'PUT' ]:
return self.__class__.parent_model.can_user_administrate(request.user, through_obj, request.DATA)
return False
def put(self, request, *args, **kwargs):
# FIXME: lots of overlap between put and get here, need to refactor
through_obj = self.__class__.parent_model.objects.get(pk=kwargs['pk'])
has_permission = Inventory._has_permission_types(request.user, through_obj.inventory, PERMISSION_TYPES_ALLOWING_INVENTORY_WRITE)
if not has_permission:
raise PermissionDenied()
this_object = None
try:
this_object = getattr(through_obj, self.__class__.reverse_relationship, None)
except:
pass
if this_object is None:
this_object = self.__class__.model.objects.create(data=python_json.dumps(request.DATA))
else:
this_object.data = python_json.dumps(request.DATA)
this_object.save()
setattr(through_obj, self.__class__.reverse_relationship, this_object)
through_obj.save()
return Response(status=status.HTTP_200_OK, data=python_json.loads(this_object.data))
def get(self, request, *args, **kwargs):
# if null, recreate a blank object
through_obj = self.__class__.parent_model.objects.get(pk=kwargs['pk'])
this_object = None
try:
this_object = getattr(through_obj, self.__class__.reverse_relationship, None)
except Exception, e:
pass
if this_object is None:
new_args = {}
new_args['data'] = python_json.dumps(dict())
this_object = self.__class__.model.objects.create(**new_args)
setattr(through_obj, self.__class__.reverse_relationship, this_object)
through_obj.save()
has_permission = Inventory._has_permission_types(request.user, through_obj.inventory, PERMISSION_TYPES_ALLOWING_INVENTORY_WRITE)
if not has_permission:
raise PermissionDenied()
return Response(status=status.HTTP_200_OK, data=python_json.loads(this_object.data))
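# Hedged usage sketch, not part of the original module: a concrete endpoint is
# expected to subclass BaseList (or BaseDetail) and declare the model and
# serializer it serves. The class and attribute values below are illustrative
# assumptions, not names verified against this repository.
#
# class ExampleOrganizationsList(BaseList):
#     model = Organization
#     serializer_class = OrganizationSerializer
#     permission_classes = (permissions.IsAuthenticated,)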
|
mmoya/ansible-commander
|
lib/main/base_views.py
|
Python
|
gpl-3.0
| 13,613
|
"""
Copyright 2014-2021 Vincent Texier <vit@free.fr>
DuniterPy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DuniterPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from .block import Block
from .block_uid import BlockUID, block_uid
from .document import Document, MalformedDocumentError
from .certification import Certification
from .revocation import Revocation
from .identity import Identity
from .membership import Membership
from .transaction import (
SimpleTransaction,
Transaction,
InputSource,
OutputSource,
SIGParameter,
Unlock,
UnlockParameter,
)
from .crc_pubkey import CRCPubkey
|
ucoin-io/ucoin-python-api
|
duniterpy/documents/__init__.py
|
Python
|
gpl-3.0
| 1,119
|
"""
Copyright 2018 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.affine.hstack import hstack
from cvxpy.atoms.axis_atom import AxisAtom
import cvxpy.interface as intf
import numpy as np
class Prod(AxisAtom):
"""Multiply the entries of an expression.
The semantics of this atom are the same as np.prod.
This atom is log-log affine, but it is neither convex nor concave.
Parameters
----------
expr : Expression
The expression to multiply the entries of.
    axis : int
        The axis along which to take the product.
    keepdims : bool
        Whether to keep dimensions after taking the product.
"""
def __init__(self, expr, axis=None, keepdims=False):
super(Prod, self).__init__(expr, axis=axis, keepdims=keepdims)
def sign_from_args(self):
"""Returns sign (is positive, is negative) of the expression.
"""
if self.args[0].is_nonneg():
return (True, False)
return (False, False)
def is_atom_convex(self):
"""Is the atom convex?
"""
return False
def is_atom_concave(self):
"""Is the atom concave?
"""
return False
def is_atom_log_log_convex(self):
"""Is the atom log-log convex?
"""
return True
def is_atom_log_log_concave(self):
"""Is the atom log-log concave?
"""
return True
def is_incr(self, idx):
"""Is the composition non-decreasing in argument idx?
"""
return self.args[0].is_nonneg()
def is_decr(self, idx):
"""Is the composition non-increasing in argument idx?
"""
return False
def numeric(self, values):
"""Takes the product of the entries of value.
"""
if intf.is_sparse(values[0]):
result = np.prod(values[0], axis=self.axis)
if not self.keepdims and self.axis is not None:
result = result.A.flatten()
else:
result = np.prod(values[0], axis=self.axis, keepdims=self.keepdims)
return result
def _column_grad(self, value):
"""Gives the (sub/super)gradient of the atom w.r.t. a column argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
value: A numeric value for a column.
Returns:
A NumPy ndarray or None.
"""
return np.prod(value) / value
def _grad(self, values):
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
return self._axis_grad(values)
def prod(expr, axis=None, keepdims=False):
"""Multiply the entries of an expression.
The semantics of this atom are the same as np.prod.
This atom is log-log affine, but it is neither convex nor concave.
Parameters
----------
expr : Expression or list[Expression, Numeric]
The expression to multiply the entries of, or a list of Expressions
and numeric types.
axis : int
The axis along which to take the product; ignored if `expr` is a list.
keepdims : bool
        Whether to keep dimensions after taking the product; ignored if `expr`
is a list.
"""
if isinstance(expr, list):
return Prod(hstack(expr))
else:
return Prod(expr, axis, keepdims)
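# Hedged usage sketch, not part of the original module: a quick numeric check
# of the prod() atom on a constant vector; the values are arbitrary.
if __name__ == "__main__":
    import cvxpy as cp
    total = cp.prod(cp.Constant([1.0, 2.0, 3.0]))
    print(total.value)  # expected to print 6.0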
|
SteveDiamond/cvxpy
|
cvxpy/atoms/prod.py
|
Python
|
gpl-3.0
| 4,052
|
#!/usr/bin/env python
# POCSAG Multichannel Realtime Decoder
# Copyright (c) 2012 iZsh -- izsh at fail0verflow.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gruel import pmt
from gnuradio import gr
from gnuradio import extras
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
import sys
import os
import time
import argparse
import osmosdr
import pocsag
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore, uic
import sip
except ImportError:
print "Error: Program requires PyQt4 and gr-qtgui."
sys.exit(1)
BANNER = "POCSAG Multichannel Realtime Decoder -- iZsh (izsh at fail0verflow.com)"
INI_FREQ_CORR = 0.0
INI_FREQ= 0.0
INI_SAMPLERATE = 1e6
INI_SYMRATE = 1200
SPS = 8 # samples per symbol
FFTSIZE = 2048
XLATING_CUTOFF = 10e3
class pocsag_msgsink(gr.block, QtCore.QObject):
pocsag_pagermsg = QtCore.pyqtSignal(dict)
def __init__(self):
gr.block.__init__(
self,
name = "POCSAG message sink",
in_sig = None,
out_sig = None,
has_msg_input = True
)
QtCore.QObject.__init__(self)
def work(self, input_items, output_items):
try:
msg = self.pop_msg_queue()
key = pmt.pmt_symbol_to_string(msg.key)
txt = pmt.to_python(msg.value)
if key == pocsag.POCSAG_ID:
self.pocsag_pagermsg.emit(txt)
return 1
return 0
except:
return -1
# We can't derive from extras.stream_selector... hence the ugly workaround
class stream_selector:
def __init__(self, num_inputs, size_of_items):
self.ss = extras.stream_selector(gr.io_signature(num_inputs, num_inputs, size_of_items),
gr.io_signature(1, 1, size_of_items))
self.num_inputs = num_inputs
def set_output(self, n):
p = [-2] * self.num_inputs
p[n] = 0
self.ss.set_paths(p)
class main_window(QtGui.QMainWindow):
backspacepressed = QtCore.pyqtSignal()
def __init__(self, topblock, args):
QtGui.QMainWindow.__init__(self)
uic.loadUi('pocsag-mrt_main.ui', self)
self.srcsink = uic.loadUi('pocsag-mrt_srcsink.ui')
self.demodsink = uic.loadUi('pocsag-mrt_demodsink.ui')
self.topblock = topblock
self.args = args
self.push_text(BANNER, QtCore.Qt.magenta)
self.init_sink()
# Connect the signals/slots etc.
self.freq_list.installEventFilter(self)
self.srcsink.installEventFilter(self)
self.demodsink.installEventFilter(self)
self.installEventFilter(self)
self.centerfreq_edit.returnPressed.connect(self.centerfreq_edittext)
self.addfreq_edit.returnPressed.connect(self.addfreq_edittext)
self.symrate_edit.returnPressed.connect(self.symrate_edittext)
self.samplerate_edit.returnPressed.connect(self.samplerate_edittext)
self.freqcorr_edit.returnPressed.connect(self.freqcorr_edittext)
self.freq_list.itemSelectionChanged.connect(self.select_freq)
self.backspacepressed.connect(self.remove_selected_freq)
self.debug_check.stateChanged.connect(self.debug_state)
self.srcsink_check.stateChanged.connect(self.srcsink_state)
self.demodsink_check.stateChanged.connect(self.demodsink_state)
# General inits
self.selected_freq = None
self.freqs = dict()
self.set_freqcorr(self.topblock.source.get_freq_corr())
self.set_samplerate(self.topblock.source.get_sample_rate())
self.set_centerfreq(self.topblock.source.get_center_freq())
self.symrate_edit.setText("%d" % self.args.symrate)
self.symrate = float(self.args.symrate)
self.read_channels(args.channels_file)
def init_sink(self):
if args.output_dir:
self.logfiles = {}
logdirpath = os.path.join(args.output_dir, 'unknown-frequency.log')
self.logfiles['unknown'] = open(logdirpath, 'w+')
self.push_text('All messages will be saved in "%s". One file for each frequency.'
% args.output_dir)
self.topblock.stop()
self.topblock.wait()
self.enable_selector_buttons(False)
#
# Source/premodulation
#
# Create the selector and connect the source to it
self.sel_c = stream_selector(3, gr.sizeof_gr_complex)
self.topblock.connect(self.topblock.source, (self.sel_c.ss, 0))
self.srcsink.source_source_radio.setEnabled(True)
self.sel_c.set_output(0)
# Add the sink
self.srcsink.grsink = qtgui.sink_c(FFTSIZE, gr.firdes.WIN_BLACKMAN_hARRIS,
self.topblock.source.get_center_freq(), self.topblock.source.get_sample_rate(),
"Source Signal", True, True, True, False)
self.srcsink.grsink.set_update_time(0.1)
self.topblock.connect(self.sel_c.ss, self.srcsink.grsink)
self.srcsink.sink = sip.wrapinstance(self.srcsink.grsink.pyqwidget(), QtGui.QWidget)
self.srcsink.horizontalLayout.addWidget(self.srcsink.sink)
# add a button group for the radio buttons
self.waveselc = QtGui.QButtonGroup(self.srcsink.verticalLayout)
self.waveselc.addButton(self.srcsink.source_source_radio, 0)
self.waveselc.addButton(self.srcsink.source_xlating_radio, 1)
self.waveselc.addButton(self.srcsink.source_interpolator_radio, 2)
self.waveselc.buttonClicked[int].connect(self.waveselc_toggled)
self.srcsink.source_source_radio.setChecked(True)
#
# Demodulation
#
# Add the sink
self.sel_f = stream_selector(4, gr.sizeof_float)
self.sel_f.set_output(0)
self.demodsink.grsink = qtgui.sink_f(FFTSIZE, gr.firdes.WIN_BLACKMAN_hARRIS,
0, self.args.symrate * SPS, "Demodulated Signal", True, True, True, False)
self.demodsink.grsink.set_update_time(0.1)
self.topblock.connect(self.sel_f.ss, self.demodsink.grsink)
self.demodsink.sink = sip.wrapinstance(self.demodsink.grsink.pyqwidget(), QtGui.QWidget)
self.demodsink.horizontalLayout.addWidget(self.demodsink.sink)
# Add the button group
self.waveself = QtGui.QButtonGroup(self.demodsink.verticalLayout)
self.waveself.addButton(self.demodsink.demodulation_quaddemod_radio, 0)
self.waveself.addButton(self.demodsink.demodulation_lowpass_radio, 1)
self.waveself.addButton(self.demodsink.demodulation_clockrecovery_radio, 2)
self.waveself.addButton(self.demodsink.demodulation_bits_radio, 3)
self.waveself.buttonClicked[int].connect(self.waveself_toggled)
self.demodsink.demodulation_quaddemod_radio.setChecked(True)
#
self.topblock.start()
def read_channels(self, channels_file):
if not channels_file: return
for freq in channels_file.readlines():
self.addfreq(eng_notation.str_to_num(freq.strip()))
def eventFilter(self, watched, event):
if event.type() == QtCore.QEvent.KeyPress and event.key() == QtCore.Qt.Key_Backspace:
self.backspacepressed.emit()
return True
if event.type() == QtCore.QEvent.Close and watched == self.srcsink:
self.srcsink_check.setCheckState(QtCore.Qt.Unchecked)
return True
if event.type() == QtCore.QEvent.Close and watched == self.demodsink:
self.demodsink_check.setCheckState(QtCore.Qt.Unchecked)
return True
if event.type() == QtCore.QEvent.Close and watched == self:
self.srcsink_check.setCheckState(QtCore.Qt.Unchecked)
self.demodsink_check.setCheckState(QtCore.Qt.Unchecked)
# Keep processing the event, we want to close the app
return False
def enable_selector_buttons(self, enabled = True):
# We never disable the source button
self.srcsink.source_xlating_radio.setEnabled(enabled)
self.srcsink.source_interpolator_radio.setEnabled(enabled)
self.demodsink.demodulation_quaddemod_radio.setEnabled(enabled)
self.demodsink.demodulation_lowpass_radio.setEnabled(enabled)
self.demodsink.demodulation_clockrecovery_radio.setEnabled(enabled)
self.demodsink.demodulation_bits_radio.setEnabled(enabled)
def waveselc_toggled(self, Id):
self.sel_c.set_output(Id)
self.set_uisink_frequency_range()
def waveself_toggled(self, Id):
self.sel_f.set_output(Id)
self.set_uisink_frequency_range()
def push_text(self, text, color = QtCore.Qt.black):
self.console.setTextColor(color)
self.console.append(text)
def log_pagermsg(self, txt):
if not args.output_dir: return False
log_time = '%s: ' % time.strftime('%F %T')
eom = ' (eom)' if txt['endofmsg'] else ''
pagertext = 'pager: %d (%d), TXT%s: %s\n' % (txt['addr'], txt['fun'], eom, txt['text'])
ch = txt['channel']
if ch:
self.logfiles[ch].write(log_time + pagertext)
else:
self.logfiles['unknown'].write(log_time + pagertext)
def push_pagermsg(self, txt):
self.log_pagermsg(txt)
ch = "N/A" if txt["channel"] == None else txt["channel"]
pagertext = "Pager message -- Channel %s, From pager %d (%d), TXT: %s" % (ch, txt["addr"], txt["fun"], txt["text"])
if txt["endofmsg"]:
self.push_text(pagertext, QtCore.Qt.blue)
else:
self.push_text(pagertext, QtCore.Qt.red)
def set_uisink_frequency_range(self):
if not hasattr(self, 'freq') or not hasattr(self, 'freqshift') or not hasattr(self, 'samplerate'):
return
if self.waveselc.checkedId() == 0: # the source
self.srcsink.grsink.set_frequency_range(self.centerfreq, self.samplerate)
elif self.waveselc.checkedId() == 1: # the shifted frequency
self.srcsink.grsink.set_frequency_range(self.centerfreq + self.freqshift, self.samplerate)
elif self.waveselc.checkedId() == 2: # interpolated/decimated
self.srcsink.grsink.set_frequency_range(self.centerfreq + self.freqshift, self.symrate * SPS)
if self.waveself.checkedId() == 0: # quad demod
self.demodsink.grsink.set_frequency_range(0, self.symrate * SPS)
elif self.waveself.checkedId() == 1: # lowpass
self.demodsink.grsink.set_frequency_range(0, self.symrate * SPS)
elif self.waveself.checkedId() == 2: # clock recovery
self.demodsink.grsink.set_frequency_range(0, self.symrate)
elif self.waveself.checkedId() == 3: # bits
self.demodsink.grsink.set_frequency_range(0, self.symrate)
def set_centerfreq(self, freq):
self.centerfreq = freq
self.centerfreq_edit.setText("%.3fM" % (self.centerfreq / 1e6))
self.push_text("Setting center frequency to %.3fMhz" % (self.centerfreq / 1e6))
self.update_freqs()
self.topblock.source.set_center_freq(self.centerfreq)
self.set_uisink_frequency_range()
def update_freqs(self):
for freq_txt, values in self.freqs.items():
if abs(self.centerfreq - values["freq"]) > self.samplerate / 2.0:
self.push_text("%s is outside of the bandwidth reach!" % freq_txt)
self.remove_freq(freq_txt)
def addfreq(self, freq):
self.addfreq_edit.clearFocus()
self.addfreq_edit.clear()
freq_txt = "%.6fMHz" % (freq / 1e6)
if args.output_dir:
self.logfiles[freq_txt] = open(os.path.join(args.output_dir, freq_txt + '.log'), 'a')
if freq_txt in self.freqs:
self.push_text("%s is already monitored!" % freq_txt)
return
if abs(self.centerfreq - freq) > self.samplerate / 2.0:
self.push_text("%s is outside of the bandwidth reach!" % freq_txt)
return
self.push_text("Monitoring %s" % freq_txt)
freqshift = self.centerfreq - freq
# reconfigure the flowgraph
# We use stop()/wait() because lock()/unlock() seems to freeze the app
# Can't find the reason...
self.topblock.stop()
self.topblock.wait()
# self.topblock.lock()
freq_xlating_fir_filter = gr.freq_xlating_fir_filter_ccc(1, (gr.firdes.low_pass(1.0, self.samplerate, XLATING_CUTOFF, XLATING_CUTOFF / 2)), freqshift, self.samplerate)
pocsag_decoder = pocsag.pocsag_decoder(self.samplerate, channel_str = freq_txt, symbolrate = self.symrate, debug = self.debug_check.isChecked())
msgsink = pocsag_msgsink() # FIXME: Shouldn't we use only one general msgsink?
self.topblock.connect(self.topblock.source, freq_xlating_fir_filter, pocsag_decoder, msgsink)
# Connect the QT signal from the msgsink to the UI
msgsink.pocsag_pagermsg.connect(self.push_pagermsg)
# self.topblock.unlock()
self.topblock.start()
# Save the blocks
self.freqs[freq_txt] = {
"freq": freq,
"freq_xlating_fir_filter": freq_xlating_fir_filter,
"pocsag_decoder": pocsag_decoder,
"msgsink": msgsink,
"uchar2float": gr.uchar_to_float() # we need a converter to connect it to the qtsink
}
self.freq_list.addItem(freq_txt)
def disconnect_sink(self, freq):
self.topblock.disconnect(self.freqs[freq]["freq_xlating_fir_filter"], (self.sel_c.ss, 1))
self.topblock.disconnect(self.freqs[freq]["pocsag_decoder"].fractional_interpolator, (self.sel_c.ss, 2))
self.topblock.disconnect(self.freqs[freq]["pocsag_decoder"].quadrature_demod, (self.sel_f.ss, 0))
self.topblock.disconnect(self.freqs[freq]["pocsag_decoder"].low_pass_filter, (self.sel_f.ss, 1))
self.topblock.disconnect(self.freqs[freq]["pocsag_decoder"].digital_clock_recovery_mm, (self.sel_f.ss, 2))
self.topblock.disconnect(self.freqs[freq]["pocsag_decoder"].digital_binary_slicer_fb, self.freqs[freq]["uchar2float"], (self.sel_f.ss, 3))
def connect_sink(self, freq):
self.topblock.connect(self.freqs[freq]["freq_xlating_fir_filter"], (self.sel_c.ss, 1))
self.topblock.connect(self.freqs[freq]["pocsag_decoder"].fractional_interpolator, (self.sel_c.ss, 2))
self.topblock.connect(self.freqs[freq]["pocsag_decoder"].quadrature_demod, (self.sel_f.ss, 0))
self.topblock.connect(self.freqs[freq]["pocsag_decoder"].low_pass_filter, (self.sel_f.ss, 1))
self.topblock.connect(self.freqs[freq]["pocsag_decoder"].digital_clock_recovery_mm, (self.sel_f.ss, 2))
self.topblock.connect(self.freqs[freq]["pocsag_decoder"].digital_binary_slicer_fb, self.freqs[freq]["uchar2float"], (self.sel_f.ss, 3))
def select_freq(self):
if len(self.freq_list.selectedItems()) == 0:
return
freq = str(self.freq_list.selectedItems()[0].text())
# Stop the flowchart
self.topblock.stop()
self.topblock.wait()
# self.topblock.lock()
# Disconnect the old selection
if self.selected_freq:
self.disconnect_sink(self.selected_freq)
# Connect the new selection
self.connect_sink(freq)
# Restart the flowgraph
self.topblock.start()
# self.topblock.unlock()
# Adjust the UI info
self.set_uisink_frequency_range()
self.enable_selector_buttons(True)
self.selected_freq = freq
def remove_selected_freq(self):
if self.selected_freq == None: return
self.remove_freq(self.selected_freq)
def remove_freq(self, freq):
if freq == None or freq not in self.freqs: return
self.push_text("Removing %s" % freq)
if args.output_dir:
self.logfiles[freq].close()
self.topblock.stop()
self.topblock.wait()
if self.selected_freq == freq: self.disconnect_sink(freq)
self.topblock.disconnect(self.topblock.source,
self.freqs[freq]["freq_xlating_fir_filter"],
self.freqs[freq]["pocsag_decoder"],
self.freqs[freq]["msgsink"])
self.topblock.start()
self.enable_selector_buttons(False)
del self.freqs[freq]
self.set_uisink_frequency_range()
if self.selected_freq == freq: self.selected_freq = None
self.freq_list.takeItem(self.freq_list.row(self.freq_list.findItems(freq, QtCore.Qt.MatchExactly)[0]))
def set_freqcorr(self, freqcorr):
self.freqcorr = freqcorr
self.topblock.source.set_freq_corr(self.freqcorr, 0)
self.freqcorr_edit.setText("%.3f" % self.freqcorr)
self.push_text("Setting freq. correction to %.3f ppm" % self.freqcorr)
def set_samplerate(self, samplerate):
self.samplerate = samplerate
self.samplerate_edit.setText("%.3fM" % (self.samplerate / 1e6))
self.push_text("Setting sample rate to %.3fMhz" % (self.samplerate / 1e6))
self.update_freqs()
self.set_uisink_frequency_range()
def centerfreq_edittext(self):
# try:
self.set_centerfreq(eng_notation.str_to_num(str(self.centerfreq_edit.text())))
# except ValueError:
# self.push_text("Bad center frequency value entered")
def addfreq_edittext(self):
try:
self.addfreq(eng_notation.str_to_num(str(self.addfreq_edit.text())))
except ValueError:
self.push_text("Bad frequency value entered")
def symrate_edittext(self):
try:
self.symrate = eng_notation.str_to_num(str(self.symrate_edit.text()))
self.push_text("Setting symbol rate to %.3fbaud\n" % self.symrate)
except ValueError:
self.push_text("Bad symbol rate value entered\n")
def samplerate_edittext(self):
try:
self.set_samplerate(eng_notation.str_to_num(str(self.samplerate_edit.text())))
except ValueError:
self.push_text("Bad sample rate value entered")
def freqcorr_edittext(self):
try:
self.set_freqcorr(eng_notation.str_to_num(str(self.freqcorr_edit.text())))
except ValueError:
self.push_text("Bad Freq. correction value entered")
def debug_state(self, state):
for value in self.freqs.values():
value["pocsag_decoder"].set_debug(state == QtCore.Qt.Checked)
def srcsink_state(self, state):
if state == QtCore.Qt.Checked:
self.srcsink.show()
else:
self.srcsink.hide()
def demodsink_state(self, state):
if state == QtCore.Qt.Checked:
self.demodsink.show()
else:
self.demodsink.hide()
class my_top_block(gr.top_block):
def __init__(self, args):
gr.top_block.__init__(self)
self.source = osmosdr.source_c() if not args.input_file else osmosdr.source_c("file=%s,rate=%f,repeat=%s,freq=%f" % (args.input_file, args.samplerate, args.loop, args.centerfreq))
self.source.set_freq_corr(args.freqcorr, 0)
self.source.set_sample_rate(args.samplerate)
self.source.set_center_freq(args.centerfreq, 0)
self.source.set_gain_mode(0, 0)
self.source.set_gain(10, 0)
self.source.set_if_gain(24, 0)
if args.output_file:
self.file_sink = gr.file_sink(gr.sizeof_gr_complex, args.output_file)
self.connect(self.source, self.file_sink)
if args.output_dir:
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if __name__ == "__main__":
print BANNER
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', dest='input_file', action='store',
help='read the samples from a file')
parser.add_argument('-l', '--loop', dest='loop', action='store_const',
const='true', default='false', help='when reading from a file, loop the samples')
parser.add_argument('-o', '--output', dest='output_file', action='store',
help='save the samples to a file')
parser.add_argument('-O', '--output-dir-log', dest='output_dir', action='store',
help='save the decoded messages in a log file, one for each frequency')
parser.add_argument('-c', '--freqcorr', dest='freqcorr', action='store',
type=eng_notation.str_to_num, default=INI_FREQ_CORR, help='set the frequency correction (ppm)')
parser.add_argument('-f', '--freq', dest='centerfreq', action='store',
type=eng_notation.str_to_num, default=INI_FREQ, help='set the center frequency')
parser.add_argument('-r', '--samplerate', dest='samplerate', action='store',
type=eng_notation.str_to_num, default=INI_SAMPLERATE, help='set the samplerate')
parser.add_argument('-s', '--symrate', dest='symrate', action='store',
type=int, default=INI_SYMRATE, help='set the symbol rate')
parser.add_argument('-C', '--channelsfile', dest='channels_file', type=file,
help='read an initial channels list from a file')
args = parser.parse_args()
# init the flowgraph and run it
tb = my_top_block(args)
tb.start()
# build and show the UI
qapp = QtGui.QApplication(sys.argv)
main_window = main_window(tb, args)
main_window.show()
# Run rabbit, run!
qapp.exec_()
tb.stop()
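# Hedged usage sketch, not part of the original script: an example invocation,
# with placeholder frequency, sample rate, symbol rate and log directory.
#   python pocsag-mrt.py -f 466.075M -r 1M -s 1200 -O pocsag-logs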
|
ckuethe/pocsag-mrt
|
pocsag-mrt.py
|
Python
|
gpl-3.0
| 19,511
|
# Copyright (C) 2016 ycmd contributors.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from ycmd.tests.test_utils import DummyCompleter
from ycmd.user_options_store import DefaultOptions
from nose.tools import eq_
def _FilterAndSortCandidates_Match( candidates, query, expected_matches ):
completer = DummyCompleter( DefaultOptions() )
matches = completer.FilterAndSortCandidates( candidates, query )
eq_( expected_matches, matches )
def FilterAndSortCandidates_OmniCompleter_List_test():
_FilterAndSortCandidates_Match( [ 'password' ],
'p',
[ 'password' ] )
_FilterAndSortCandidates_Match( [ 'words' ],
'w',
[ 'words' ] )
def FilterAndSortCandidates_OmniCompleter_Dictionary_test():
_FilterAndSortCandidates_Match( { 'words': [ 'password' ] },
'p',
[ 'password' ] )
_FilterAndSortCandidates_Match( { 'words': [ { 'word': 'password' } ] },
'p',
[ { 'word': 'password' } ] )
def FilterAndSortCandidates_ServerCompleter_test():
_FilterAndSortCandidates_Match( [ { 'insertion_text': 'password' } ],
'p',
[ { 'insertion_text': 'password' } ] )
|
indianajohn/ycmd
|
ycmd/tests/completer_test.py
|
Python
|
gpl-3.0
| 2,001
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Navigation (back/forward) indicator displayed in the statusbar."""
from qutebrowser.mainwindow.statusbar import textbase
class Backforward(textbase.TextBase):
"""Shows navigation indicator (if you can go backward and/or forward)."""
def __init__(self, parent=None):
super().__init__(parent)
self.enabled = False
def on_tab_cur_url_changed(self, tabs):
"""Called on URL changes."""
tab = tabs.widget.currentWidget()
if tab is None: # pragma: no cover
self.setText('')
self.hide()
return
self.on_tab_changed(tab)
def on_tab_changed(self, tab):
"""Update the text based on the given tab."""
text = ''
if tab.history.can_go_back():
text += '<'
if tab.history.can_go_forward():
text += '>'
if text:
text = '[' + text + ']'
self.setText(text)
self.setVisible(bool(text) and self.enabled)
|
The-Compiler/qutebrowser
|
qutebrowser/mainwindow/statusbar/backforward.py
|
Python
|
gpl-3.0
| 1,784
|
#-*- coding: utf-8 -*-
#
#copyright 2010 Dominik "Socek" Długajczyk
#
#This file is part of Gadu History.
#
#Gadu History is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#Gadu History is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Gadu History; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import curses
from copy import copy
from lib.gui.locals import encode_string
ESCAPES = [ '\xc5', '\xc4', '\xc3' ]
class Text(object):
def __init__(self, y, x, title = None, text = '', only_digits = False, width = 25):
self.x = x
self.y = y
self._title = title
self.text = text
self._running = True
self._win = None
self._cursor = 0
self._low_cursor = 0
self._startpost = 0
self._width = width
self._only_digits = only_digits
self.cursor_end()
self.run()
@property
def text_width(self):
return self._width - 3
def run(self):
gflags = curses.A_BOLD
width = self._width
self._win = curses.newwin( 3, width, self.y, self.x)
self._win.border()
if self._title:
width = self._win.getmaxyx()[1] - 2 # TODO: I don't know why it needs to be "-2" but it does not work otherwise
center = ( width/2 ) - ( len(self._title)/2 ) + 1
self._win.addstr( 0, center, encode_string(self._title), gflags )
self._win.refresh()
self._win2 = curses.newwin( 1, width, self.y+1, self.x+1)
curses.curs_set(1)
while self._running:
self.refresh()
self._win2.keypad(1)
char = self._win2.getch()
self.validator( char )
curses.curs_set(0)
self._win.erase()
self._win.refresh()
def text_length(self):
text = copy( self.text )
for esc in ESCAPES:
text = text.replace( esc, '')
return len( text )
def refresh(self):
width = self.text_width
if self.text_length() < width:
text = self.text
else:
text = self.text[self._startpost: self._startpost+width]
if len( text ) < width:
text = self.text[:width]
self._win2.move( 0, 0 )
self._win2.clrtoeol()
try:
            #I don't know what the source of the problem is, so I made a workaround
            #When someone uses a "polish" letter in front it prints something strange
if len(text) < 2:
text += ' '
#end of workaround
self._win2.addstr( 0, 0, text, curses.A_BOLD )
except curses.error:
raise RuntimeError( text )
self._win2.move( 0, self._low_cursor )
def cursor_home(self):
self._cursor = 0
self._low_cursor = 0
self._startpost = 0
def cursor_end(self):
self._cursor = self.text_length()
if self.text_length() > self.text_width:
self._low_cursor = self.text_width
else:
self._low_cursor = self.text_length()
self._startpost = self.text_length() - self.text_width
if self._startpost < 0:
self._startpost = 0
def backspace(self):
if self._cursor > 0:
tmp = list( self.text )
listtext = []
last = []
for char in tmp:
if char in ESCAPES:
last = [char]
else:
listtext.append( last + [char] )
last = []
listtext.pop( self._cursor - 1 )
self.text = ''
for char in listtext:
for skladowa in char:
self.text += skladowa
self._cursor -= 1
length = self.text_length()
if length < self.text_width:
self._low_cursor -= 1
else:
self._startpost -= 1
def cursor_right(self):
if self._cursor < self.text_length():
self._cursor += 1
if self._low_cursor < self.text_width:
self._low_cursor += 1
else:
if self._startpost < self.text_length():
self._startpost += 1
def cursor_left(self):
if self._cursor > 0:
self._cursor -= 1
if self._low_cursor > 0:
self._low_cursor -= 1
else:
if self._startpost > 0:
self._startpost -= 1
def validator(self, var):
#raise RuntimeError( var )
#print var
if var == 10:
self._running = False
return False
elif var == 263 or var == 127:
self.backspace()
return False
elif var == 260: # Cursor left
self.cursor_left()
return False
elif var == 261: # Cursor right
self.cursor_right()
return False
elif var == 262: # home
self.cursor_home()
return False
elif var == 360:
self.cursor_end()
return False
elif var == 274:
self._running = False
self.text = None
return False
elif var > 250:
return False
#raise RuntimeError( var )
if self._only_digits and not var in [ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 ]:
return False
# We end up here if there was no other action
if self._cursor >= self.text_length():
self.text += chr( var )
else:
listtext = list( self.text )
listtext[ self._cursor ] = chr( var )
self.text = ''.join( listtext )
self.cursor_right()
return True
class ROText(object):
def __init__(self, y, x, text, title = None ):
self._x = x
self._y = y
self._title = title
self.text = text
self.refresh()
def refresh(self):
width = self.text_length() + 2
gflags = curses.A_BOLD
self._win = curses.newwin( 3, width, self._y, self._x)
self._win.border()
swidth = self._win.getmaxyx()[1] - 2 # TODO: I don't know why it needs to be "-2" but it does not work otherwise
if self._title:
center = ( swidth/2 ) - ( len(self._title)/2 ) + 1
self._win.addstr( 0, center, encode_string(self._title), gflags )
self._win.refresh()
self._win2 = curses.newpad( 1, swidth + 1)
try:
self._win2.addstr( 0, 0, encode_string(self.text), curses.A_BOLD )
except curses.error, er:
raise RuntimeError( self.text )
self._win2.refresh( 0, 0, self._y+1, self._x+1, self._y+2, self._x+swidth)
def clear(self):
self._win.clear()
self._win.refresh()
def run(self):
self._win.getch()
def text_length(self):
text = copy( self.text )
#for esc in ESCAPES:
# text = text.replace( esc, '')
return len( text )
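# Hedged usage sketch, not part of the original module: Text must run inside an
# initialised curses screen; the coordinates and title below are placeholders.
#   import curses
#   def _demo(screen):
#       box = Text(2, 2, title='Nick', text='')
#       return box.text
#   print curses.wrapper(_demo)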
|
socek/Gadu-History
|
lib/gui/text.py
|
Python
|
gpl-3.0
| 7,499
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Signing, verification and key support for MAR files."""
from construct import Int32ub
from construct import Int64ub
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import utils
from mardor.format import mar
from mardor.format import sigs_header
from mardor.utils import file_iter
_hash_algorithms = {
'sha1': hashes.SHA1(),
'sha384': hashes.SHA384(),
}
def get_publickey(keydata):
"""Load the public key from a PEM encoded string."""
try:
key = serialization.load_pem_public_key(
keydata,
backend=default_backend(),
)
return key
except ValueError:
key = serialization.load_pem_private_key(
keydata,
password=None,
backend=default_backend(),
)
key = key.public_key()
return key
def get_privatekey(keydata):
"""Load the private key from a PEM encoded string."""
key = serialization.load_pem_private_key(
keydata,
password=None,
backend=default_backend(),
)
return key
def get_keysize(keydata):
"""Return the key size of a public key."""
key = get_publickey(keydata)
return key.key_size
def get_signature_data(fileobj, filesize):
"""Read data from MAR file that is required for MAR signatures.
Args:
        fileobj (file-like object): file-like object to read the MAR data from
filesize (int): the total size of the file
Yields:
blocks of bytes representing the data required to generate or validate
signatures.
"""
# Read everything except the signature entries
# The first 8 bytes are covered, as is everything from the beginning
# of the additional section to the end of the file. The signature
# algorithm id and size fields are also covered.
fileobj.seek(0)
marfile = mar.parse_stream(fileobj)
if not marfile.signatures:
raise IOError("Can't generate signature data for file without signature blocks")
# MAR header
fileobj.seek(0)
block = fileobj.read(8)
yield block
# Signatures header
sigs = sigs_header.parse_stream(fileobj)
sig_types = [(sig.algorithm_id, sig.size) for sig in sigs.sigs]
block = Int64ub.build(filesize) + Int32ub.build(sigs.count)
yield block
# Signature algorithm id and size per entry
for algorithm_id, size in sig_types:
block = Int32ub.build(algorithm_id) + Int32ub.build(size)
yield block
# Everything else in the file is covered
for block in file_iter(fileobj):
yield block
def make_hasher(algorithm_id):
"""Create a hashing object for the given signing algorithm."""
if algorithm_id == 1:
return hashes.Hash(hashes.SHA1(), default_backend())
elif algorithm_id == 2:
return hashes.Hash(hashes.SHA384(), default_backend())
else:
raise ValueError("Unsupported signing algorithm: %s" % algorithm_id)
def sign_hash(private_key, hash, hash_algo):
"""Sign the given hash with the given private key.
Args:
        private_key (str): PEM encoded private key
hash (byte str): hash to sign
hash_algo (str): name of hash algorithm used
Returns:
byte string representing the signature
"""
hash_algo = _hash_algorithms[hash_algo]
return get_privatekey(private_key).sign(
hash,
padding.PKCS1v15(),
utils.Prehashed(hash_algo),
)
def verify_signature(public_key, signature, hash, hash_algo):
"""Verify the given signature is correct for the given hash and public key.
Args:
public_key (str): PEM encoded public key
signature (bytes): signature to verify
hash (bytes): hash of data
hash_algo (str): hash algorithm used
Returns:
True if the signature is valid, False otherwise
"""
hash_algo = _hash_algorithms[hash_algo]
try:
return get_publickey(public_key).verify(
signature,
hash,
padding.PKCS1v15(),
utils.Prehashed(hash_algo),
) is None
except InvalidSignature:
return False
def make_rsa_keypair(bits):
"""Generate an RSA keypair.
Args:
bits (int): number of bits to use for the key.
Returns:
(private_key, public_key) - both as PEM encoded strings
"""
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=bits,
backend=default_backend(),
)
private_pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
public_pem = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
return private_pem, public_pem
def make_dummy_signature(algorithm_id):
"""Return dummy signatures of the appropriate length.
Args:
algorithm_id (int): algorithm id for signatures. 1 is for 'sha1', 2 is
for 'sha384'
Returns:
a byte string
"""
if algorithm_id == 1:
return b'\x00' * 256
elif algorithm_id == 2:
return b'\x00' * 512
else:
raise ValueError("Invalid algorithm id: %s" % algorithm_id)
def format_hash(digest, hash_algo):
"""Format a hash as an ASN1 DigestInfo byte string.
Args:
digest (bytes): hash digest
hash_algo (str): hash algorithm used, e.g. 'sha384'
Returns:
Byte string of ASN1 encoded digest info
"""
prefixes = {
'sha1': b'\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14',
'sha384': b'\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30',
}
return prefixes[hash_algo] + digest
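# Hedged usage sketch, not part of the original module: a signing round trip
# using the helpers above; the key size, algorithm id and payload are arbitrary
# choices for illustration.
if __name__ == "__main__":
    private_pem, public_pem = make_rsa_keypair(2048)
    hasher = make_hasher(2)  # algorithm id 2 selects SHA-384
    hasher.update(b"example MAR signature data")
    digest = hasher.finalize()
    signature = sign_hash(private_pem, digest, 'sha384')
    print(verify_signature(public_pem, signature, digest, 'sha384'))  # True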
|
mozilla/build-mar
|
src/mardor/signing.py
|
Python
|
mpl-2.0
| 6,416
|
#!/usr/bin/python
import os
from test_action_runner import TestActionRunner
def main():
    print 'This is an example!'
for var in ['TEST_PRE_VAR', 'TEST_VAR', 'TEST_POST_VAR', 'TEAM']:
if var in os.environ:
print '[%s] is [%s]' % (var, os.environ[var])
else:
print 'No [%s]' % var
runner = TestActionRunner()
runner.do_test_pre().do_test().do_test_post()
runner.run()
for var in ['TEST_PRE_VAR', 'TEST_VAR', 'TEST_POST_VAR', 'TEAM']:
if var in os.environ:
print '[%s] is [%s]' % (var, os.environ[var])
else:
print 'No [%s]' % var
if __name__ == '__main__':
main()
|
zapion/combo-runner
|
examples/test/run_test.py
|
Python
|
mpl-2.0
| 673
|
""" Provide connection with Marketplace API
"""
import json
import logging
import time
import urllib
import oauth2 as oauth
import requests
log = logging.getLogger('marketplace.%s' % __name__)
class NotExpectedStatusCode(requests.exceptions.HTTPError):
""" Raise if status code returned from API is not the expected one
"""
pass
def _get_args(consumer):
"""Provide a dict with oauth data
"""
return dict(
oauth_consumer_key=consumer.key,
oauth_nonce=oauth.generate_nonce(),
oauth_signature_method='HMAC-SHA1',
oauth_timestamp=int(time.time()),
oauth_version='1.0')
class Connection:
""" Keeps the consumer class and provides the way to connect to the
Marketplace API
"""
signature_method = oauth.SignatureMethod_HMAC_SHA1()
consumer = None
def __init__(self, consumer_key, consumer_secret):
self.set_consumer(consumer_key, consumer_secret)
def set_consumer(self, consumer_key, consumer_secret):
"""Sets the consumer attribute
"""
self.consumer = oauth.Consumer(consumer_key, consumer_secret)
def prepare_request(self, method, url, body=''):
"""Adds consumer and signs the request
:returns: headers of the signed request
"""
req = oauth.Request(method=method, url=url,
parameters=_get_args(self.consumer))
req.sign_request(self.signature_method, self.consumer, None)
headers = req.to_header()
headers['Content-type'] = 'application/json'
if body:
if method == 'GET':
body = urllib.urlencode(body)
else:
body = json.dumps(body)
return {"headers": headers, "data": body}
@staticmethod
def _get_error_reason(response):
"""Extract error reason from the response. It might be either
the 'reason' or the entire response
"""
body = response.json
if body and 'reason' in body:
return body['reason']
return response.content
def fetch(self, method, url, data=None, expected_status_code=None):
"""Prepare the headers, encode data, call API and provide
data it returns
"""
kwargs = self.prepare_request(method, url, data)
response = getattr(requests, method.lower())(url, **kwargs)
log.debug(str(response.__dict__))
if response.status_code >= 400:
response.raise_for_status()
if (expected_status_code
and response.status_code != expected_status_code):
raise NotExpectedStatusCode(self._get_error_reason(response))
return response
def fetch_json(self, method, url, data=None, expected_status_code=None):
"""Return json decoded data from fetch
"""
return self.fetch(method, url, data, expected_status_code).json()
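# Hedged usage sketch, not part of the original module: the consumer key,
# secret and URL below are placeholders, not real Marketplace credentials
# or endpoints.
if __name__ == '__main__':
    conn = Connection('example-consumer-key', 'example-consumer-secret')
    result = conn.fetch_json('GET',
                             'https://marketplace.example.com/api/apps/app/',
                             expected_status_code=200)
    print result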
|
bobsilverberg/Marketplace.Python
|
marketplace/connection.py
|
Python
|
mpl-2.0
| 2,912
|
"""
Tests for Blocks Views
"""
import json
import ddt
from django.test import RequestFactory, TestCase
from django.core.urlresolvers import reverse
import httpretty
from student.tests.factories import UserFactory
from third_party_auth.tests.utils import ThirdPartyOAuthTestMixin, ThirdPartyOAuthTestMixinGoogle
from .. import adapters
from .. import views
from .constants import DUMMY_REDIRECT_URL
class _DispatchingViewTestCase(TestCase):
"""
Base class for tests that exercise DispatchingViews.
"""
dop_adapter = adapters.DOPAdapter()
dot_adapter = adapters.DOTAdapter()
view_class = None
url = None
def setUp(self):
super(_DispatchingViewTestCase, self).setUp()
self.user = UserFactory()
self.dot_app = self.dot_adapter.create_public_client(
name='test dot application',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='dot-app-client-id',
)
self.dop_client = self.dop_adapter.create_public_client(
name='test dop client',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='dop-app-client-id',
)
def _post_request(self, user, client):
"""
        Call the view with a POST request object with the appropriate format,
returning the response object.
"""
return self.client.post(self.url, self._post_body(user, client))
def _post_body(self, user, client):
"""
Return a dictionary to be used as the body of the POST request
"""
raise NotImplementedError()
@ddt.ddt
class TestAccessTokenView(_DispatchingViewTestCase):
"""
Test class for AccessTokenView
"""
view_class = views.AccessTokenView
url = reverse('access_token')
def _post_body(self, user, client):
"""
Return a dictionary to be used as the body of the POST request
"""
return {
'client_id': client.client_id,
'grant_type': 'password',
'username': user.username,
'password': 'test',
}
@ddt.data('dop_client', 'dot_app')
def test_access_token_fields(self, client_attr):
client = getattr(self, client_attr)
response = self._post_request(self.user, client)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('access_token', data)
self.assertIn('expires_in', data)
self.assertIn('scope', data)
self.assertIn('token_type', data)
def test_dot_access_token_provides_refresh_token(self):
response = self._post_request(self.user, self.dot_app)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertIn('refresh_token', data)
def test_dop_public_client_access_token(self):
response = self._post_request(self.user, self.dop_client)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotIn('refresh_token', data)
@ddt.ddt
@httpretty.activate
class TestAccessTokenExchangeView(ThirdPartyOAuthTestMixinGoogle, ThirdPartyOAuthTestMixin, _DispatchingViewTestCase):
"""
Test class for AccessTokenExchangeView
"""
view_class = views.AccessTokenExchangeView
url = reverse('exchange_access_token', kwargs={'backend': 'google-oauth2'})
def _post_body(self, user, client):
return {
'client_id': client.client_id,
'access_token': self.access_token,
}
@ddt.data('dop_client', 'dot_app')
def test_access_token_exchange_calls_dispatched_view(self, client_attr):
client = getattr(self, client_attr)
self.oauth_client = client
self._setup_provider_response(success=True)
response = self._post_request(self.user, client)
self.assertEqual(response.status_code, 200)
@ddt.ddt
class TestAuthorizationView(TestCase):
"""
Test class for AuthorizationView
"""
dop_adapter = adapters.DOPAdapter()
def setUp(self):
super(TestAuthorizationView, self).setUp()
self.user = UserFactory()
self.dop_client = self._create_confidential_client(user=self.user, client_id='dop-app-client-id')
def _create_confidential_client(self, user, client_id):
"""
Create a confidential client suitable for testing purposes.
"""
return self.dop_adapter.create_confidential_client(
name='test_app',
user=user,
client_id=client_id,
redirect_uri=DUMMY_REDIRECT_URL
)
def test_authorization_view(self):
self.client.login(username=self.user.username, password='test')
response = self.client.post(
'/oauth2/authorize/',
{
'client_id': self.dop_client.client_id, # TODO: DOT is not yet supported (MA-2124)
'response_type': 'code',
'state': 'random_state_string',
'redirect_uri': DUMMY_REDIRECT_URL,
},
follow=True,
)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
context = response.context # pylint: disable=no-member
self.assertIn('form', context)
self.assertIsNone(context['form']['authorize'].value())
self.assertIn('oauth_data', context)
oauth_data = context['oauth_data']
self.assertEqual(oauth_data['redirect_uri'], DUMMY_REDIRECT_URL)
self.assertEqual(oauth_data['state'], 'random_state_string')
class TestViewDispatch(TestCase):
"""
Test that the DispatchingView dispatches the right way.
"""
dop_adapter = adapters.DOPAdapter()
dot_adapter = adapters.DOTAdapter()
def setUp(self):
super(TestViewDispatch, self).setUp()
self.user = UserFactory()
self.view = views._DispatchingView() # pylint: disable=protected-access
self.dop_adapter.create_public_client(
name='',
user=self.user,
client_id='dop-id',
redirect_uri=DUMMY_REDIRECT_URL
)
self.dot_adapter.create_public_client(
name='',
user=self.user,
client_id='dot-id',
redirect_uri=DUMMY_REDIRECT_URL
)
def assert_is_view(self, view_candidate):
"""
Assert that a given object is a view. That is, it is callable, and
        takes a request argument. Note: while technically the request argument
could take any name, this assertion requires the argument to be named
`request`. This is good practice. You should do it anyway.
"""
_msg_base = u'{view} is not a view: {reason}'
msg_not_callable = _msg_base.format(view=view_candidate, reason=u'it is not callable')
msg_no_request = _msg_base.format(view=view_candidate, reason=u'it has no request argument')
self.assertTrue(hasattr(view_candidate, '__call__'), msg_not_callable)
args = view_candidate.func_code.co_varnames
self.assertTrue(args, msg_no_request)
self.assertEqual(args[0], 'request')
def _get_request(self, client_id):
"""
Return a request with the specified client_id in the body
"""
return RequestFactory().post('/', {'client_id': client_id})
def test_dispatching_to_dot(self):
request = self._get_request('dot-id')
self.assertEqual(self.view.select_backend(request), self.dot_adapter.backend)
def test_dispatching_to_dop(self):
request = self._get_request('dop-id')
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_dispatching_with_no_client(self):
request = self._get_request(None)
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_dispatching_with_invalid_client(self):
request = self._get_request('abcesdfljh')
self.assertEqual(self.view.select_backend(request), self.dop_adapter.backend)
def test_get_view_for_dot(self):
view_object = views.AccessTokenView()
self.assert_is_view(view_object.get_view_for_backend(self.dot_adapter.backend))
def test_get_view_for_dop(self):
view_object = views.AccessTokenView()
self.assert_is_view(view_object.get_view_for_backend(self.dop_adapter.backend))
def test_get_view_for_no_backend(self):
view_object = views.AccessTokenView()
self.assertRaises(KeyError, view_object.get_view_for_backend, None)
|
devs1991/test_edx_docmode
|
lms/djangoapps/oauth_dispatch/tests/test_views.py
|
Python
|
agpl-3.0
| 8,685
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
from django.core.exceptions import ImproperlyConfigured
from shuup.utils.setup import Setup
from . import base_settings
def configure(setup):
base_settings.configure(setup)
local_settings_file = os.getenv('LOCAL_SETTINGS_FILE')
    # Backward compatibility: look in the current directory if the
    # LOCAL_SETTINGS_FILE environment variable is unset
if local_settings_file is None:
cand = os.path.join(os.path.dirname(__file__), 'local_settings.py')
if os.path.exists(cand):
local_settings_file = cand
# Load local settings from file
if local_settings_file:
local_settings_ns = {
'__file__': local_settings_file,
}
with open(local_settings_file, 'rb') as fp:
compiled = compile(fp.read(), local_settings_file, 'exec')
exec(compiled, local_settings_ns)
if 'configure' not in local_settings_ns:
raise ImproperlyConfigured('No configure in local_settings')
local_configure = local_settings_ns['configure']
local_configure(setup)
return setup
globals().update(Setup.configure(configure))
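# A minimal sketch (illustrative only, not shipped with this package) of the hook that
# LOCAL_SETTINGS_FILE is expected to point at: a module defining a `configure(setup)`
# callable that mutates the setup namespace, for example:
#
#     def configure(setup):
#         setup.DEBUG = True
#         setup.ALLOWED_HOSTS = ["localhost"]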
|
hrayr-artunyan/shuup
|
shuup_workbench/settings/__init__.py
|
Python
|
agpl-3.0
| 1,393
|
# -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{
'name': 'Partner Projects',
"version": "2.0",
'category': 'Project Management',
'summary': 'Quickly view all projects from a partner',
'description':
"Add a button to the partners view to see its related projects.",
'author': 'Grupo ESOC',
'website': 'http://www.grupoesoc.es',
'license': 'AGPL-3',
"installable": True,
"depends" : [
"analytic",
"project",
],
"data" : [
'view/partner.xml',
],
}
|
Antiun/odoo-grupoesoc-addons
|
partner_projects/__openerp__.py
|
Python
|
agpl-3.0
| 1,264
|
# -*- coding: utf-8 -*-
##########################################################################
# #
# Copyright 2015 Lorenzo Battistini - Agile Business Group #
# About license, see __openerp__.py #
# #
##########################################################################
from . import models
|
Eficent/manufacture-reporting
|
mrp_repair_layout/__init__.py
|
Python
|
agpl-3.0
| 496
|
"""DuplicateAnalysis uses Analysis as it's base. Until that's fixed there
is some confusion.
"""
from openerp import fields, models
from base_olims_model import BaseOLiMSModel
from fields.string_field import StringField
from fields.widget.widget import StringWidget
from fields.boolean_field import BooleanField
from openerp.tools.translate import _
DUPLICATE_ANALYSIS_STATES = (
('unassigned','Unassigned'),
('assigned','Assigned'),
('attachment_due','Attachment Outstanding'),
('to_be_verified','To be verified'),
('verified','Verified'),
('rejected','Rejected'),
)
schema = (
fields.Many2one(string='Analysis',
comodel_name='olims.analysis',
required=True,
),
fields.Many2many(string='InterimFields', comodel_name='olims.interimfield'),
StringField(
'Result',
),
StringField(
'ResultDM',
),
BooleanField(
'Retested',
),
# ~~~~~~~ To be implemented ~~~~~~~
# fields.One2many(string='Attachment',
# comodel_name='olims.analysis',
# # multiValued=1,
# # allowed_types=('Attachment',),
# # referenceClass=HoldingReference,
# # relationship='DuplicateAnalysisAttachment',
# ),
StringField(
'Analyst',
),
    fields.Many2one(string='Instrument',
                    comodel_name='olims.instrument',
                    required=False,
                    ),
# ~~~~~~~ To be implemented ~~~~~~~
# ComputedField(
# 'SamplePartition',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getSamplePartition()',
# ),
# ComputedField(
# 'ClientOrderNumber',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getClientOrderNumber()',
# ),
# ComputedField(
# 'Service',
# expression='context.getAnalysis() and context.getAnalysis().getService() or ""',
# ),
# ComputedField(
# 'ServiceUID',
# expression='context.getAnalysis() and context.getAnalysis().getServiceUID()',
# ),
# ComputedField(
# 'CategoryUID',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getCategoryUID()',
# ),
# ComputedField(
# 'Calculation',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getCalculation()',
# ),
# ComputedField(
# 'ReportDryMatter',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getReportDryMatter()',
# ),
# ComputedField(
# 'DateReceived',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getDateReceived()',
# ),
# ComputedField(
# 'MaxTimeAllowed',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getMaxTimeAllowed()',
# ),
# ComputedField(
# 'DueDate',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getDueDate()',
# ),
# ComputedField(
# 'Duration',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getDuration()',
# ),
# ComputedField(
# 'Earliness',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getEarliness()',
# ),
# ComputedField(
# 'ClientUID',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getClientUID()',
# ),
# ComputedField(
# 'RequestID',
# expression='context.getAnalysis() and context.getAnalysis().aq_parent.portal_type=="AnalysisRequest" and context.getAnalysis().getRequestID() or ""',
# ),
# ComputedField(
# 'PointOfCapture',
# expression='context.getAnalysis() and context.getAnalysis().getPointOfCapture()',
# ),
StringField(
'ReferenceAnalysesGroupID',
widget=StringWidget(
label=_("ReferenceAnalysesGroupID"),
visible=False,
),
),
    fields.Selection(string='state',
                     selection=DUPLICATE_ANALYSIS_STATES,
                     default='unassigned',
                     select=True,
                     required=True, readonly=True,
                     copy=False, track_visibility='always'
                     ),
# ~~~~~~~ To be implemented ~~~~~~~
# ComputedField(
# 'Keyword',
# expression="context.getAnalysis().getKeyword()",
# ),
)
class DuplicateAnalysis(models.Model, BaseOLiMSModel): #Analysis
_name='olims.duplicate_analysis'
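    # The workflow_script_* methods below implement the record's workflow transitions:
    # each writes one of the DUPLICATE_ANALYSIS_STATES values into the `state` field
    # (submit/attach/retract are currently no-ops that return an empty string).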
def workflow_script_submit(self):
return ""
def workflow_script_attach(self):
return ""
def workflow_script_retract(self):
return ""
def workflow_script_verify(self,cr,uid,ids,context=None):
self.write(cr, uid,
ids, {'state': 'verified'},
context=context)
return True
def workflow_script_assign(self,cr,uid,ids,context=None):
self.write(cr, uid,
ids, {'state': 'assigned'},
context=context)
return True
def workflow_script_unassign(self,cr,uid,ids,context=None):
        self.write(cr, uid,
                   ids, {'state': 'unassigned'},
                   context=context)
return True
def workflow_script_attachment_due(self,cr,uid,ids,context=None):
self.write(cr, uid,
ids, {'state': 'attachment_due'},
context=context)
return True
def workflow_script_to_be_verified(self,cr,uid,ids,context=None):
self.write(cr, uid,
ids, {'state': 'to_be_verified'},
context=context)
return True
def workflow_script_rejected(self,cr,uid,ids,context=None):
self.write(cr, uid,
ids, {'state': 'rejected'},
context=context)
return True
DuplicateAnalysis.initialze(schema)
|
yasir1brahim/OLiMS
|
models/duplicateanalysis.py
|
Python
|
agpl-3.0
| 6,784
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import fields, osv
class status_perusahaan(osv.osv):
_name = "base.status_perusahaan"
_description = "Status Perusahaan"
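    # The field labels below are Indonesian: "Status Perusahaan" = company status,
    # "Kode" = code, "Keterangan" = description/notes, "Aktif" = active.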
_columns = {
'name': fields.char(
string='Status Perusahaan',
size=100,
required=True,
),
'kode': fields.char(
string='Kode',
size=30,
),
'keterangan': fields.text(
string='Keterangan',
),
'active': fields.boolean(
string='Aktif',
),
}
|
andhit-r/koi_base
|
ar_base_perusahaan/object_module/status_perusahaan.py
|
Python
|
agpl-3.0
| 1,459
|
import json
from graphviz import Digraph
with open('events.json') as events_file:
events = json.load(events_file)
handlers = events["handlers"]
flows = events["flows"]
publishable = events["publishable"]
missions = events["missions"]
process_conclusion = events["process_conclusion"]
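# Illustrative sketch of the events.json structure this script expects (inferred from the
# lookups below; the example names are hypothetical):
#
# {
#     "handlers": {"Account": {"receives": ["AccountCreated"], "emits": ["EmailSent"]}},
#     "flows": {"Signup": ["AccountCreated"]},
#     "publishable": ["AccountCreated"],
#     "missions": {"Tutorial": {"steps": {"SetupPC": {"filters": ["PCBought"], "emits": ["StepDone"]}}}},
#     "process_conclusion": ["ProcessCompleted"]
# }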
def is_publishable(name):
return name in publishable
def node_event(g, name):
color = 'lightblue4' if is_publishable(name) else 'lightblue2'
g.node(name, shape='box', color=color, style='filled')
def node_handler(g, name):
g.node(name, color='cornsilk', style='filled')
def node_flow(g, name):
g.node(name, color='khaki', style='filled')
def node_step(g, name):
g.node(name, color='darkolivegreen3', style='filled')
def handler_graph(g):
for entry in handlers:
handler = entry + ' Handler'
for recv in handlers[entry]['receives']:
node_event(g, recv)
node_handler(g, handler)
g.edge(recv, handler, label='handled by')
for emit in handlers[entry]['emits']:
node_event(g, emit)
node_handler(g, handler)
g.edge(handler, emit, label='emits')
top = "On Process Completion"
for processed in process_conclusion:
node_event(g, processed)
node_handler(g, top)
g.edge(top, processed, label='emits')
g.render()
def flow_graph(g):
for entry in flows:
flow = entry + ' Flow'
for emit in flows[entry]:
node_event(g, emit)
node_flow(g, flow)
g.edge(flow, emit, label='emits')
g.render()
def mission_graph(g):
for mission in missions:
with g.subgraph(name='cluster_' + mission) as gm:
gm.attr(label=mission + ' Mission', color='red')
for step in missions[mission]['steps']:
step_data = missions[mission]['steps'][step]
step = step + ' Step'
for filtered in step_data['filters']:
node_event(gm, filtered)
node_step(gm, step)
gm.edge(step, filtered, label='filters')
for emit in step_data['emits']:
node_event(gm, emit)
node_step(gm, step)
gm.edge(step, emit, label='emits')
g.render()
g1 = Digraph('events_handler', filename='graphs/events_handler.dot')
g2 = Digraph('events_flow', filename='graphs/events_flow.dot')
g3 = Digraph('events_missions', filename='graphs/events_missions.dot')
g1.attr(rankdir='LR')
g2.attr(rankdir='LR')
g3.attr(rankdir='LR')
handler_graph(g1)
flow_graph(g2)
mission_graph(g3)
|
renatomassaro/Helix
|
graph.py
|
Python
|
agpl-3.0
| 2,645
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.dispatch import Signal
element_detached = Signal(providing_args=['path_detached'])
|
uclouvain/OSIS-Louvain
|
program_management/publisher.py
|
Python
|
agpl-3.0
| 1,375
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date
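# The helpers below use Indonesian identifiers: tanggal = date/day, bulan = month,
# tahun = year, hari = day count. For example, cari_tanggal('2014-01-30', 5) walks
# 5 days forward and returns '2014-02-04'.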
def string2date(string_tanggal):
tanggal = int(string_tanggal[8:10])
bulan = int(string_tanggal[5:7])
tahun = int(string_tanggal[0:4])
return date(tahun, bulan, tanggal)
def cari_tanggal(tanggal, skip):
a = string2date(tanggal)
b = a.toordinal() + skip
c = date.fromordinal(b)
return c.strftime('%Y-%m-%d')
def jumlah_hari(bulan, tahun):
if bulan in [1, 3, 5, 7, 8, 10, 12]:
return 31
elif bulan in [4, 6, 9, 11]:
return 30
    else:
        # Gregorian leap year rule: divisible by 4, except century years not divisible by 400
        if tahun % 4 == 0 and (tahun % 100 != 0 or tahun % 400 == 0):
            return 29
        else:
            return 28
def bulan_selanjutnya(bulan, tahun, skip):
if bulan + skip > 12:
        return [bulan + skip - 12, tahun + 1]
else:
return [bulan + skip, tahun]
def cek_tanggal_valid(bulan, tahun, tanggal):
hari = jumlah_hari(bulan, tahun)
if tanggal > hari:
return hari
else:
return tanggal
def cari_tanggal_selanjutnya(string_tanggal, skip_tanggal, skip_bulan):
bulan = int(string_tanggal[5:7])
tahun = int(string_tanggal[0:4])
bulan1, tahun1 = bulan_selanjutnya(bulan, tahun, skip_bulan)
tanggal1 = cek_tanggal_valid(bulan1, tahun1, skip_tanggal)
a = date(tahun1, bulan1, tanggal1)
return a.strftime('%Y-%m-%d')
|
kawula-openerp-indonesia/koi_base
|
ar_base_waktu/date_tools.py
|
Python
|
agpl-3.0
| 2,281
|
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import signing
from django.utils.translation import ugettext as _
import datetime
from taiga.base.mails import mail_builder
from taiga.base import exceptions as exc
def request_project_transfer(project, user):
template = mail_builder.transfer_request
email = template(project.owner, {"project": project, "requester": user})
email.send()
def start_project_transfer(project, user, reason):
"""Generates the transfer token for a project transfer and notify to the destination user
:param project: Project trying to transfer
:param user: Destination user
:param reason: Reason to transfer the project
"""
signer = signing.TimestampSigner()
token = signer.sign(user.id)
project.transfer_token = token
project.save()
template = mail_builder.transfer_start
context = {
"project": project,
"receiver": user,
"token": token,
"reason": reason
}
email = template(user, context)
email.send()
def validate_project_transfer_token(token, project, user):
signer = signing.TimestampSigner()
if project.transfer_token != token:
raise exc.WrongArguments(_("Token is invalid"))
try:
value = signer.unsign(token, max_age=datetime.timedelta(days=7))
except signing.SignatureExpired:
raise exc.WrongArguments(_("Token has expired"))
except signing.BadSignature:
raise exc.WrongArguments(_("Token is invalid"))
if str(value) != str(user.id):
raise exc.WrongArguments(_("Token is invalid"))
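# Descriptive note on the transfer-token lifecycle implemented in this module:
# start_project_transfer signs the destination user's id with TimestampSigner and stores
# the token on the project; validate_project_transfer_token checks that the stored token
# matches, has not expired (7 days) and unsigns back to the same user id; the accept and
# reject helpers below then clear the token and notify the relevant party by email.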
def reject_project_transfer(project, user, token, reason):
validate_project_transfer_token(token, project, user)
project.transfer_token = None
project.save()
template = mail_builder.transfer_reject
context = {
"project": project,
"rejecter": user,
"reason": reason
}
email = template(project.owner, context)
email.send()
def accept_project_transfer(project, user, token, reason):
validate_project_transfer_token(token, project, user)
# Set new owner as project admin
membership = project.memberships.get(user=user)
if not membership.is_admin:
membership.is_admin = True
membership.save()
# Change the owner of the project
old_owner = project.owner
project.transfer_token = None
project.owner = user
project.save()
# Send mail
template = mail_builder.transfer_accept
context = {
"project": project,
"old_owner": old_owner,
"new_owner": user,
"reason": reason
}
email = template(old_owner, context)
email.send()
|
Rademade/taiga-back
|
taiga/projects/services/transfer.py
|
Python
|
agpl-3.0
| 3,556
|
"""
Tests for the Shopping Cart Models
"""
from factory import DjangoModelFactory
from mock import patch
from django.core import mail
from django.conf import settings
from django.db import DatabaseError
from django.test import TestCase
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from shoppingcart.models import Order, OrderItem, CertificateItem, InvalidCartItem, PaidCourseRegistration
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from course_modes.models import CourseMode
from shoppingcart.exceptions import PurchasedCallbackException
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class OrderTest(ModuleStoreTestCase):
def setUp(self):
self.user = UserFactory.create()
self.course_id = "org/test/Test_Course"
CourseFactory.create(org='org', number='test', display_name='Test Course')
for i in xrange(1, 5):
CourseFactory.create(org='org', number='test', display_name='Test Course {0}'.format(i))
self.cost = 40
def test_get_cart_for_user(self):
# create a cart
cart = Order.get_cart_for_user(user=self.user)
# add something to it
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor')
# should return the same cart
cart2 = Order.get_cart_for_user(user=self.user)
self.assertEquals(cart2.orderitem_set.count(), 1)
def test_cart_clear(self):
cart = Order.get_cart_for_user(user=self.user)
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor')
CertificateItem.add_to_order(cart, 'org/test/Test_Course_1', self.cost, 'honor')
self.assertEquals(cart.orderitem_set.count(), 2)
cart.clear()
self.assertEquals(cart.orderitem_set.count(), 0)
def test_add_item_to_cart_currency_match(self):
cart = Order.get_cart_for_user(user=self.user)
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor', currency='eur')
# verify that a new item has been added
self.assertEquals(cart.orderitem_set.count(), 1)
# verify that the cart's currency was updated
self.assertEquals(cart.currency, 'eur')
with self.assertRaises(InvalidCartItem):
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor', currency='usd')
# assert that this item did not get added to the cart
self.assertEquals(cart.orderitem_set.count(), 1)
def test_total_cost(self):
cart = Order.get_cart_for_user(user=self.user)
# add items to the order
course_costs = [('org/test/Test_Course_1', 30),
('org/test/Test_Course_2', 40),
('org/test/Test_Course_3', 10),
('org/test/Test_Course_4', 20)]
for course, cost in course_costs:
CertificateItem.add_to_order(cart, course, cost, 'honor')
self.assertEquals(cart.orderitem_set.count(), len(course_costs))
self.assertEquals(cart.total_cost, sum(cost for _course, cost in course_costs))
def test_purchase(self):
# This test is for testing the subclassing functionality of OrderItem, but in
# order to do this, we end up testing the specific functionality of
# CertificateItem, which is not quite good unit test form. Sorry.
cart = Order.get_cart_for_user(user=self.user)
self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_id))
item = CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor')
# course enrollment object should be created but still inactive
self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_id))
cart.purchase()
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_id))
# test e-mail sending
self.assertEquals(len(mail.outbox), 1)
self.assertEquals('Order Payment Confirmation', mail.outbox[0].subject)
self.assertIn(settings.PAYMENT_SUPPORT_EMAIL, mail.outbox[0].body)
self.assertIn(unicode(cart.total_cost), mail.outbox[0].body)
self.assertIn(item.additional_instruction_text, mail.outbox[0].body)
def test_purchase_item_failure(self):
# once again, we're testing against the specific implementation of
# CertificateItem
cart = Order.get_cart_for_user(user=self.user)
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor')
with patch('shoppingcart.models.CertificateItem.save', side_effect=DatabaseError):
with self.assertRaises(DatabaseError):
cart.purchase()
# verify that we rolled back the entire transaction
self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_id))
# verify that e-mail wasn't sent
self.assertEquals(len(mail.outbox), 0)
def test_purchase_twice(self):
cart = Order.get_cart_for_user(self.user)
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor')
# purchase the cart more than once
cart.purchase()
cart.purchase()
self.assertEquals(len(mail.outbox), 1)
def purchase_with_data(self, cart):
""" purchase a cart with billing information """
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor')
cart.purchase(
first='John',
last='Smith',
street1='11 Cambridge Center',
street2='Suite 101',
city='Cambridge',
state='MA',
postalcode='02412',
country='US',
ccnum='1111',
cardtype='001',
)
@patch.dict(settings.MITX_FEATURES, {'STORE_BILLING_INFO': True})
def test_billing_info_storage_on(self):
cart = Order.get_cart_for_user(self.user)
self.purchase_with_data(cart)
self.assertNotEqual(cart.bill_to_first, '')
self.assertNotEqual(cart.bill_to_last, '')
self.assertNotEqual(cart.bill_to_street1, '')
self.assertNotEqual(cart.bill_to_street2, '')
self.assertNotEqual(cart.bill_to_postalcode, '')
self.assertNotEqual(cart.bill_to_ccnum, '')
self.assertNotEqual(cart.bill_to_cardtype, '')
self.assertNotEqual(cart.bill_to_city, '')
self.assertNotEqual(cart.bill_to_state, '')
self.assertNotEqual(cart.bill_to_country, '')
@patch.dict(settings.MITX_FEATURES, {'STORE_BILLING_INFO': False})
def test_billing_info_storage_off(self):
cart = Order.get_cart_for_user(self.user)
self.purchase_with_data(cart)
self.assertNotEqual(cart.bill_to_first, '')
self.assertNotEqual(cart.bill_to_last, '')
self.assertNotEqual(cart.bill_to_city, '')
self.assertNotEqual(cart.bill_to_state, '')
self.assertNotEqual(cart.bill_to_country, '')
self.assertNotEqual(cart.bill_to_postalcode, '')
# things we expect to be missing when the feature is off
self.assertEqual(cart.bill_to_street1, '')
self.assertEqual(cart.bill_to_street2, '')
self.assertEqual(cart.bill_to_ccnum, '')
self.assertEqual(cart.bill_to_cardtype, '')
class OrderItemTest(TestCase):
def setUp(self):
self.user = UserFactory.create()
def test_orderItem_purchased_callback(self):
"""
This tests that calling purchased_callback on the base OrderItem class raises NotImplementedError
"""
item = OrderItem(user=self.user, order=Order.get_cart_for_user(self.user))
with self.assertRaises(NotImplementedError):
item.purchased_callback()
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class PaidCourseRegistrationTest(ModuleStoreTestCase):
def setUp(self):
self.user = UserFactory.create()
self.course_id = "MITx/999/Robot_Super_Course"
self.cost = 40
self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
self.course_mode = CourseMode(course_id=self.course_id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
self.cart = Order.get_cart_for_user(self.user)
def test_add_to_order(self):
reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_id)
self.assertEqual(reg1.unit_cost, self.cost)
self.assertEqual(reg1.line_cost, self.cost)
self.assertEqual(reg1.unit_cost, self.course_mode.min_price)
self.assertEqual(reg1.mode, "honor")
self.assertEqual(reg1.user, self.user)
self.assertEqual(reg1.status, "cart")
self.assertTrue(PaidCourseRegistration.part_of_order(self.cart, self.course_id))
self.assertFalse(PaidCourseRegistration.part_of_order(self.cart, self.course_id + "abcd"))
self.assertEqual(self.cart.total_cost, self.cost)
def test_add_with_default_mode(self):
"""
        Tests add_to_order where the mode specified in the argument is NOT in the database
        and NOT the default "honor". In this case it just enrolls the user in CourseMode.DEFAULT_MODE, at a price of 0
"""
reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_id, mode_slug="DNE")
self.assertEqual(reg1.unit_cost, 0)
self.assertEqual(reg1.line_cost, 0)
self.assertEqual(reg1.mode, "honor")
self.assertEqual(reg1.user, self.user)
self.assertEqual(reg1.status, "cart")
self.assertEqual(self.cart.total_cost, 0)
self.assertTrue(PaidCourseRegistration.part_of_order(self.cart, self.course_id))
def test_purchased_callback(self):
reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_id)
self.cart.purchase()
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_id))
reg1 = PaidCourseRegistration.objects.get(id=reg1.id) # reload from DB to get side-effect
self.assertEqual(reg1.status, "purchased")
def test_purchased_callback_exception(self):
reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_id)
reg1.course_id = "changedforsomereason"
reg1.save()
with self.assertRaises(PurchasedCallbackException):
reg1.purchased_callback()
self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_id))
reg1.course_id = "abc/efg/hij"
reg1.save()
with self.assertRaises(PurchasedCallbackException):
reg1.purchased_callback()
self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course_id))
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class CertificateItemTest(ModuleStoreTestCase):
"""
Tests for verifying specific CertificateItem functionality
"""
def setUp(self):
self.user = UserFactory.create()
self.course_id = "org/test/Test_Course"
self.cost = 40
CourseFactory.create(org='org', number='test', run='course', display_name='Test Course')
course_mode = CourseMode(course_id=self.course_id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
course_mode.save()
course_mode = CourseMode(course_id=self.course_id,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost)
course_mode.save()
def test_existing_enrollment(self):
CourseEnrollment.enroll(self.user, self.course_id)
cart = Order.get_cart_for_user(user=self.user)
CertificateItem.add_to_order(cart, self.course_id, self.cost, 'verified')
# verify that we are still enrolled
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_id))
cart.purchase()
enrollment = CourseEnrollment.objects.get(user=self.user, course_id=self.course_id)
self.assertEquals(enrollment.mode, u'verified')
def test_single_item_template(self):
cart = Order.get_cart_for_user(user=self.user)
cert_item = CertificateItem.add_to_order(cart, self.course_id, self.cost, 'verified')
self.assertEquals(cert_item.single_item_receipt_template,
'shoppingcart/verified_cert_receipt.html')
cert_item = CertificateItem.add_to_order(cart, self.course_id, self.cost, 'honor')
self.assertEquals(cert_item.single_item_receipt_template,
'shoppingcart/receipt.html')
|
morpheby/levelup-by
|
lms/djangoapps/shoppingcart/tests/test_models.py
|
Python
|
agpl-3.0
| 13,026
|
# -*- coding: utf-8 -*-
"""Video is ungraded Xmodule for support video content.
It's new improved video module, which support additional feature:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
import copy
import json
import logging
import random
from collections import OrderedDict
from operator import itemgetter
from lxml import etree
from pkg_resources import resource_string
from django.conf import settings
from openedx.core.lib.cache_utils import memoize_in_request_cache
from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.x_module import XModule, module_attr
from xmodule.editing_module import TabsEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.xml_module import is_pointer_tag, name_to_pathname, deserialize_field
from xmodule.exceptions import NotFoundError
from .transcripts_utils import VideoTranscriptsMixin, Transcript, get_html5_ids
from .video_utils import create_youtube_string, get_video_from_cdn, get_poster
from .bumper_utils import bumperize
from .video_xfields import VideoFields
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from xmodule.video_module import manage_video_subtitles_save
from xmodule.mixin import LicenseMixin
# The following import/except block for edxval is temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoModule should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoModule out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoModule should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoModule tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
@XBlock.wants('settings')
class VideoModule(VideoFields, VideoTranscriptsMixin, VideoStudentViewHandlers, XModule, LicenseMixin):
"""
XML source example:
<video show_captions="true"
youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
url_name="lecture_21_3" display_name="S19V3: Vacancies"
>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
</video>
"""
video_time = 0
icon_class = 'video'
# To make sure that js files are called in proper order we use numerical
# index. We do that to avoid issues that occurs in tests.
module = __name__.replace('.video_module', '', 2)
js = {
'js': [
resource_string(module, 'js/src/video/00_component.js'),
resource_string(module, 'js/src/video/00_video_storage.js'),
resource_string(module, 'js/src/video/00_resizer.js'),
resource_string(module, 'js/src/video/00_async_process.js'),
resource_string(module, 'js/src/video/00_i18n.js'),
resource_string(module, 'js/src/video/00_sjson.js'),
resource_string(module, 'js/src/video/00_iterator.js'),
resource_string(module, 'js/src/video/01_initialize.js'),
resource_string(module, 'js/src/video/025_focus_grabber.js'),
resource_string(module, 'js/src/video/02_html5_video.js'),
resource_string(module, 'js/src/video/03_video_player.js'),
resource_string(module, 'js/src/video/035_video_accessible_menu.js'),
resource_string(module, 'js/src/video/04_video_control.js'),
resource_string(module, 'js/src/video/04_video_full_screen.js'),
resource_string(module, 'js/src/video/05_video_quality_control.js'),
resource_string(module, 'js/src/video/06_video_progress_slider.js'),
resource_string(module, 'js/src/video/07_video_volume_control.js'),
resource_string(module, 'js/src/video/08_video_speed_control.js'),
resource_string(module, 'js/src/video/09_video_caption.js'),
resource_string(module, 'js/src/video/09_play_placeholder.js'),
resource_string(module, 'js/src/video/09_play_pause_control.js'),
resource_string(module, 'js/src/video/09_play_skip_control.js'),
resource_string(module, 'js/src/video/09_skip_control.js'),
resource_string(module, 'js/src/video/09_bumper.js'),
resource_string(module, 'js/src/video/09_save_state_plugin.js'),
resource_string(module, 'js/src/video/09_events_plugin.js'),
resource_string(module, 'js/src/video/09_events_bumper_plugin.js'),
resource_string(module, 'js/src/video/09_poster.js'),
resource_string(module, 'js/src/video/095_video_context_menu.js'),
resource_string(module, 'js/src/video/10_commands.js'),
resource_string(module, 'js/src/video/10_main.js')
]
}
css = {'scss': [
resource_string(module, 'css/video/display.scss'),
resource_string(module, 'css/video/accessible_menu.scss'),
]}
js_module_name = "Video"
def get_transcripts_for_student(self, transcripts):
"""Return transcript information necessary for rendering the XModule student view.
This is more or less a direct extraction from `get_html`.
Args:
transcripts (dict): A dict with all transcripts and a sub.
Returns:
Tuple of (track_url, transcript_language, sorted_languages)
track_url -> subtitle download url
transcript_language -> default transcript language
sorted_languages -> dictionary of available transcript languages
"""
track_url = None
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.download_track:
if self.track:
track_url = self.track
elif sub or other_lang:
track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')
transcript_language = self.get_default_transcript_language(transcripts)
native_languages = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
languages = {
lang: native_languages.get(lang, display)
for lang, display in settings.ALL_LANGUAGES
if lang in other_lang
}
if not other_lang or (other_lang and sub):
languages['en'] = 'English'
# OrderedDict for easy testing of rendered context in tests
sorted_languages = sorted(languages.items(), key=itemgetter(1))
sorted_languages = OrderedDict(sorted_languages)
return track_url, transcript_language, sorted_languages
def get_html(self):
track_status = (self.download_track and self.track)
transcript_download_format = self.transcript_download_format if not track_status else None
sources = filter(None, self.html5_sources)
download_video_link = None
branding_info = None
youtube_streams = ""
# If we have an edx_video_id, we prefer its values over what we store
# internally for download links (source, html5_sources) and the youtube
# stream.
if self.edx_video_id and edxval_api:
try:
val_profiles = ["youtube", "desktop_webm", "desktop_mp4"]
val_video_urls = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles)
# VAL will always give us the keys for the profiles we asked for, but
# if it doesn't have an encoded video entry for that Video + Profile, the
# value will map to `None`
# add the non-youtube urls to the list of alternative sources
# use the last non-None non-youtube url as the link to download the video
for url in [val_video_urls[p] for p in val_profiles if p != "youtube"]:
if url:
if url not in sources:
sources.append(url)
if self.download_video:
download_video_link = url
# set the youtube url
if val_video_urls["youtube"]:
youtube_streams = "1.00:{}".format(val_video_urls["youtube"])
except edxval_api.ValInternalError:
# VAL raises this exception if it can't find data for the edx video ID. This can happen if the
# course data is ported to a machine that does not have the VAL data. So for now, pass on this
# exception and fallback to whatever we find in the VideoDescriptor.
log.warning("Could not retrieve information from VAL for edx Video ID: %s.", self.edx_video_id)
# If the user comes from China use China CDN for html5 videos.
# 'CN' is China ISO 3166-1 country code.
# Video caching is disabled for Studio. User_location is always None in Studio.
# CountryMiddleware disabled for Studio.
cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get(self.system.user_location)
if getattr(self, 'video_speed_optimizations', True) and cdn_url:
branding_info = BrandingInfoConfig.get_config().get(self.system.user_location)
for index, source_url in enumerate(sources):
new_url = get_video_from_cdn(cdn_url, source_url)
if new_url:
sources[index] = new_url
# If there was no edx_video_id, or if there was no download specified
# for it, we fall back on whatever we find in the VideoDescriptor
if not download_video_link and self.download_video:
if self.source:
download_video_link = self.source
elif self.html5_sources:
download_video_link = self.html5_sources[0]
track_url, transcript_language, sorted_languages = self.get_transcripts_for_student(self.get_transcripts_info())
# CDN_VIDEO_URLS is only to be used here and will be deleted
# TODO(ali@edx.org): Delete this after the CDN experiment has completed.
html_id = self.location.html_id()
if self.system.user_location == 'CN' and \
settings.FEATURES.get('ENABLE_VIDEO_BEACON', False) and \
html_id in getattr(settings, 'CDN_VIDEO_URLS', {}).keys():
cdn_urls = getattr(settings, 'CDN_VIDEO_URLS', {})[html_id]
cdn_exp_group, new_source = random.choice(zip(range(len(cdn_urls)), cdn_urls))
if cdn_exp_group > 0:
sources[0] = new_source
cdn_eval = True
else:
cdn_eval = False
cdn_exp_group = None
self.youtube_streams = youtube_streams or create_youtube_string(self) # pylint: disable=W0201
settings_service = self.runtime.service(self, 'settings')
yt_api_key = None
if settings_service:
xblock_settings = settings_service.get_settings_bucket(self)
if xblock_settings and 'YOUTUBE_API_KEY' in xblock_settings:
yt_api_key = xblock_settings['YOUTUBE_API_KEY']
metadata = {
'saveStateUrl': self.system.ajax_url + '/save_user_state',
'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),
'streams': self.youtube_streams,
'sub': self.sub,
'sources': sources,
# This won't work when we move to data that
# isn't on the filesystem
'captionDataDir': getattr(self, 'data_dir', None),
'showCaptions': json.dumps(self.show_captions),
'generalSpeed': self.global_speed,
'speed': self.speed,
'savedVideoPosition': self.saved_video_position.total_seconds(),
'start': self.start_time.total_seconds(),
'end': self.end_time.total_seconds(),
'transcriptLanguage': transcript_language,
'transcriptLanguages': sorted_languages,
# TODO: Later on the value 1500 should be taken from some global
# configuration setting field.
'ytTestTimeout': 1500,
'ytApiUrl': settings.YOUTUBE['API'],
'ytMetadataUrl': settings.YOUTUBE['METADATA_URL'],
'ytKey': yt_api_key,
'transcriptTranslationUrl': self.runtime.handler_url(
self, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.runtime.handler_url(
self, 'transcript', 'available_translations'
).rstrip('/?'),
## For now, the option "data-autohide-html5" is hard coded. This option
## either enables or disables autohiding of controls and captions on mouse
## inactivity. If set to true, controls and captions will autohide for
## HTML5 sources (non-YouTube) after a period of mouse inactivity over the
## whole video. When the mouse moves (or a key is pressed while any part of
## the video player is focused), the captions and controls will be shown
## once again.
##
## There is no option in the "Advanced Editor" to set this option. However,
## this option will have an effect if changed to "True". The code on
## front-end exists.
'autohideHtml5': False
}
bumperize(self)
context = {
'bumper_metadata': json.dumps(self.bumper['metadata']), # pylint: disable=E1101
'metadata': json.dumps(OrderedDict(metadata)),
'poster': json.dumps(get_poster(self)),
'branding_info': branding_info,
'cdn_eval': cdn_eval,
'cdn_exp_group': cdn_exp_group,
'id': self.location.html_id(),
'display_name': self.display_name_with_default,
'handout': self.handout,
'download_video_link': download_video_link,
'track': track_url,
'transcript_download_format': transcript_download_format,
'transcript_download_formats_list': self.descriptor.fields['transcript_download_format'].values,
'license': getattr(self, "license", None),
}
return self.system.render_template('video.html', context)
@XBlock.wants("request_cache")
@XBlock.wants("settings")
class VideoDescriptor(VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers,
TabsEditingDescriptor, EmptyDataRawDescriptor, LicenseMixin):
"""
Descriptor for `VideoModule`.
"""
module_class = VideoModule
transcript = module_attr('transcript')
show_in_read_only_mode = True
tabs = [
{
'name': _("Basic"),
'template': "video/transcripts.html",
'current': True
},
{
'name': _("Advanced"),
'template': "tabs/metadata-edit-tab.html"
}
]
def __init__(self, *args, **kwargs):
"""
Mostly handles backward compatibility issues.
`source` is deprecated field.
a) If `source` exists and `source` is not `html5_sources`: show `source`
field on front-end as not-editable but clearable. Dropdown is a new
field `download_video` and it has value True.
b) If `source` is cleared it is not shown anymore.
c) If `source` exists and `source` in `html5_sources`, do not show `source`
field. `download_video` field has value True.
"""
super(VideoDescriptor, self).__init__(*args, **kwargs)
# For backwards compatibility -- if we've got XML data, parse it out and set the metadata fields
if self.data:
field_data = self._parse_video_xml(etree.fromstring(self.data))
self._field_data.set_many(self, field_data)
del self.data
self.source_visible = False
if self.source:
# If `source` field value exist in the `html5_sources` field values,
# then delete `source` field value and use value from `html5_sources` field.
if self.source in self.html5_sources:
self.source = '' # Delete source field value.
self.download_video = True
else: # Otherwise, `source` field value will be used.
self.source_visible = True
if not self.fields['download_video'].is_set_on(self):
self.download_video = True
# Force download_video field to default value if it's not explicitly set for backward compatibility.
if not self.fields['download_video'].is_set_on(self):
self.download_video = self.download_video
self.force_save_fields(['download_video'])
# for backward compatibility.
        # If the course existed and was not re-imported by the time the `download_track` field was added,
        # we should enable `download_track` if the following is true:
if not self.fields['download_track'].is_set_on(self) and self.track:
self.download_track = True
def editor_saved(self, user, old_metadata, old_content):
"""
Used to update video values during `self`:save method from CMS.
old_metadata: dict, values of fields of `self` with scope=settings which were explicitly set by user.
old_content, same as `old_metadata` but for scope=content.
        Due to the nature of the code flow in item.py::_save_item, before the current function is called,
        the fields of the `self` instance have already been updated, but not yet saved.
        To obtain the values that were changed by user input,
        one should compare own_metadata(self) and old_metadata.
        The video player has two tabs, and due to the nature of syncing between tabs,
        metadata from the Basic tab is always sent when the video player is edited and saved for the first time, for example:
        {'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []},
        that's why these fields will always be present in old_metadata after the first save. This should be fixed.
        On subsequent save requests html5_sources are always sent too, regardless of whether the user changed them.
        That means that html5_sources are always in the list of fields that were changed (the `metadata` param in save_item).
This should be fixed too.
"""
metadata_was_changed_by_user = old_metadata != own_metadata(self)
        # There is an edge case when old_metadata and own_metadata are the same and we are importing a transcript
        # from YouTube: there is a syncing issue where html5 subs are not synced with the YouTube sub. We can make
        # the sync better by checking whether a transcript is present for the video; if the transcript for any
        # html5_id is missing, then trigger manage_video_subtitles_save to create the missing transcript for it.
if not metadata_was_changed_by_user and self.sub and hasattr(self, 'html5_sources'):
html5_ids = get_html5_ids(self.html5_sources)
for subs_id in html5_ids:
try:
Transcript.asset(self.location, subs_id)
except NotFoundError:
                    # If a transcript does not exist for a particular html5_id then there is no need to check the
                    # other html5_ids, because we have to create a new transcript for this missing html5_id by
                    # turning on the metadata_was_changed_by_user flag.
metadata_was_changed_by_user = True
break
if metadata_was_changed_by_user:
manage_video_subtitles_save(
self,
user,
old_metadata if old_metadata else None,
generate_translation=True
)
def save_with_metadata(self, user):
"""
        Save module with updated metadata to database.
"""
self.save()
self.runtime.modulestore.update_item(self, user.id)
@property
def editable_metadata_fields(self):
editable_fields = super(VideoDescriptor, self).editable_metadata_fields
settings_service = self.runtime.service(self, 'settings')
if settings_service:
xb_settings = settings_service.get_settings_bucket(self)
if not xb_settings.get("licensing_enabled", False) and "license" in editable_fields:
del editable_fields["license"]
if self.source_visible:
editable_fields['source']['non_editable'] = True
else:
editable_fields.pop('source')
languages = [{'label': label, 'code': lang} for lang, label in settings.ALL_LANGUAGES if lang != u'en']
languages.sort(key=lambda l: l['label'])
editable_fields['transcripts']['languages'] = languages
editable_fields['transcripts']['type'] = 'VideoTranslations'
editable_fields['transcripts']['urlRoot'] = self.runtime.handler_url(
self,
'studio_transcript',
'translation'
).rstrip('/?')
editable_fields['handout']['type'] = 'FileUploader'
return editable_fields
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses
xml_data: A string of xml that will be translated into data and children for
this module
system: A DescriptorSystem for interacting with external resources
id_generator is used to generate course-specific urls and identifiers
"""
xml_object = etree.fromstring(xml_data)
url_name = xml_object.get('url_name', xml_object.get('slug'))
block_type = 'video'
definition_id = id_generator.create_definition(block_type, url_name)
usage_id = id_generator.create_usage(definition_id)
if is_pointer_tag(xml_object):
filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
xml_object = cls.load_file(filepath, system.resources_fs, usage_id)
system.parse_asides(xml_object, definition_id, usage_id, id_generator)
field_data = cls._parse_video_xml(xml_object, id_generator)
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
video = system.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
# We also don't have separate notions of definition and usage ids yet,
# so we use the location for both
ScopeIds(None, block_type, definition_id, usage_id),
field_data,
)
return video
def definition_to_xml(self, resource_fs):
"""
Returns an xml string representing this module.
"""
xml = etree.Element('video')
youtube_string = create_youtube_string(self)
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't need to write it out.
if youtube_string and youtube_string != '1.00:3_yD_cEKoCk':
xml.set('youtube', unicode(youtube_string))
xml.set('url_name', self.url_name)
attrs = {
'display_name': self.display_name,
'show_captions': json.dumps(self.show_captions),
'start_time': self.start_time,
'end_time': self.end_time,
'sub': self.sub,
'download_track': json.dumps(self.download_track),
'download_video': json.dumps(self.download_video),
}
for key, value in attrs.items():
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't write it out.
if value:
if key in self.fields and self.fields[key].is_set_on(self):
xml.set(key, unicode(value))
for source in self.html5_sources:
ele = etree.Element('source')
ele.set('src', source)
xml.append(ele)
if self.track:
ele = etree.Element('track')
ele.set('src', self.track)
xml.append(ele)
if self.handout:
ele = etree.Element('handout')
ele.set('src', self.handout)
xml.append(ele)
# sorting for easy testing of resulting xml
for transcript_language in sorted(self.transcripts.keys()):
ele = etree.Element('transcript')
ele.set('language', transcript_language)
ele.set('src', self.transcripts[transcript_language])
xml.append(ele)
if self.edx_video_id and edxval_api:
try:
xml.append(edxval_api.export_to_xml(self.edx_video_id))
except edxval_api.ValVideoNotFoundError:
pass
# handle license specifically
self.add_license_to_xml(xml)
return xml
def get_context(self):
"""
Extend context by data for transcript basic tab.
"""
_context = super(VideoDescriptor, self).get_context()
metadata_fields = copy.deepcopy(self.editable_metadata_fields)
display_name = metadata_fields['display_name']
video_url = metadata_fields['html5_sources']
youtube_id_1_0 = metadata_fields['youtube_id_1_0']
def get_youtube_link(video_id):
"""
Returns the fully-qualified YouTube URL for the given video identifier
"""
# First try a lookup in VAL. If we have a YouTube entry there, it overrides the
# one passed in.
if self.edx_video_id and edxval_api:
val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube")
if val_youtube_id:
video_id = val_youtube_id
if video_id:
return 'http://youtu.be/{0}'.format(video_id)
else:
return ''
_ = self.runtime.service(self, "i18n").ugettext
video_url.update({
'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or .webm video file hosted elsewhere on the Internet.'), # pylint: disable=line-too-long
'display_name': _('Default Video URL'),
'field_name': 'video_url',
'type': 'VideoList',
'default_value': [get_youtube_link(youtube_id_1_0['default_value'])]
})
youtube_id_1_0_value = get_youtube_link(youtube_id_1_0['value'])
if youtube_id_1_0_value:
video_url['value'].insert(0, youtube_id_1_0_value)
metadata = {
'display_name': display_name,
'video_url': video_url
}
_context.update({'transcripts_basic_tab_metadata': metadata})
return _context
@classmethod
def _parse_youtube(cls, data):
"""
Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD"
into a dictionary. Necessary for backwards compatibility with
XML-based courses.
"""
ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
videos = data.split(',')
for video in videos:
pieces = video.split(':')
try:
speed = '%.2f' % float(pieces[0]) # normalize speed
# Handle the fact that youtube IDs got double-quoted for a period of time.
# Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
# it doesn't matter what the actual speed is for the purposes of deserializing.
youtube_id = deserialize_field(cls.youtube_id_1_0, pieces[1])
ret[speed] = youtube_id
except (ValueError, IndexError):
log.warning('Invalid YouTube ID: %s', video)
return ret
@classmethod
def _parse_video_xml(cls, xml, id_generator=None):
"""
Parse video fields out of xml_data. The fields are set if they are
present in the XML.
Arguments:
id_generator is used to generate course-specific urls and identifiers
"""
field_data = {}
# Convert between key types for certain attributes --
# necessary for backwards compatibility.
conversions = {
# example: 'start_time': cls._example_convert_start_time
}
# Convert between key names for certain attributes --
# necessary for backwards compatibility.
compat_keys = {
'from': 'start_time',
'to': 'end_time'
}
sources = xml.findall('source')
if sources:
field_data['html5_sources'] = [ele.get('src') for ele in sources]
track = xml.find('track')
if track is not None:
field_data['track'] = track.get('src')
handout = xml.find('handout')
if handout is not None:
field_data['handout'] = handout.get('src')
transcripts = xml.findall('transcript')
if transcripts:
field_data['transcripts'] = {tr.get('language'): tr.get('src') for tr in transcripts}
for attr, value in xml.items():
if attr in compat_keys:
attr = compat_keys[attr]
if attr in cls.metadata_to_strip + ('url_name', 'name'):
continue
if attr == 'youtube':
speeds = cls._parse_youtube(value)
for speed, youtube_id in speeds.items():
# should have made these youtube_id_1_00 for
# cleanliness, but hindsight doesn't need glasses
normalized_speed = speed[:-1] if speed.endswith('0') else speed
# If the user has specified html5 sources, make sure we don't use the default video
if youtube_id != '' or 'html5_sources' in field_data:
field_data['youtube_id_{0}'.format(normalized_speed.replace('.', '_'))] = youtube_id
elif attr in conversions:
field_data[attr] = conversions[attr](value)
elif attr not in cls.fields:
field_data.setdefault('xml_attributes', {})[attr] = value
else:
# We export values with json.dumps (well, except for Strings, but
# for about a month we did it for Strings also).
field_data[attr] = deserialize_field(cls.fields[attr], value)
# For backwards compatibility: Add `source` if XML doesn't have `download_video`
# attribute.
if 'download_video' not in field_data and sources:
field_data['source'] = field_data['html5_sources'][0]
# For backwards compatibility: if XML doesn't have `download_track` attribute,
# it means that it is an old format. So, if `track` has some value,
# `download_track` needs to have value `True`.
if 'download_track' not in field_data and track is not None:
field_data['download_track'] = True
video_asset_elem = xml.find('video_asset')
if (
edxval_api and
video_asset_elem is not None and
'edx_video_id' in field_data
):
# Allow ValCannotCreateError to escape
edxval_api.import_from_xml(
video_asset_elem,
field_data['edx_video_id'],
course_id=getattr(id_generator, 'target_course_id', None)
)
# load license if it exists
field_data = LicenseMixin.parse_license_from_xml(field_data, xml)
return field_data
def index_dictionary(self):
xblock_body = super(VideoDescriptor, self).index_dictionary()
video_body = {
"display_name": self.display_name,
}
def _update_transcript_for_index(language=None):
""" Find video transcript - if not found, don't update index """
try:
transcripts = self.get_transcripts_info()
transcript = self.get_transcript(
transcripts, transcript_format='txt', lang=language
)[0].replace("\n", " ")
transcript_index_name = "transcript_{}".format(language if language else self.transcript_language)
video_body.update({transcript_index_name: transcript})
except NotFoundError:
pass
if self.sub:
_update_transcript_for_index()
# Check to see if there are transcripts in other languages besides default transcript
if self.transcripts:
for language in self.transcripts.keys():
_update_transcript_for_index(language)
if "content" in xblock_body:
xblock_body["content"].update(video_body)
else:
xblock_body["content"] = video_body
xblock_body["content_type"] = "Video"
return xblock_body
@property
def request_cache(self):
"""
Returns the request_cache from the runtime.
"""
return self.runtime.service(self, "request_cache")
@memoize_in_request_cache('request_cache')
def get_cached_val_data_for_course(self, video_profile_names, course_id):
"""
Returns the VAL data for the requested video profiles for the given course.
"""
return edxval_api.get_video_info_for_course_and_profiles(unicode(course_id), video_profile_names)
def student_view_data(self, context=None):
"""
Returns a JSON representation of the student_view of this XModule.
The contract of the JSON content is between the caller and the particular XModule.
"""
context = context or {}
# If the "only_on_web" field is set on this video, do not return the rest of the video's data
        # in this json view, since this video is to be accessed only through its web view.
if self.only_on_web:
return {"only_on_web": True}
encoded_videos = {}
val_video_data = {}
# Check in VAL data first if edx_video_id exists
if self.edx_video_id:
video_profile_names = context.get("profiles", ["mobile_low"])
# get and cache bulk VAL data for course
val_course_data = self.get_cached_val_data_for_course(video_profile_names, self.location.course_key)
val_video_data = val_course_data.get(self.edx_video_id, {})
# Get the encoded videos if data from VAL is found
if val_video_data:
encoded_videos = val_video_data.get('profiles', {})
# If information for this edx_video_id is not found in the bulk course data, make a
# separate request for this individual edx_video_id, unless cache misses are disabled.
# This is useful/required for videos that don't have a course designated, such as the introductory video
# that is shared across many courses. However, this results in a separate database request so watch
# out for any performance hit if many such videos exist in a course. Set the 'allow_cache_miss' parameter
# to False to disable this fall back.
elif context.get("allow_cache_miss", "True").lower() == "true":
try:
val_video_data = edxval_api.get_video_info(self.edx_video_id)
# Unfortunately, the VAL API is inconsistent in how it returns the encodings, so remap here.
for enc_vid in val_video_data.pop('encoded_videos'):
encoded_videos[enc_vid['profile']] = {key: enc_vid[key] for key in ["url", "file_size"]}
except edxval_api.ValVideoNotFoundError:
pass
# Fall back to other video URLs in the video module if not found in VAL
if not encoded_videos:
video_url = self.html5_sources[0] if self.html5_sources else self.source
if video_url:
encoded_videos["fallback"] = {
"url": video_url,
"file_size": 0, # File size is unknown for fallback URLs
}
transcripts_info = self.get_transcripts_info()
transcripts = {
lang: self.runtime.handler_url(self, 'transcript', 'download', query="lang=" + lang, thirdparty=True)
for lang in self.available_translations(transcripts_info, verify_assets=False)
}
return {
"only_on_web": self.only_on_web,
"duration": val_video_data.get('duration', None),
"transcripts": transcripts,
"encoded_videos": encoded_videos,
}
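# Editor's note: a minimal, self-contained sketch (not part of this module) of the
# "speed:id" string format that _parse_youtube consumes and create_youtube_string emits.
# The helper name parse_speed_map is hypothetical; unlike _parse_youtube it skips the
# deserialize_field step that handles historically double-quoted IDs.
def parse_speed_map(data):
    """Turn '1.0:AXdE34_U,1.5:VO3SxfeD' into {'1.00': 'AXdE34_U', '1.50': 'VO3SxfeD'}."""
    result = {}
    for entry in data.split(','):
        speed, _, youtube_id = entry.partition(':')
        try:
            result['%.2f' % float(speed)] = youtube_id  # normalize '1.0' -> '1.00'
        except ValueError:
            continue  # skip malformed entries instead of failing
    return result
assert parse_speed_map('1.0:AXdE34_U,1.5:VO3SxfeD') == {'1.00': 'AXdE34_U', '1.50': 'VO3SxfeD'}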
|
jbzdak/edx-platform
|
common/lib/xmodule/xmodule/video_module/video_module.py
|
Python
|
agpl-3.0
| 39,080
|
#!env/python3
# coding: utf-8
import os
from core.framework.common import *
from core.framework.postgresql import *
Report = Base.classes.report
|
REGOVAR/regovar-server
|
annso/core/model/report.py
|
Python
|
agpl-3.0
| 151
|
# -*- coding: utf-8 -*-
"""
ZUGBRUECKE
Calling routines in Windows DLLs from Python scripts running on unixlike systems
https://github.com/pleiszenburg/zugbruecke
tests/test_callback_optional.py: Optional callback routines as arguments
Required to run on platform / side: [UNIX, WINE]
Copyright (C) 2017-2019 Sebastian M. Ernst <ernst@pleiszenburg.de>
<LICENSE_BLOCK>
The contents of this file are subject to the GNU Lesser General Public License
Version 2.1 ("LGPL" or "License"). You may not use this file except in
compliance with the License. You may obtain a copy of the License at
https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt
https://github.com/pleiszenburg/zugbruecke/blob/master/LICENSE
Software distributed under the License is distributed on an "AS IS" basis,
WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the
specific language governing rights and limitations under the License.
</LICENSE_BLOCK>
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# IMPORT
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# import pytest
from sys import platform
if any([platform.startswith(os_name) for os_name in ['linux', 'darwin', 'freebsd']]):
import zugbruecke as ctypes
elif platform.startswith('win'):
import ctypes
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CLASSES AND ROUTINES
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class sample_class_a:
def __init__(self):
self.__dll__ = ctypes.windll.LoadLibrary('tests/demo_dll.dll')
conveyor_belt = ctypes.WINFUNCTYPE(ctypes.c_int16, ctypes.c_int16)
self.__use_optional_callback__ = self.__dll__.use_optional_callback_a
self.__use_optional_callback__.argtypes = (ctypes.c_int16, conveyor_belt)
self.__use_optional_callback__.restype = ctypes.c_int16
@conveyor_belt
def process_data(in_data):
return in_data ** 2
self.__process_data__ = process_data
def use_optional_callback(self, some_data):
return self.__use_optional_callback__(some_data, self.__process_data__)
class sample_class_b:
def __init__(self):
self.__dll__ = ctypes.windll.LoadLibrary('tests/demo_dll.dll')
self.__use_optional_callback__ = self.__dll__.use_optional_callback_b
self.__use_optional_callback__.argtypes = (ctypes.c_int16, ctypes.c_void_p)
self.__use_optional_callback__.restype = ctypes.c_int16
def do_not_use_optional_callback(self, some_data):
return self.__use_optional_callback__(some_data, None)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# TEST(s)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_use_optional_callback():
sample = sample_class_a()
assert 18 == sample.use_optional_callback(3)
def test_do_not_use_optional_callback():
sample = sample_class_b()
assert 14 == sample.do_not_use_optional_callback(7)
|
pleiszenburg/zugbruecke
|
tests/test_callback_optional.py
|
Python
|
lgpl-2.1
| 3,143
|
#!/usr/bin/python
# A very simple process to combine the floating base estimate
# with the kinematics and output the combined message
# input: POSE_BODY, FORCE_TORQUE and CORE_ROBOT_STATE output: EST_ROBOT_STATE
#
import os,sys
import lcm
import time
from lcm import LCM
from math import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
home_dir = os.getenv("DRC_BASE")
if (home_dir is not None):
#print home_dir
sys.path.append(home_dir + "/software/build/lib/python2.7/site-packages")
sys.path.append(home_dir + "/software/build/lib/python2.7/dist-packages")
from bot_core.pose_t import pose_t
from bot_core.robot_state_t import robot_state_t
from bot_core.joint_state_t import joint_state_t
from bot_core.vector_3d_t import vector_3d_t
from bot_core.position_3d_t import position_3d_t
from bot_core.twist_t import twist_t
from bot_core.quaternion_t import quaternion_t
from bot_core.force_torque_t import force_torque_t
########################################################################################
def timestamp_now (): return int (time.time () * 1000000)
global core_robot_state
core_robot_state = joint_state_t()
def on_core_robot_state(channel, data):
global core_robot_state
core_robot_state = joint_state_t.decode(data)
def on_pose_body(channel, data):
global core_robot_state, joint_names
if (core_robot_state.num_joints==0):
return
m = pose_t.decode(data)
o = robot_state_t()
o.utime = m.utime
o.num_joints = core_robot_state.num_joints
o.joint_name = core_robot_state.joint_name
o.joint_position = core_robot_state.joint_position
o.joint_velocity = core_robot_state.joint_velocity
o.joint_effort = core_robot_state.joint_effort
nrot = quaternion_t()
nvec = vector_3d_t()
p = position_3d_t()
p.rotation = nrot
p.translation = nvec
o.pose = p
t = twist_t()
t.linear_velocity = nvec
t.angular_velocity = nvec
o.twist = t
    o.pose.translation.x = m.pos[0]
    o.pose.translation.y = m.pos[1]
    o.pose.translation.z = m.pos[2]
    o.pose.rotation.w = m.orientation[0]
    o.pose.rotation.x = m.orientation[1]
    o.pose.rotation.y = m.orientation[2]
    o.pose.rotation.z = m.orientation[3]
ft = force_torque_t()
o.force_torque = ft
lc.publish("EST_ROBOT_STATE",o.encode())
####################################################################
lc = lcm.LCM()
print "started"
sub1 = lc.subscribe("VAL_CORE_ROBOT_STATE|CORE_ROBOT_STATE", on_core_robot_state)
sub2 = lc.subscribe("POSE_BODY", on_pose_body)
while True:
lc.handle()
|
mitdrc/pronto
|
motion_estimate/scripts/state-sync-simple.py
|
Python
|
lgpl-2.1
| 2,596
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 KillerInstinct
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class DarkCometRegkeys(Signature):
name = "darkcomet_regkeys"
    description = "Interacts with known DarkComet registry keys"
severity = 3
categories = ["rat"]
families = ["darkcomet"]
authors = ["KillerInstinct"]
minimum = "0.5"
def run(self):
dc_keys = False
indicators = [
".*\\\\Software\\\\DC3_FEXEC$",
".*\\\\Software\\\\DC3_FEXEC\\\\.*",
".*\\\\Software\\\\DC2_USERS$",
]
for indicator in indicators:
match = self.check_key(pattern=indicator, regex=True)
if match:
self.data.append({"Key": match})
dc_keys = True
return dc_keys
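# Editor's note: a small standalone aside (not part of the signature) showing how the
# indicator patterns above behave; the sample registry keys are made up, and the raw
# strings below are equivalent to the escaped patterns used in run().
import re
_sample_keys = [
    r"HKEY_CURRENT_USER\Software\DC3_FEXEC",
    r"HKEY_CURRENT_USER\Software\DC3_FEXEC\FirstExecution",
    r"HKEY_CURRENT_USER\Software\Microsoft\Windows",
]
_indicators = [r".*\\Software\\DC3_FEXEC$", r".*\\Software\\DC3_FEXEC\\.*", r".*\\Software\\DC2_USERS$"]
# The first two sample keys hit an indicator, the last one does not.
assert [any(re.match(p, k) for p in _indicators) for k in _sample_keys] == [True, True, False]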
|
lixiangning888/whole_project
|
modules/signatures_merge_tmp/darkcomet_regkeys.py
|
Python
|
lgpl-3.0
| 1,454
|
import sys
def setup(core, object):
object.setStfFilename('static_item_n')
object.setStfName('item_necklace_set_medic_utility_a_01_01')
object.setDetailFilename('static_item_d')
object.setDetailName('item_necklace_set_medic_utility_a_01_01')
object.setStringAttribute('class_required', 'Medic')
object.setIntAttribute('required_combat_level', 85)
object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_cooldown_line_me_evasion', 6)
object.setIntAttribute('cat_skill_mod_bonus.@stat_n:expertise_freeshot_me_heal', 1)
object.setIntAttribute('cat_skill_mod_bonus.@stat_n:fast_attack_line_me_heal', 2)
object.setIntAttribute('cat_skill_mod_bonus.@stat_n:fast_attack_line_me_revive', 1)
object.setStringAttribute('@set_bonus:piece_bonus_count_3', '@set_bonus:set_medic_utility_a_1')
object.setStringAttribute('@set_bonus:piece_bonus_count_4', '@set_bonus:set_medic_utility_a_2')
object.setStringAttribute('@set_bonus:piece_bonus_count_5', '@set_bonus:set_medic_utility_a_3')
object.setAttachment('setBonus', 'set_medic_utility_a')
return
|
ProjectSWGCore/NGECore2
|
scripts/object/tangible/wearables/necklace/item_necklace_set_medic_utility_a_01_01.py
|
Python
|
lgpl-3.0
| 1,055
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('djanjinja_test.shortcuts.views',
url(r'^plain/$', 'plain', name='shortcuts-plain'),
url(r'^context/$', 'context', name='shortcuts-context'),
url(r'^req_context/$', 'req_context', name='shortcuts-req_context'),
url(r'^middleware/$', 'middleware', name='shortcuts-middleware'),
)
|
adamnfish/DjanJinja2
|
djanjinja_test/shortcuts/urls.py
|
Python
|
unlicense
| 396
|
import unittest
import tempfile
import inspect
import shutil
import sys
import os
import re
from os.path import basename, dirname, exists, join, normpath
from robot.errors import DataError
from robot.utils import abspath, JYTHON, WINDOWS, PY3
from robot.utils.importer import Importer, ByPathImporter
from robot.utils.asserts import (assert_equal, assert_true, assert_raises,
assert_raises_with_msg)
CURDIR = dirname(abspath(__file__))
LIBDIR = normpath(join(CURDIR, '..', '..', 'atest', 'testresources', 'testlibs'))
TEMPDIR = tempfile.gettempdir()
TESTDIR = join(TEMPDIR, 'robot-importer-testing')
WINDOWS_PATH_IN_ERROR = re.compile(r"'\w:\\")
if PY3:
unicode = str
def assert_prefix(error, expected):
message = unicode(error)
count = 3 if WINDOWS_PATH_IN_ERROR.search(message) else 2
prefix = ':'.join(message.split(':')[:count]) + ':'
if 'ImportError:' in expected and sys.version_info >= (3, 6):
expected = expected.replace('ImportError:', 'ModuleNotFoundError:')
assert_equal(prefix, expected)
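# Editor's note: an illustrative aside (not part of the test suite) showing how the
# colon-splitting in assert_prefix isolates an error prefix. With a plain POSIX path the
# prefix is the first two colon-separated pieces; a Windows drive letter such as C:\
# contributes one extra colon, which is why the count above becomes 3.
_example_message = "Importing 'somelib' failed: ImportError: No module named somelib"
assert ':'.join(_example_message.split(':')[:2]) + ':' == "Importing 'somelib' failed: ImportError:"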
def create_temp_file(name, attr=42, extra_content=''):
if not exists(TESTDIR):
os.mkdir(TESTDIR)
path = join(TESTDIR, name)
with open(path, 'w') as file:
file.write('''
attr = %r
def func():
return attr
''' % attr)
file.write(extra_content)
return path
class LoggerStub(object):
def __init__(self, remove_extension=False):
self.messages = []
self.remove_extension = remove_extension
def info(self, msg):
if self.remove_extension:
for ext in '$py.class', '.pyc', '.py':
msg = msg.replace(ext, '')
self.messages.append(self._normalize_drive_letter(msg))
def assert_message(self, msg, index=0):
assert_equal(self.messages[index], self._normalize_drive_letter(msg))
def _normalize_drive_letter(self, msg):
if not WINDOWS:
return msg
return re.sub("'\\w:", lambda match: match.group().upper(), msg)
class TestImportByPath(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
if exists(TESTDIR):
shutil.rmtree(TESTDIR)
def test_python_file(self):
path = create_temp_file('test.py')
self._import_and_verify(path, remove='test')
self._assert_imported_message('test', path)
def test_python_directory(self):
create_temp_file('__init__.py')
module_name = basename(TESTDIR)
self._import_and_verify(TESTDIR, remove=module_name)
self._assert_imported_message(module_name, TESTDIR)
def test_import_same_file_multiple_times(self):
path = create_temp_file('test.py')
self._import_and_verify(path, remove='test')
self._assert_imported_message('test', path)
self._import_and_verify(path)
self._assert_imported_message('test', path)
self._import_and_verify(path, name='library')
self._assert_imported_message('test', path, type='library module')
def test_import_different_file_and_directory_with_same_name(self):
path1 = create_temp_file('test.py', attr=1)
self._import_and_verify(path1, attr=1, remove='test')
self._assert_imported_message('test', path1)
path2 = join(TESTDIR, 'test')
os.mkdir(path2)
create_temp_file(join(path2, '__init__.py'), attr=2)
self._import_and_verify(path2, attr=2, directory=path2)
self._assert_removed_message('test')
self._assert_imported_message('test', path2, index=1)
path3 = create_temp_file(join(path2, 'test.py'), attr=3)
self._import_and_verify(path3, attr=3, directory=path2)
self._assert_removed_message('test')
self._assert_imported_message('test', path3, index=1)
def test_import_class_from_file(self):
path = create_temp_file('test.py', extra_content='''
class test:
def method(self):
return 42
''')
klass = self._import(path, remove='test')
self._assert_imported_message('test', path, type='class')
assert_true(inspect.isclass(klass))
assert_equal(klass.__name__, 'test')
assert_equal(klass().method(), 42)
def test_invalid_python_file(self):
path = create_temp_file('test.py', extra_content='invalid content')
error = assert_raises(DataError, self._import_and_verify, path, remove='test')
assert_prefix(error, "Importing '%s' failed: SyntaxError:" % path)
if JYTHON:
def test_java_class_with_java_extension(self):
path = join(CURDIR, 'ImportByPath.java')
self._import_and_verify(path, remove='ImportByPath')
self._assert_imported_message('ImportByPath', path, type='class')
def test_java_class_with_class_extension(self):
path = join(CURDIR, 'ImportByPath.class')
self._import_and_verify(path, remove='ImportByPath', name='java')
self._assert_imported_message('ImportByPath', path, type='java class')
def test_importing_java_package_fails(self):
path = join(LIBDIR, 'javapkg')
assert_raises_with_msg(DataError,
"Importing '%s' failed: Expected class or "
"module, got javapackage." % path,
self._import, path, remove='javapkg')
def test_removing_from_sys_modules_when_importing_multiple_times(self):
path = join(CURDIR, 'ImportByPath.java')
self._import(path, name='java', remove='ImportByPath')
self._assert_imported_message('ImportByPath', path, 'java class')
self._import(path)
self._assert_removed_message('ImportByPath')
self._assert_imported_message('ImportByPath', path, 'class', index=1)
def _import_and_verify(self, path, attr=42, directory=TESTDIR,
name=None, remove=None):
module = self._import(path, name, remove)
assert_equal(module.attr, attr)
assert_equal(module.func(), attr)
if hasattr(module, '__file__'):
assert_equal(dirname(abspath(module.__file__)), directory)
def _import(self, path, name=None, remove=None):
if remove and remove in sys.modules:
sys.modules.pop(remove)
self.logger = LoggerStub()
importer = Importer(name, self.logger)
sys_path_before = sys.path[:]
try:
return importer.import_class_or_module_by_path(path)
finally:
assert_equal(sys.path, sys_path_before)
def _assert_imported_message(self, name, source, type='module', index=0):
msg = "Imported %s '%s' from '%s'." % (type, name, source)
self.logger.assert_message(msg, index=index)
def _assert_removed_message(self, name, index=0):
msg = "Removed module '%s' from sys.modules to import fresh module." % name
self.logger.assert_message(msg, index=index)
class TestInvalidImportPath(unittest.TestCase):
def test_non_existing(self):
path = 'non-existing.py'
assert_raises_with_msg(DataError,
"Importing '%s' failed: File or directory does not exist." % path,
Importer().import_class_or_module_by_path, path)
path = abspath(path)
assert_raises_with_msg(DataError,
"Importing test file '%s' failed: File or directory does not exist." % path,
Importer('test file').import_class_or_module_by_path, path)
def test_non_absolute(self):
path = os.listdir('.')[0]
assert_raises_with_msg(DataError,
"Importing '%s' failed: Import path must be absolute." % path,
Importer().import_class_or_module_by_path, path)
assert_raises_with_msg(DataError,
"Importing file '%s' failed: Import path must be absolute." % path,
Importer('file').import_class_or_module_by_path, path)
def test_invalid_format(self):
path = join(CURDIR, '..', '..', 'README.rst')
assert_raises_with_msg(DataError,
"Importing '%s' failed: Not a valid file or directory to import." % path,
Importer().import_class_or_module_by_path, path)
assert_raises_with_msg(DataError,
"Importing xxx '%s' failed: Not a valid file or directory to import." % path,
Importer('xxx').import_class_or_module_by_path, path)
class TestImportClassOrModule(unittest.TestCase):
def test_import_module_file(self):
module = self._import_module('classes')
assert_equal(module.__version__, 'N/A')
def test_import_module_directory(self):
module = self._import_module('pythonmodule')
assert_equal(module.some_string, 'Hello, World!')
def test_import_non_existing(self):
error = assert_raises(DataError, self._import, 'NonExisting')
assert_prefix(error, "Importing 'NonExisting' failed: ImportError:")
def test_import_sub_module(self):
module = self._import_module('pythonmodule.library')
assert_equal(module.keyword_from_submodule('Kitty'), 'Hello, Kitty!')
module = self._import_module('pythonmodule.submodule')
assert_equal(module.attribute, 42)
module = self._import_module('pythonmodule.submodule.sublib')
assert_equal(module.keyword_from_deeper_submodule(), 'hi again')
def test_import_class_with_same_name_as_module(self):
klass = self._import_class('ExampleLibrary')
assert_equal(klass().return_string_from_library('xxx'), 'xxx')
def test_import_class_from_module(self):
klass = self._import_class('ExampleLibrary.ExampleLibrary')
assert_equal(klass().return_string_from_library('yyy'), 'yyy')
def test_import_class_from_sub_module(self):
klass = self._import_class('pythonmodule.submodule.sublib.Sub')
assert_equal(klass().keyword_from_class_in_deeper_submodule(), 'bye')
def test_import_non_existing_item_from_existing_module(self):
assert_raises_with_msg(DataError,
"Importing 'pythonmodule.NonExisting' failed: "
"Module 'pythonmodule' does not contain 'NonExisting'.",
self._import, 'pythonmodule.NonExisting')
assert_raises_with_msg(DataError,
"Importing test library 'pythonmodule.none' failed: "
"Module 'pythonmodule' does not contain 'none'.",
self._import, 'pythonmodule.none', 'test library')
def test_invalid_item_from_existing_module(self):
assert_raises_with_msg(DataError,
"Importing 'pythonmodule.some_string' failed: "
"Expected class or module, got string.",
self._import, 'pythonmodule.some_string')
assert_raises_with_msg(DataError,
"Importing xxx 'pythonmodule.submodule.attribute' failed: "
"Expected class or module, got integer.",
self._import, 'pythonmodule.submodule.attribute', 'xxx')
def test_item_from_non_existing_module(self):
error = assert_raises(DataError, self._import, 'nonex.item')
assert_prefix(error, "Importing 'nonex.item' failed: ImportError:")
def test_import_file_by_path(self):
import module_library as expected
module = self._import_module(join(LIBDIR, 'module_library.py'))
assert_equal(module.__name__, expected.__name__)
assert_equal(dirname(normpath(module.__file__)),
dirname(normpath(expected.__file__)))
assert_equal(dir(module), dir(expected))
def test_import_class_from_file_by_path(self):
klass = self._import_class(join(LIBDIR, 'ExampleLibrary.py'))
assert_equal(klass().return_string_from_library('test'), 'test')
def test_invalid_file_by_path(self):
path = join(TEMPDIR, 'robot_import_invalid_test_file.py')
try:
with open(path, 'w') as file:
file.write('invalid content')
error = assert_raises(DataError, self._import, path)
assert_prefix(error, "Importing '%s' failed: SyntaxError:" % path)
finally:
os.remove(path)
def test_logging_when_importing_module(self):
logger = LoggerStub(remove_extension=True)
self._import_module('classes', 'test library', logger)
logger.assert_message("Imported test library module 'classes' from '%s'."
% join(LIBDIR, 'classes'))
def test_logging_when_importing_python_class(self):
logger = LoggerStub(remove_extension=True)
self._import_class('ExampleLibrary', logger=logger)
logger.assert_message("Imported class 'ExampleLibrary' from '%s'."
% join(LIBDIR, 'ExampleLibrary'))
if JYTHON:
def test_import_java_class(self):
klass = self._import_class('ExampleJavaLibrary')
assert_equal(klass().getCount(), 1)
def test_import_java_class_in_package(self):
klass = self._import_class('javapkg.JavaPackageExample')
assert_equal(klass().returnValue('xmas'), 'xmas')
def test_import_java_file_by_path(self):
import ExampleJavaLibrary as expected
klass = self._import_class(join(LIBDIR, 'ExampleJavaLibrary.java'))
assert_equal(klass().getCount(), 1)
assert_equal(klass.__name__, expected.__name__)
assert_equal(dir(klass), dir(expected))
def test_importing_java_package_fails(self):
assert_raises_with_msg(DataError,
"Importing test library 'javapkg' failed: "
"Expected class or module, got javapackage.",
self._import, 'javapkg', 'test library')
def test_logging_when_importing_java_class(self):
logger = LoggerStub()
self._import_class('ExampleJavaLibrary', 'java', logger)
logger.assert_message("Imported java class 'ExampleJavaLibrary' "
"from unknown location.")
def _import_module(self, name, type=None, logger=None):
module = self._import(name, type, logger)
assert_true(inspect.ismodule(module))
return module
def _import_class(self, name, type=None, logger=None):
klass = self._import(name, type, logger)
assert_true(inspect.isclass(klass))
return klass
def _import(self, name, type=None, logger=None):
return Importer(type, logger or LoggerStub()).import_class_or_module(name)
class TestErrorDetails(unittest.TestCase):
def test_no_traceback(self):
error = self._failing_import('NoneExisting')
assert_equal(self._get_traceback(error),
'Traceback (most recent call last):\n None')
def test_traceback(self):
path = create_temp_file('tb.py', extra_content='import nonex')
try:
error = self._failing_import(path)
finally:
shutil.rmtree(TESTDIR)
assert_equal(self._get_traceback(error),
'Traceback (most recent call last):\n'
' File "%s", line 5, in <module>\n'
' import nonex' % path)
def test_pythonpath(self):
error = self._failing_import('NoneExisting')
lines = self._get_pythonpath(error).splitlines()
assert_equal(lines[0], 'PYTHONPATH:')
for line in lines[1:]:
assert_true(line.startswith(' '))
def test_non_ascii_bytes_in_pythonpath(self):
sys.path.append('hyv\xe4')
try:
error = self._failing_import('NoneExisting')
finally:
sys.path.pop()
last_line = self._get_pythonpath(error).splitlines()[-1].strip()
assert_true(last_line.startswith('hyv'))
if JYTHON:
def test_classpath(self):
error = self._failing_import('NoneExisting')
lines = self._get_classpath(error).splitlines()
assert_equal(lines[0], 'CLASSPATH:')
for line in lines[1:]:
assert_true(line.startswith(' '))
def test_structure(self):
error = self._failing_import('NoneExisting')
quote = "'" if PY3 else ''
type = 'Import' if sys.version_info < (3, 6) else 'ModuleNotFound'
message = ("Importing 'NoneExisting' failed: {type}Error: No module "
"named {q}NoneExisting{q}".format(q=quote, type=type))
expected = (message, self._get_traceback(error),
self._get_pythonpath(error), self._get_classpath(error))
assert_equal(unicode(error), '\n'.join(expected).strip())
def _failing_import(self, name):
importer = Importer().import_class_or_module
return assert_raises(DataError, importer, name)
def _get_traceback(self, error):
return '\n'.join(self._block(error, 'Traceback (most recent call last):',
'PYTHONPATH:'))
def _get_pythonpath(self, error):
return '\n'.join(self._block(error, 'PYTHONPATH:', 'CLASSPATH:'))
def _get_classpath(self, error):
return '\n'.join(self._block(error, 'CLASSPATH:'))
def _block(self, error, start, end=None):
include = False
for line in unicode(error).splitlines():
if line == end:
return
if line == start:
include = True
if include:
yield line
class TestSplitPathToModule(unittest.TestCase):
def _verify(self, file_name, expected_name):
path = abspath(file_name)
actual = ByPathImporter(None)._split_path_to_module(path)
assert_equal(actual, (dirname(path), expected_name))
def test_normal_file(self):
self._verify('hello.py', 'hello')
self._verify('hello.class', 'hello')
self._verify('hello.world.java', 'hello.world')
def test_jython_class_file(self):
self._verify('hello$py.class', 'hello')
self._verify('__init__$py.class', '__init__')
def test_directory(self):
self._verify('hello', 'hello')
self._verify('hello'+os.sep, 'hello')
class TestInstantiation(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
if exists(TESTDIR):
shutil.rmtree(TESTDIR)
def test_when_importing_by_name(self):
from ExampleLibrary import ExampleLibrary
lib = Importer().import_class_or_module('ExampleLibrary',
instantiate_with_args=())
assert_true(not inspect.isclass(lib))
assert_true(isinstance(lib, ExampleLibrary))
def test_with_arguments(self):
lib = Importer().import_class_or_module('libswithargs.Mixed', range(5))
assert_equal(lib.get_args(), (0, 1, '2 3 4'))
def test_when_importing_by_path(self):
path = create_temp_file('args.py', extra_content='class args: a=1')
lib = Importer().import_class_or_module_by_path(path, ())
assert_true(not inspect.isclass(lib))
assert_equal(lib.__class__.__name__, 'args')
assert_equal(lib.a, 1)
def test_instantiate_failure(self):
err = assert_raises(DataError, Importer().import_class_or_module,
'ExampleLibrary', ['accepts', 'no', 'args'])
assert_true(unicode(err).startswith("Importing 'ExampleLibrary' failed: "
"Creating instance failed: TypeError:"))
def test_modules_do_not_take_arguments(self):
path = create_temp_file('no_args_allowed.py')
assert_raises_with_msg(DataError,
"Importing '%s' failed: Modules do not take arguments." % path,
Importer().import_class_or_module_by_path,
path, ['invalid'])
if __name__ == '__main__':
unittest.main()
|
alexandrul-ci/robotframework
|
utest/utils/test_importer_util.py
|
Python
|
apache-2.0
| 20,230
|
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 19, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
#=======================================================================================================================
# JSON format
#=======================================================================================================================
resource = """{
"resources": [
{
"kind": "http://schemas.ogf.org/occi/infrastructure#compute",
"mixins": [
"http://example.com/template/resource#medium"
],
"attributes": {
"occi": {
"compute": {
"speed": 2,
"memory": 4,
"cores": 12
}
}
},
"actions": [
{
"title": "Start My Server",
"href": "/compute/996ad860-2a9a-504f-8861-aeafd0b2ae29?action=start",
"category": "http://schemas.ogf.org/occi/infrastructure/compute/action#start"
}
],
"id": "9930",
"title": "Compute resource",
"summary": "This is a compute resource"
}
]
}
"""
#=======================================================================================================================
link = """
{
"links": [
{
"kind": "http://schemas.ogf.org/occi/infrastructure#compute",
"mixins": [
"http://example.com/template/resource#medium"
],
"attributes": {
"occi": {
"infrastructure": {
"networkinterface": {
"interface": "eth0",
"mac": "00:80:41:ae:fd:7e",
"address": "192.168.0.100",
"gateway": "192.168.0.1",
"allocation": "dynamic"
}
}
}
},
"id": "22fe83ae-a20f-54fc-b436-cec85c94c5e8",
"title": "Mynetworkinterface",
"target": "http://127.0.0.1:8090/bilel/vms/v2",
"source": "http://127.0.0.1:8090/bilel/vms/v1"
}
]
}
"""
#=======================================================================================================================
j_occi_att = """
{
"resources": [
{
"attributes": {
"occi": {
"compute": {
"speed": 2,
"memory": 4,
"cores": 12
}
}
}
}
]
}
"""
action_plus_attributes =\
"""
{
"actions": [
{
"term": "start",
"scheme": "http://schemas.ogf.org/occi/infrastructure/compute/action#",
"title": "Start Compute instance now",
"attributes": {
"method": {
"mutable": true,
"required": false,
"type": "string",
"pattern": "graceful|acpion|poweron",
"default": "poweron"
}
}
}
],
"attributes": {
"occi": {
"infrastructure": {
"networkinterface": {
"interface": "eth0",
"mac": "00:80:41:ae:fd:7e",
"address": "192.168.0.100",
"gateway": "192.168.0.1",
"allocation": "dynamic"
}
}
}
}
}
"""
#=======================================================================================================================
# HTTP format
#=======================================================================================================================
entity_http = "Category: compute; scheme=\"http://schemas.ogf.org/occi/infrastructure#\"; class=\"kind\";"\
"Category: my_stuff; scheme=\"http://example.com/template/resource#\"; class=\"medium\";"\
"X-OCCI-Attribute: occi.compute.cores=2"\
"Link: </users/foo/compute/b9ff813e-fee5-4a9d-b839-673f39746096?action=start>;"\
"rel=\"http://schemas.ogf.org/occi/infrastructure/compute/action#start\""
#=======================================================================================================================
x_occi_att = "X-OCCI-Attribute: occi.compute.cores=20:2"
action_att_http = """Category: start;
scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#";
class=action;
X-OCCI-Attribute: occi.compute.cores=20:2
"""
|
MarouenMechtri/CNG-Manager
|
pyocni/TDD/fake_Data/entities.py
|
Python
|
apache-2.0
| 5,785
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from django.utils import text
assert "42" == text.slugify(42)
|
tdyas/pants
|
contrib/mypy/examples/src/python/mypy_plugin/invalid.py
|
Python
|
apache-2.0
| 195
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Virtual Hard Disk image path specification implementation."""
import unittest
from dfvfs.path import vhdi_path_spec
from tests.path import test_lib
class VHDIPathSpecTest(test_lib.PathSpecTestCase):
"""Tests for the Virtual Hard Disk image path specification implementation."""
def testInitialize(self):
"""Tests the path specification initialization."""
path_spec = vhdi_path_spec.VHDIPathSpec(parent=self._path_spec)
self.assertIsNotNone(path_spec)
with self.assertRaises(ValueError):
vhdi_path_spec.VHDIPathSpec(parent=None)
with self.assertRaises(ValueError):
vhdi_path_spec.VHDIPathSpec(parent=self._path_spec, bogus='BOGUS')
def testComparable(self):
"""Tests the path specification comparable property."""
path_spec = vhdi_path_spec.VHDIPathSpec(parent=self._path_spec)
self.assertIsNotNone(path_spec)
expected_comparable = '\n'.join([
'type: TEST',
'type: VHDI',
''])
self.assertEqual(path_spec.comparable, expected_comparable)
if __name__ == '__main__':
unittest.main()
|
joachimmetz/dfvfs
|
tests/path/vhdi_path_spec.py
|
Python
|
apache-2.0
| 1,141
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import os.path as osp
import shutil
import requests
import subprocess
import hashlib
import tarfile
import zipfile
import time
from collections import OrderedDict
try:
from tqdm import tqdm
except:
class tqdm(object):
def __init__(self, total=None):
self.total = total
self.n = 0
def update(self, n):
self.n += n
if self.total is None:
sys.stderr.write("\r{0:.1f} bytes".format(self.n))
else:
sys.stderr.write("\r{0:.1f}%".format(100 * self.n / float(
self.total)))
sys.stderr.flush()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stderr.write('\n')
import logging
logger = logging.getLogger(__name__)
__all__ = ['get_weights_path_from_url']
WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/hapi/weights")
DOWNLOAD_RETRY_LIMIT = 3
def is_url(path):
"""
Whether path is URL.
Args:
        path (string): the path to check.
"""
return path.startswith('http://') or path.startswith('https://')
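# Editor's note: illustrative expectations (not part of the module) for the URL check above.
assert is_url('https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams')
assert not is_url('/home/user/.cache/paddle/hapi/weights/resnet18.pdparams')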
def get_weights_path_from_url(url, md5sum=None):
    """Get the weights path from WEIGHTS_HOME; if it does not exist,
    download it from url.
Args:
url (str): download url
md5sum (str): md5 sum of download package
Returns:
str: a local path to save downloaded weights.
Examples:
.. code-block:: python
from paddle.utils.download import get_weights_path_from_url
resnet18_pretrained_weight_url = 'https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams'
local_weight_path = get_weights_path_from_url(resnet18_pretrained_weight_url)
"""
path = get_path_from_url(url, WEIGHTS_HOME, md5sum)
return path
def _map_path(url, root_dir):
# parse path after download under root_dir
fname = osp.split(url)[-1]
fpath = fname
return osp.join(root_dir, fpath)
def _get_unique_endpoints(trainer_endpoints):
    # Sort so that the result does not depend on per-card differences in the
    # ordering of the endpoint environment variables
trainer_endpoints.sort()
ips = set()
unique_endpoints = set()
for endpoint in trainer_endpoints:
ip = endpoint.split(":")[0]
if ip in ips:
continue
ips.add(ip)
unique_endpoints.add(endpoint)
logger.info("unique_endpoints {}".format(unique_endpoints))
return unique_endpoints
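# Editor's note: a quick illustrative check (not part of the library) of the
# de-duplication above -- endpoints that share an IP collapse to the first one
# seen after sorting, so only one worker per machine performs the download.
assert _get_unique_endpoints(
    ["10.1.1.2:6170", "10.1.1.1:6171", "10.1.1.1:6170"]
) == {"10.1.1.1:6170", "10.1.1.2:6170"}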
def get_path_from_url(url,
root_dir,
md5sum=None,
check_exist=True,
decompress=True,
method='get'):
""" Download from given url to root_dir.
    If the file or directory specified by url already exists under
    root_dir, return the path directly; otherwise download it
    from url, decompress it and return the path.
Args:
url (str): download url
root_dir (str): root dir for downloading, it should be
WEIGHTS_HOME or DATASET_HOME
md5sum (str): md5 sum of download package
decompress (bool): decompress zip or tar file. Default is `True`
method (str): which download method to use. Support `wget` and `get`. Default is `get`.
Returns:
str: a local path to save downloaded models & weights & datasets.
"""
from paddle.fluid.dygraph.parallel import ParallelEnv
assert is_url(url), "downloading from {} not a url".format(url)
# parse path after download to decompress under root_dir
fullpath = _map_path(url, root_dir)
    # Mainly used when downloading data on multiple machines: each
    # distinct IP downloads the data, and every machine behind the
    # same IP downloads it only once.
unique_endpoints = _get_unique_endpoints(ParallelEnv().trainer_endpoints[:])
if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum):
logger.info("Found {}".format(fullpath))
else:
if ParallelEnv().current_endpoint in unique_endpoints:
fullpath = _download(url, root_dir, md5sum, method=method)
else:
while not os.path.exists(fullpath):
time.sleep(1)
if ParallelEnv().current_endpoint in unique_endpoints:
if decompress and (tarfile.is_tarfile(fullpath) or
zipfile.is_zipfile(fullpath)):
fullpath = _decompress(fullpath)
return fullpath
def _get_download(url, fullname):
# using requests.get method
fname = osp.basename(fullname)
try:
req = requests.get(url, stream=True)
except Exception as e: # requests.exceptions.ConnectionError
logger.info("Downloading {} from {} failed with exception {}".format(
fname, url, str(e)))
return False
if req.status_code != 200:
raise RuntimeError("Downloading from {} failed with code "
"{}!".format(url, req.status_code))
    # To protect against interrupted downloads, download to
    # tmp_fullname first, then move tmp_fullname to fullname
    # once the download has finished
tmp_fullname = fullname + "_tmp"
total_size = req.headers.get('content-length')
with open(tmp_fullname, 'wb') as f:
if total_size:
with tqdm(total=(int(total_size) + 1023) // 1024) as pbar:
for chunk in req.iter_content(chunk_size=1024):
f.write(chunk)
pbar.update(1)
else:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
shutil.move(tmp_fullname, fullname)
return fullname
def _wget_download(url, fullname):
# using wget to download url
tmp_fullname = fullname + "_tmp"
    # --user-agent
command = 'wget -O {} -t {} {}'.format(tmp_fullname, DOWNLOAD_RETRY_LIMIT,
url)
subprc = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_ = subprc.communicate()
if subprc.returncode != 0:
raise RuntimeError(
'{} failed. Please make sure `wget` is installed or {} exists'.
format(command, url))
shutil.move(tmp_fullname, fullname)
return fullname
_download_methods = {
'get': _get_download,
'wget': _wget_download,
}
def _download(url, path, md5sum=None, method='get'):
"""
Download from url, save to path.
url (str): download url
path (str): download to given path
md5sum (str): md5 sum of download package
method (str): which download method to use. Support `wget` and `get`. Default is `get`.
"""
assert method in _download_methods, 'make sure `{}` implemented'.format(
method)
if not osp.exists(path):
os.makedirs(path)
fname = osp.split(url)[-1]
fullname = osp.join(path, fname)
retry_cnt = 0
logger.info("Downloading {} from {}".format(fname, url))
while not (osp.exists(fullname) and _md5check(fullname, md5sum)):
if retry_cnt < DOWNLOAD_RETRY_LIMIT:
retry_cnt += 1
else:
raise RuntimeError("Download from {} failed. "
"Retry limit reached".format(url))
if not _download_methods[method](url, fullname):
time.sleep(1)
continue
return fullname
def _md5check(fullname, md5sum=None):
if md5sum is None:
return True
logger.info("File {} md5 checking...".format(fullname))
md5 = hashlib.md5()
with open(fullname, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
md5.update(chunk)
calc_md5sum = md5.hexdigest()
if calc_md5sum != md5sum:
logger.info("File {} md5 check failed, {}(calc) != "
"{}(base)".format(fullname, calc_md5sum, md5sum))
return False
return True
def _decompress(fname):
"""
Decompress for zip and tar file
"""
logger.info("Decompressing {}...".format(fname))
    # To protect against interrupted decompression, decompress
    # into a temporary fpath_tmp directory first; if decompression
    # succeeds, move the decompressed files to fpath, then delete
    # fpath_tmp and remove the downloaded compressed file.
if tarfile.is_tarfile(fname):
uncompressed_path = _uncompress_file_tar(fname)
elif zipfile.is_zipfile(fname):
uncompressed_path = _uncompress_file_zip(fname)
else:
raise TypeError("Unsupport compress file type {}".format(fname))
return uncompressed_path
def _uncompress_file_zip(filepath):
with zipfile.ZipFile(filepath, 'r') as files:
file_list = files.namelist()
file_dir = os.path.dirname(filepath)
if _is_a_single_file(file_list):
rootpath = file_list[0]
uncompressed_path = os.path.join(file_dir, rootpath)
files.extractall(file_dir)
elif _is_a_single_dir(file_list):
# `strip(os.sep)` to remove `os.sep` in the tail of path
rootpath = os.path.splitext(file_list[0].strip(os.sep))[0].split(
os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
files.extractall(file_dir)
else:
rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
if not os.path.exists(uncompressed_path):
os.makedirs(uncompressed_path)
files.extractall(os.path.join(file_dir, rootpath))
return uncompressed_path
def _uncompress_file_tar(filepath, mode="r:*"):
with tarfile.open(filepath, mode) as files:
file_list = files.getnames()
file_dir = os.path.dirname(filepath)
if _is_a_single_file(file_list):
rootpath = file_list[0]
uncompressed_path = os.path.join(file_dir, rootpath)
files.extractall(file_dir)
elif _is_a_single_dir(file_list):
rootpath = os.path.splitext(file_list[0].strip(os.sep))[0].split(
os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
files.extractall(file_dir)
else:
rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]
uncompressed_path = os.path.join(file_dir, rootpath)
if not os.path.exists(uncompressed_path):
os.makedirs(uncompressed_path)
files.extractall(os.path.join(file_dir, rootpath))
return uncompressed_path
def _is_a_single_file(file_list):
if len(file_list) == 1 and file_list[0].find(os.sep) < 0:
return True
return False
def _is_a_single_dir(file_list):
new_file_list = []
for file_path in file_list:
if '/' in file_path:
file_path = file_path.replace('/', os.sep)
elif '\\' in file_path:
file_path = file_path.replace('\\', os.sep)
new_file_list.append(file_path)
file_name = new_file_list[0].split(os.sep)[0]
for i in range(1, len(new_file_list)):
if file_name != new_file_list[i].split(os.sep)[0]:
return False
return True
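# Editor's note: illustrative checks (not part of the module) of the archive-layout
# helpers above; the file names are made up. A lone top-level entry counts as a single
# file, entries sharing one top-level directory count as a single dir, anything else
# falls through to the "decompress into a directory named after the archive" branch.
assert _is_a_single_file(["model.pdparams"])
assert _is_a_single_dir(["resnet18/", "resnet18/model.pdparams", "resnet18/README.md"])
assert not _is_a_single_dir(["resnet18/model.pdparams", "vgg16/model.pdparams"])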
|
luotao1/Paddle
|
python/paddle/utils/download.py
|
Python
|
apache-2.0
| 11,957
|
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datetime
import hashlib
import os
import re
import time
import urllib
import jinja2
GITHUB_VIEW_TEMPLATE = 'https://github.com/kubernetes/kubernetes/blob/%s/%s#L%s'
GITHUB_COMMIT_TEMPLATE = 'https://github.com/kubernetes/kubernetes/commit/%s'
def do_timestamp(unix_time, css_class='timestamp', tmpl='%F %H:%M'):
"""Convert an int Unix timestamp into a human-readable datetime."""
t = datetime.datetime.utcfromtimestamp(unix_time)
return jinja2.Markup('<span class="%s" data-epoch="%s">%s</span>' %
(css_class, unix_time, t.strftime(tmpl)))
def do_dt_to_epoch(dt):
return time.mktime(dt.timetuple())
def do_shorttimestamp(unix_time):
t = datetime.datetime.utcfromtimestamp(unix_time)
return jinja2.Markup('<span class="shorttimestamp" data-epoch="%s">%s</span>' %
(unix_time, t.strftime('%d %H:%M')))
def do_duration(seconds):
"""Convert a numeric duration in seconds into a human-readable string."""
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if hours:
return '%dh%dm' % (hours, minutes)
if minutes:
return '%dm%ds' % (minutes, seconds)
else:
if seconds < 10:
return '%.2fs' % seconds
return '%ds' % seconds
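# Editor's note: illustrative expectations (not part of the module) for the duration
# formatting above.
assert do_duration(3725) == '1h2m'
assert do_duration(75) == '1m15s'
assert do_duration(42) == '42s'
assert do_duration(5.5) == '5.50s'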
def do_slugify(inp):
"""Convert an arbitrary string into a url-safe slug."""
inp = re.sub(r'[^\w\s-]+', '', inp)
return re.sub(r'\s+', '-', inp).lower()
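# Editor's note: illustrative expectation (not part of the module) for the slug
# conversion above -- punctuation is dropped, whitespace becomes hyphens.
assert do_slugify('Hello, World! 2.0') == 'hello-world-20'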
def do_linkify_stacktrace(inp, commit):
"""Add links to a source code viewer for every mentioned source line."""
inp = unicode(jinja2.escape(inp))
if not commit:
return jinja2.Markup(inp) # this was already escaped, mark it safe!
def rep(m):
path, line = m.groups()
return '<a href="%s">%s</a>' % (
GITHUB_VIEW_TEMPLATE % (commit, path, line), m.group(0))
return jinja2.Markup(re.sub(r'^/\S*/kubernetes/(\S+):(\d+)$', rep, inp,
flags=re.MULTILINE))
def do_github_commit_link(commit):
commit_url = jinja2.escape(GITHUB_COMMIT_TEMPLATE % commit)
return jinja2.Markup('<a href="%s">%s</a>' % (commit_url, commit[:6]))
def do_testcmd(name):
if name.startswith('k8s.io/'):
try:
pkg, name = name.split(' ')
except ValueError: # don't block the page render
logging.error('Unexpected Go unit test name %r', name)
return name
return 'go test -v %s -run %s$' % (pkg, name)
else:
name = re.sub(r'^\[k8s\.io\] ', '', name)
name_escaped = re.escape(name).replace('\\ ', '\\s')
test_args = ('--ginkgo.focus=%s$' % name_escaped)
return "go run hack/e2e.go -v -test --test_args='%s'" % test_args
def do_parse_pod_name(text):
"""Find the pod name from the failure and return the pod name."""
p = re.search(r'(.*) pod (.*?) .*', text)
if p:
return re.sub(r'[\'"\\:]', '', p.group(2))
else:
return ""
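# Editor's note: illustrative expectations (not part of the module) for the pod-name
# extraction above; the failure messages are made up.
assert do_parse_pod_name('Waiting for pod "my-pod-abc123" to be running') == 'my-pod-abc123'
assert do_parse_pod_name('unrelated failure message') == ''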
def do_label_attr(labels, name):
'''
>> do_label_attr(['needs-rebase', 'size/XS'], 'size')
'XS'
'''
name += '/'
for label in labels:
if label.startswith(name):
return label[len(name):]
return ''
def do_classify_size(payload):
'''
Determine the size class for a PR, based on either its labels or
on the magnitude of its changes.
'''
size = do_label_attr(payload['labels'], 'size')
if not size and 'additions' in payload and 'deletions' in payload:
lines = payload['additions'] + payload['deletions']
# based on mungegithub/mungers/size.go
for limit, label in [
(10, 'XS'),
(30, 'S'),
(100, 'M'),
(500, 'L'),
(1000, 'XL')
]:
if lines < limit:
return label
return 'XXL'
return size
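# Editor's note: illustrative expectations (not part of the module): an explicit size
# label wins, otherwise additions+deletions pick the bucket (120 lines -> L, 1100 -> XXL).
assert do_classify_size({'labels': ['size/XS']}) == 'XS'
assert do_classify_size({'labels': [], 'additions': 40, 'deletions': 80}) == 'L'
assert do_classify_size({'labels': [], 'additions': 900, 'deletions': 200}) == 'XXL'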
def do_render_status(payload, user):
states = set()
text = 'Pending'
if 'lgtm' in payload.get('labels', []):
text = 'LGTM'
elif user in payload.get('attn', {}):
text = payload['attn'][user].title()
for ctx, (state, _url, desc) in payload.get('status', {}).items():
if ctx == 'Submit Queue' and state == 'pending':
if 'does not have LGTM' in desc:
# Don't show overall status as pending when Submit
# won't continue without LGTM.
continue
if ctx == 'code-review/reviewable' and state == 'pending':
# Reviewable isn't a CI, so we don't care if it's pending.
# Its dashboard might replace all of this eventually.
continue
states.add(state)
icon = ''
if 'failure' in states:
icon = 'x'
state = 'failure'
elif 'pending' in states:
icon = 'primitive-dot'
state = 'pending'
elif 'success' in states:
icon = 'check'
state = 'success'
if icon:
icon = '<span class="text-%s octicon octicon-%s"></span>' % (
state, icon)
return jinja2.Markup('%s%s' % (icon, text))
def do_ltrim(s, needle):
if s.startswith(needle):
return s[len(needle):]
return s
def do_select(seq, pred):
return filter(pred, seq)
def do_tg_url(testgrid_query, test_name=''):
if test_name:
regex = '^Overall$|' + re.escape(test_name)
testgrid_query += '&include-filter-by-regex=%s' % urllib.quote(regex)
return 'https://k8s-testgrid.appspot.com/%s' % testgrid_query
static_hashes = {}
def do_static(filename):
filename = 'static/%s' % filename
if filename not in static_hashes:
data = open(filename).read()
static_hashes[filename] = hashlib.sha1(data).hexdigest()[:10]
return '/%s?%s' % (filename, static_hashes[filename])
do_basename = os.path.basename
do_dirname = os.path.dirname
do_quote_plus = urllib.quote_plus
def register(filters):
"""Register do_* functions in this module in a dictionary."""
for name, func in globals().items():
if name.startswith('do_'):
filters[name[3:]] = func
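# Editor's note: a hedged usage sketch, not part of this module -- one way register()
# could be wired into a jinja2 Environment so templates can use e.g.
# {{ build.started|timestamp }} or {{ seconds|duration }}.
def _example_environment():
    env = jinja2.Environment()
    register(env.filters)  # adds 'timestamp', 'duration', 'slugify', ... (do_ prefix stripped)
    return env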
|
maisem/test-infra
|
gubernator/filters.py
|
Python
|
apache-2.0
| 6,694
|
import datetime
import difflib
import flask
import flask.json
import sqlalchemy
import pytz
from www import server
from www import login
@server.app.route('/history')
@login.require_mod
def history(session):
page = flask.request.values.get('page', 'all')
assert page in ('responses', 'explanations', 'spam', 'link_spam', 'all')
history = server.db.metadata.tables["history"]
users = server.db.metadata.tables["users"]
query = sqlalchemy.select([
history.c.id, history.c.section, history.c.changetime, users.c.display_name,
sqlalchemy.func.length(history.c.jsondata.cast(sqlalchemy.Text))
]).select_from(history.join(users, history.c.changeuser == users.c.id, isouter=True)) \
.order_by(history.c.changetime)
if page != 'all':
query = query.where(history.c.section == page)
with server.db.engine.begin() as conn:
data = [
{'key': key, 'section': section, 'time': time, 'user': user, 'datalen': datalen}
for key, section, time, user, datalen in conn.execute(query).fetchall()
]
lastlen = {}
lastkey = {}
for i in data:
i['lengthdiff'] = i['datalen'] - lastlen.get(i['section'], 0)
lastlen[i['section']] = i['datalen']
if i['user'] is None:
i['user'] = "unknown"
i['lastkey'], lastkey[i['section']] = lastkey.get(i['section']), i['key']
data.reverse()
return flask.render_template("historylist.html", page=page, data=data, session=session)
@server.app.route('/history/<int:historykey>')
@login.require_mod
def history_show(session, historykey):
history = server.db.metadata.tables["history"]
users = server.db.metadata.tables["users"]
with server.db.engine.begin() as conn:
section, time, user, data = conn.execute(sqlalchemy.select([
history.c.section, history.c.changetime, users.c.display_name, history.c.jsondata
]).select_from(history.join(users, history.c.changeuser == users.c.id, isouter=True))
.where(history.c.id == historykey)).first()
if section in ('responses', 'explanations'):
for row in data.values():
if not isinstance(row['response'], (tuple, list)):
row['response'] = [row['response']]
row['response'] = [{"text": i, "mode": "both"} for i in row['response']]
row['access'] = {"from": row['access'], "to": row['access']}
row['mode'] = "both nochange"
data = list(data.items())
data.sort(key=lambda a:a[0].lower())
elif section in ('spam', 'link_spam'):
for row in data:
row['mode'] = "both nochange"
headdata = build_headdata(historykey, historykey, section, user, time)
return flask.render_template("historyshow.html", data=data, headdata=headdata, session=session)
@server.app.route('/history/<int:fromkey>/<int:tokey>')
@login.require_mod
def history_diff(session, fromkey, tokey):
history = server.db.metadata.tables["history"]
users = server.db.metadata.tables["users"]
with server.db.engine.begin() as conn:
fromsection, fromdata = conn.execute(sqlalchemy.select([
history.c.section, history.c.jsondata
]).where(history.c.id == fromkey)).first()
tosection, totime, touser, todata = conn.execute(sqlalchemy.select([
history.c.section, history.c.changetime, users.c.display_name, history.c.jsondata
]).select_from(history.join(users, history.c.changeuser == users.c.id, isouter=True))
.where(history.c.id == tokey)).first()
assert fromsection == tosection
if tosection in ('responses', 'explanations'):
data = {}
keys = set(fromdata.keys()) | set(todata.keys())
for key in keys:
fromrow = fromdata.get(key)
torow = todata.get(key)
row = {}
if fromrow is None:
if not isinstance(torow['response'], (tuple, list)):
row['response'] = [torow['response']]
else:
row['response'] = torow['response']
row['response'] = [{"text": i, "mode": "to"} for i in row['response']]
row['access'] = {"from": torow['access'], "to": torow['access']}
row['mode'] = "to"
elif torow is None:
if not isinstance(fromrow['response'], (tuple, list)):
row['response'] = [fromrow['response']]
else:
row['response'] = fromrow['response']
row['response'] = [{"text": i, "mode": "from"} for i in row['response']]
row['access'] = {"from": fromrow['access'], "to": fromrow['access']}
row['mode'] = "from"
else:
if not isinstance(fromrow['response'], (tuple, list)):
fromrow['response'] = [fromrow['response']]
if not isinstance(torow['response'], (tuple, list)):
torow['response'] = [torow['response']]
row['response'] = []
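# Align the old and new response lists with difflib so unchanged lines are
# tagged "both" and removed/added lines are tagged "from"/"to" for the diff view.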
differ = difflib.SequenceMatcher(a=fromrow['response'], b=torow['response'])
for op, i1, j1, i2, j2 in differ.get_opcodes():
if op == "equal":
for i in range(i1, j1):
row['response'].append({"text": fromrow['response'][i], "mode": "both"})
else:
for i in range(i1, j1):
row['response'].append({"text": fromrow['response'][i], "mode": "from"})
for i in range(i2, j2):
row['response'].append({"text": torow['response'][i], "mode": "to"})
row['access'] = {"from": fromrow['access'], "to": torow['access']}
if all(i['mode'] == "both" for i in row['response']) and row['access']['from'] == row['access']['to']:
row['mode'] = "both nochange"
else:
row['mode'] = "both"
data[key] = row
data = list(data.items())
data.sort(key=lambda a:a[0].lower())
elif tosection in ('spam', 'link_spam'):
fromdata = [(i['re'], i['message']) for i in fromdata]
todata = [(i['re'], i['message']) for i in todata]
data = []
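# Same opcode-based diff as above, but over (regex, message) pairs.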
differ = difflib.SequenceMatcher(a=fromdata, b=todata)
for op, i1, j1, i2, j2 in differ.get_opcodes():
if op == "equal":
for i in range(i1, j1):
data.append({'re': fromdata[i][0], 'message': fromdata[i][1], 'mode': 'both nochange'})
else:
for i in range(i1, j1):
data.append({'re': fromdata[i][0], 'message': fromdata[i][1], 'mode': 'from'})
for i in range(i2, j2):
data.append({'re': todata[i][0], 'message': todata[i][1], 'mode': 'to'})
headdata = build_headdata(fromkey, tokey, tosection, touser, totime)
return flask.render_template("historyshow.html", data=data, headdata=headdata, session=session)
def build_headdata(fromkey, tokey, section, user, time):
history = server.db.metadata.tables["history"]
with server.db.engine.begin() as conn:
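# Look up the neighbouring entries in the same section to provide prev/next keys.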
prevkey = conn.execute(sqlalchemy.select([sqlalchemy.func.max(history.c.id)])
.where((history.c.id < fromkey) & (history.c.section == section))).first()
nextkey = conn.execute(sqlalchemy.select([sqlalchemy.func.min(history.c.id)])
.where((history.c.id > tokey) & (history.c.section == section))).first()
if prevkey is not None:
prevkey = prevkey[0]
if nextkey is not None:
nextkey = nextkey[0]
return {
"page": section,
"user": user,
"time": time,
"fromkey": fromkey,
"tokey": tokey,
"prevkey": prevkey,
"nextkey": nextkey,
"isdiff": fromkey != tokey,
}
def store(section, user, jsondata):
with server.db.engine.begin() as conn:
conn.execute(server.db.metadata.tables["history"].insert(),
section=section,
changetime=datetime.datetime.now(tz=pytz.utc),
changeuser=user,
jsondata=jsondata,
)
|
andreasots/lrrbot
|
www/history.py
|
Python
|
apache-2.0
| 7,065
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# In this test, I will check and make sure the scoring history metrics of GLM with lambda search on,
# and with CV on or off, contain the correct content.
def test_glm_scoring_history_multinomial():
col_list_compare = ["iterations", "training_logloss", "validation_logloss", "training_classification_error",
"validation_classification_error", "deviance_train", "deviance_test"]
print("Preparing dataset....")
h2o_data = h2o.import_file(
pyunit_utils.locate("smalldata/glm_test/multinomial_10_classes_10_cols_10000_Rows_train.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
h2o_data["C3"] = h2o_data["C3"].asfactor()
h2o_data["C4"] = h2o_data["C4"].asfactor()
h2o_data["C5"] = h2o_data["C5"].asfactor()
h2o_data["C11"] = h2o_data["C11"].asfactor()
splits_frames = h2o_data.split_frame(ratios=[.8], seed=1234)
train = splits_frames[0]
valid = splits_frames[1]
print("Building model with score_each_iteration turned on, with lambda search.")
h2o_model_score_each = glm(family="multinomial", score_each_iteration=True, lambda_search=True, nlambdas=10,
generate_scoring_history=True)
h2o_model_score_each.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
print("Building model with score_interval=1. Should generate same model as score_each_iteration turned on.")
h2o_model = glm(family="multinomial", score_iteration_interval=1, lambda_search=True, nlambdas=10,
generate_scoring_history=True)
h2o_model.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
pyunit_utils.assert_equal_scoring_history(h2o_model_score_each, h2o_model, col_list_compare)
col_list_compare.append("deviance_xval")
col_list_compare.append("deviance_se")
print("Building model with score_each_iteration turned on, with lambda search and CV.")
h2o_model_score_each_cv = glm(family="multinomial", score_each_iteration=True, lambda_search=True, nlambdas=10,
nfolds=2, fold_assignment='modulo', generate_scoring_history=True)
h2o_model_score_each_cv.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
print("Building model with score_interval=1. Should generate same model as score_each_iteration turned on, with "
"lambda search and CV.")
h2o_model_cv = glm(family="multinomial", score_iteration_interval=1, lambda_search=True, nlambdas=10, nfolds=2,
fold_assignment='modulo', generate_scoring_history=True)
h2o_model_cv.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
pyunit_utils.assert_equal_scoring_history(h2o_model_score_each_cv, h2o_model_cv, col_list_compare)
h2o_model_4th_cv = glm(family="multinomial", score_iteration_interval=4, lambda_search=True, nlambdas=10, nfolds=2,
fold_assignment='modulo', generate_scoring_history=True)
h2o_model_4th_cv.train(x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], y="C11", training_frame=train,
validation_frame=valid)
pyunit_utils.assertEqualScoringHistoryIteration(h2o_model_cv, h2o_model_4th_cv, col_list_compare)
print("Done")
if __name__ == "__main__":
pyunit_utils.standalone_test(test_glm_scoring_history_multinomial)
else:
test_glm_scoring_history_multinomial()
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_7968_scoring_history_glm_multinomial_lambda_search_generate_scoring_history_cv.py
|
Python
|
apache-2.0
| 3,914
|
from indy import crypto
async def prep(wallet_handle, my_vk, their_vk, msg):
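# Authenticated-encrypt the message from my_vk to their_vk and write the ciphertext to message.dat.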
msg = bytes(msg, "utf-8")
encrypted = await crypto.auth_crypt(wallet_handle, my_vk, their_vk, msg)
# encrypted = await crypto.anon_crypt(their_vk, msg)
print('encrypted = %s' % repr(encrypted))
with open('message.dat', 'wb') as f:
f.write(encrypted)
print('prepping %s' % msg)
|
Artemkaaas/indy-sdk
|
docs/how-tos/send-secure-msg/python/step5.py
|
Python
|
apache-2.0
| 361
|
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_import as h2i
import h2o_browse as h2b
argcaseList = [
{
'response': 106,
'family': 'gaussian',
'lambda': 1.0E-5,
'max_iter': 50,
'n_folds': 0,
'alpha': 1,
'beta_epsilon': 1.0E-4
},
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=1)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_hhp_107_01_browse(self):
h2o.beta_features = True
csvPathname = 'hhp_107_01.data.gz'
print "\n" + csvPathname
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put',
hex_key="hhp_107_01.data.hex", timeoutSecs=15)
# pop open a browser on the cloud
# h2b.browseTheCloud()
trial = 0
for argcase in argcaseList:
print "\nTrial #", trial, "start"
kwargs = argcase
print 'response:', kwargs['response']
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, browseAlso=True, timeoutSecs=200, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
print "\nTrial #", trial
if __name__ == '__main__':
h2o.unit_main()
|
janezhango/BigDataMachineLearning
|
py/testdir_single_jvm/test_GLM2_hhp107_01_browse.py
|
Python
|
apache-2.0
| 1,588
|
# Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Deployment Unit Tests - These tests are intended for our
internal setup only and will most likely not work on
other HaaS configurations."""
from haas import api, model, deferred, server
from haas.model import db
from haas.test_common import *
import pytest
@pytest.fixture
def configure():
config_testsuite()
config.load_extensions()
fresh_database = pytest.fixture(fresh_database)
@pytest.fixture
def server_init():
server.register_drivers()
server.validate_state()
with_request_context = pytest.yield_fixture(with_request_context)
site_layout = pytest.fixture(site_layout)
pytestmark = pytest.mark.usefixtures('configure',
'server_init',
'fresh_database',
'with_request_context',
'site_layout')
class TestNativeNetwork(NetworkTest):
def test_isolated_networks(self):
def create_networks():
nodes = self.collect_nodes()
# Create two networks
network_create_simple('net-0', 'anvil-nextgen')
network_create_simple('net-1', 'anvil-nextgen')
ports = self.get_all_ports(nodes)
# Assert that n0 and n1 are not on any network
port_networks = self.get_port_networks(ports)
assert self.get_network(nodes[0].nics[0].port, port_networks) == set()
assert self.get_network(nodes[1].nics[0].port, port_networks) == set()
# Connect n0 and n1 to net-0 and net-1 respectively
api.node_connect_network(nodes[0].label, nodes[0].nics[0].label, 'net-0')
api.node_connect_network(nodes[1].label, nodes[1].nics[0].label, 'net-1')
deferred.apply_networking()
# Assert that n0 and n1 are on isolated networks
port_networks = self.get_port_networks(ports)
assert self.get_network(nodes[0].nics[0].port, port_networks) == \
set([nodes[0].nics[0].port])
assert self.get_network(nodes[1].nics[0].port, port_networks) == \
set([nodes[1].nics[0].port])
# Add n2 and n3 to the same networks as n0 and n1 respectively
api.node_connect_network(nodes[2].label, nodes[2].nics[0].label, 'net-0')
api.node_connect_network(nodes[3].label, nodes[3].nics[0].label, 'net-1')
deferred.apply_networking()
# Assert that n2 and n3 have been added to n0 and n1's networks
# respectively
port_networks = self.get_port_networks(ports)
assert self.get_network(nodes[0].nics[0].port, port_networks) == \
set([nodes[0].nics[0].port, nodes[2].nics[0].port])
assert self.get_network(nodes[1].nics[0].port, port_networks) == \
set([nodes[1].nics[0].port, nodes[3].nics[0].port])
def delete_networks():
# Query the DB for nodes on this project
project = api._must_find(model.Project, 'anvil-nextgen')
nodes = project.nodes
ports = self.get_all_ports(nodes)
# Remove all nodes from their networks
for node in nodes:
attachment = model.NetworkAttachment.query \
.filter_by(nic=node.nics[0]).one()
api.node_detach_network(node.label,
node.nics[0].label,
attachment.network.label)
deferred.apply_networking()
# Assert that none of the nodes are on any network
port_networks = self.get_port_networks(ports)
for node in nodes:
assert self.get_network(node.nics[0].port, port_networks) == set()
# Delete the networks
api.network_delete('net-0')
api.network_delete('net-1')
# Create a project
api.project_create('anvil-nextgen')
create_networks()
delete_networks()
|
henn/hil
|
tests/deployment/native_networks.py
|
Python
|
apache-2.0
| 4,640
|
import sys
from textwrap import dedent
import py
import pytest
import tox
import tox.config
from tox.config import * # noqa
from tox.venv import VirtualEnv
class TestVenvConfig:
def test_config_parsing_minimal(self, tmpdir, newconfig):
config = newconfig([], """
[testenv:py1]
""")
assert len(config.envconfigs) == 1
assert config.toxworkdir.realpath() == tmpdir.join(".tox").realpath()
assert config.envconfigs['py1'].basepython == sys.executable
assert config.envconfigs['py1'].deps == []
assert config.envconfigs['py1'].platform == ".*"
def test_config_parsing_multienv(self, tmpdir, newconfig):
config = newconfig([], """
[tox]
toxworkdir = %s
indexserver =
xyz = xyz_repo
[testenv:py1]
deps=hello
[testenv:py2]
deps=
world1
:xyz:http://hello/world
""" % (tmpdir, ))
assert config.toxworkdir == tmpdir
assert len(config.envconfigs) == 2
assert config.envconfigs['py1'].envdir == tmpdir.join("py1")
dep = config.envconfigs['py1'].deps[0]
assert dep.name == "hello"
assert dep.indexserver is None
assert config.envconfigs['py2'].envdir == tmpdir.join("py2")
dep1, dep2 = config.envconfigs['py2'].deps
assert dep1.name == "world1"
assert dep2.name == "http://hello/world"
assert dep2.indexserver.name == "xyz"
assert dep2.indexserver.url == "xyz_repo"
def test_envdir_set_manually(self, tmpdir, newconfig):
config = newconfig([], """
[testenv:devenv]
envdir = devenv
""")
envconfig = config.envconfigs['devenv']
assert envconfig.envdir == tmpdir.join('devenv')
def test_envdir_set_manually_with_substitutions(self, tmpdir, newconfig):
config = newconfig([], """
[testenv:devenv]
envdir = {toxworkdir}/foobar
""")
envconfig = config.envconfigs['devenv']
assert envconfig.envdir == config.toxworkdir.join('foobar')
def test_force_dep_version(self, initproj):
"""
Make sure we can override dependencies configured in tox.ini when using the command line
option --force-dep.
"""
initproj("example123-0.5", filedefs={
'tox.ini': '''
[tox]
[testenv]
deps=
dep1==1.0
dep2>=2.0
dep3
dep4==4.0
'''
})
config = parseconfig(
['--force-dep=dep1==1.5', '--force-dep=dep2==2.1',
'--force-dep=dep3==3.0'])
assert config.option.force_dep == [
'dep1==1.5', 'dep2==2.1', 'dep3==3.0']
assert [str(x) for x in config.envconfigs['python'].deps] == [
'dep1==1.5', 'dep2==2.1', 'dep3==3.0', 'dep4==4.0',
]
def test_is_same_dep(self):
"""
Ensure parseini._is_same_dep works correctly with a few samples.
"""
assert DepOption._is_same_dep('pkg_hello-world3==1.0', 'pkg_hello-world3')
assert DepOption._is_same_dep('pkg_hello-world3==1.0', 'pkg_hello-world3>=2.0')
assert DepOption._is_same_dep('pkg_hello-world3==1.0', 'pkg_hello-world3>2.0')
assert DepOption._is_same_dep('pkg_hello-world3==1.0', 'pkg_hello-world3<2.0')
assert DepOption._is_same_dep('pkg_hello-world3==1.0', 'pkg_hello-world3<=2.0')
assert not DepOption._is_same_dep('pkg_hello-world3==1.0', 'otherpkg>=2.0')
class TestConfigPlatform:
def test_config_parse_platform(self, newconfig):
config = newconfig([], """
[testenv:py1]
platform = linux2
""")
assert len(config.envconfigs) == 1
assert config.envconfigs['py1'].platform == "linux2"
def test_config_parse_platform_rex(self, newconfig, mocksession, monkeypatch):
config = newconfig([], """
[testenv:py1]
platform = a123|b123
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['py1']
venv = VirtualEnv(envconfig, session=mocksession)
assert not venv.matching_platform()
monkeypatch.setattr(sys, "platform", "a123")
assert venv.matching_platform()
monkeypatch.setattr(sys, "platform", "b123")
assert venv.matching_platform()
monkeypatch.undo()
assert not venv.matching_platform()
@pytest.mark.parametrize("plat", ["win", "lin", ])
def test_config_parse_platform_with_factors(self, newconfig, plat, monkeypatch):
monkeypatch.setattr(sys, "platform", "win32")
config = newconfig([], """
[tox]
envlist = py27-{win,lin,osx}
[testenv]
platform =
win: win32
lin: linux2
""")
assert len(config.envconfigs) == 3
platform = config.envconfigs['py27-' + plat].platform
expected = {"win": "win32", "lin": "linux2"}.get(plat)
assert platform == expected
class TestConfigPackage:
def test_defaults(self, tmpdir, newconfig):
config = newconfig([], "")
assert config.setupdir.realpath() == tmpdir.realpath()
assert config.toxworkdir.realpath() == tmpdir.join(".tox").realpath()
envconfig = config.envconfigs['python']
assert envconfig.args_are_paths
assert not envconfig.recreate
assert not envconfig.pip_pre
def test_defaults_distshare(self, tmpdir, newconfig):
config = newconfig([], "")
assert config.distshare == config.homedir.join(".tox", "distshare")
def test_defaults_changed_dir(self, tmpdir, newconfig):
tmpdir.mkdir("abc").chdir()
config = newconfig([], "")
assert config.setupdir.realpath() == tmpdir.realpath()
assert config.toxworkdir.realpath() == tmpdir.join(".tox").realpath()
def test_project_paths(self, tmpdir, newconfig):
config = newconfig("""
[tox]
toxworkdir=%s
""" % tmpdir)
assert config.toxworkdir == tmpdir
class TestParseconfig:
def test_search_parents(self, tmpdir):
b = tmpdir.mkdir("a").mkdir("b")
toxinipath = tmpdir.ensure("tox.ini")
old = b.chdir()
try:
config = parseconfig([])
finally:
old.chdir()
assert config.toxinipath == toxinipath
def test_get_homedir(monkeypatch):
monkeypatch.setattr(py.path.local, "_gethomedir",
classmethod(lambda x: {}[1]))
assert not get_homedir()
monkeypatch.setattr(py.path.local, "_gethomedir",
classmethod(lambda x: 0 / 0))
assert not get_homedir()
monkeypatch.setattr(py.path.local, "_gethomedir",
classmethod(lambda x: "123"))
assert get_homedir() == "123"
class TestGetcontextname:
def test_blank(self, monkeypatch):
monkeypatch.setattr(os, "environ", {})
assert getcontextname() is None
def test_jenkins(self, monkeypatch):
monkeypatch.setattr(os, "environ", {"JENKINS_URL": "xyz"})
assert getcontextname() == "jenkins"
def test_hudson_legacy(self, monkeypatch):
monkeypatch.setattr(os, "environ", {"HUDSON_URL": "xyz"})
assert getcontextname() == "jenkins"
class TestIniParserAgainstCommandsKey:
"""Test parsing commands with substitutions"""
def test_command_substitution_from_other_section(self, newconfig):
config = newconfig("""
[section]
key = whatever
[testenv]
commands =
echo {[section]key}
""")
reader = SectionReader("testenv", config._cfg)
x = reader.getargvlist("commands")
assert x == [["echo", "whatever"]]
def test_command_substitution_from_other_section_multiline(self, newconfig):
"""Ensure referenced multiline commands form from other section injected
as multiple commands."""
config = newconfig("""
[section]
commands =
cmd1 param11 param12
# comment is omitted
cmd2 param21 \
param22
[base]
commands = cmd 1 \
2 3 4
cmd 2
[testenv]
commands =
{[section]commands}
{[section]commands}
# comment is omitted
echo {[base]commands}
""")
reader = SectionReader("testenv", config._cfg)
x = reader.getargvlist("commands")
assert x == [
"cmd1 param11 param12".split(),
"cmd2 param21 param22".split(),
"cmd1 param11 param12".split(),
"cmd2 param21 param22".split(),
["echo", "cmd", "1", "2", "3", "4", "cmd", "2"],
]
class TestIniParser:
def test_getstring_single(self, tmpdir, newconfig):
config = newconfig("""
[section]
key=value
""")
reader = SectionReader("section", config._cfg)
x = reader.getstring("key")
assert x == "value"
assert not reader.getstring("hello")
x = reader.getstring("hello", "world")
assert x == "world"
def test_missing_substitution(self, tmpdir, newconfig):
config = newconfig("""
[mydefault]
key2={xyz}
""")
reader = SectionReader("mydefault", config._cfg, fallbacksections=['mydefault'])
assert reader is not None
with py.test.raises(tox.exception.ConfigError):
reader.getstring("key2")
def test_getstring_fallback_sections(self, tmpdir, newconfig):
config = newconfig("""
[mydefault]
key2=value2
[section]
key=value
""")
reader = SectionReader("section", config._cfg, fallbacksections=['mydefault'])
x = reader.getstring("key2")
assert x == "value2"
x = reader.getstring("key3")
assert not x
x = reader.getstring("key3", "world")
assert x == "world"
def test_getstring_substitution(self, tmpdir, newconfig):
config = newconfig("""
[mydefault]
key2={value2}
[section]
key={value}
""")
reader = SectionReader("section", config._cfg, fallbacksections=['mydefault'])
reader.addsubstitutions(value="newvalue", value2="newvalue2")
x = reader.getstring("key2")
assert x == "newvalue2"
x = reader.getstring("key3")
assert not x
x = reader.getstring("key3", "{value2}")
assert x == "newvalue2"
def test_getlist(self, tmpdir, newconfig):
config = newconfig("""
[section]
key2=
item1
{item2}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions(item1="not", item2="grr")
x = reader.getlist("key2")
assert x == ['item1', 'grr']
def test_getdict(self, tmpdir, newconfig):
config = newconfig("""
[section]
key2=
key1=item1
key2={item2}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions(item1="not", item2="grr")
x = reader.getdict("key2")
assert 'key1' in x
assert 'key2' in x
assert x['key1'] == 'item1'
assert x['key2'] == 'grr'
x = reader.getdict("key3", {1: 2})
assert x == {1: 2}
def test_getstring_environment_substitution(self, monkeypatch, newconfig):
monkeypatch.setenv("KEY1", "hello")
config = newconfig("""
[section]
key1={env:KEY1}
key2={env:KEY2}
""")
reader = SectionReader("section", config._cfg)
x = reader.getstring("key1")
assert x == "hello"
with py.test.raises(tox.exception.ConfigError):
reader.getstring("key2")
def test_getstring_environment_substitution_with_default(self, monkeypatch, newconfig):
monkeypatch.setenv("KEY1", "hello")
config = newconfig("""
[section]
key1={env:KEY1:DEFAULT_VALUE}
key2={env:KEY2:DEFAULT_VALUE}
key3={env:KEY3:}
""")
reader = SectionReader("section", config._cfg)
x = reader.getstring("key1")
assert x == "hello"
x = reader.getstring("key2")
assert x == "DEFAULT_VALUE"
x = reader.getstring("key3")
assert x == ""
def test_value_matches_section_substitution(self):
assert is_section_substitution("{[setup]commands}")
def test_value_doesnt_match_section_substitution(self):
assert is_section_substitution("{[ ]commands}") is None
assert is_section_substitution("{[setup]}") is None
assert is_section_substitution("{[setup] commands}") is None
def test_getstring_other_section_substitution(self, newconfig):
config = newconfig("""
[section]
key = rue
[testenv]
key = t{[section]key}
""")
reader = SectionReader("testenv", config._cfg)
x = reader.getstring("key")
assert x == "true"
def test_argvlist(self, tmpdir, newconfig):
config = newconfig("""
[section]
key2=
cmd1 {item1} {item2}
cmd2 {item2}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions(item1="with space", item2="grr")
# py.test.raises(tox.exception.ConfigError,
# "reader.getargvlist('key1')")
assert reader.getargvlist('key1') == []
x = reader.getargvlist("key2")
assert x == [["cmd1", "with", "space", "grr"],
["cmd2", "grr"]]
def test_argvlist_windows_escaping(self, tmpdir, newconfig):
config = newconfig("""
[section]
comm = py.test {posargs}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions([r"hello\this"])
argv = reader.getargv("comm")
assert argv == ["py.test", "hello\\this"]
def test_argvlist_multiline(self, tmpdir, newconfig):
config = newconfig("""
[section]
key2=
cmd1 {item1} \ # a comment
{item2}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions(item1="with space", item2="grr")
# py.test.raises(tox.exception.ConfigError,
# "reader.getargvlist('key1')")
assert reader.getargvlist('key1') == []
x = reader.getargvlist("key2")
assert x == [["cmd1", "with", "space", "grr"]]
def test_argvlist_quoting_in_command(self, tmpdir, newconfig):
config = newconfig("""
[section]
key1=
cmd1 'with space' \ # a comment
'after the comment'
""")
reader = SectionReader("section", config._cfg)
x = reader.getargvlist("key1")
assert x == [["cmd1", "with space", "after the comment"]]
def test_argvlist_positional_substitution(self, tmpdir, newconfig):
config = newconfig("""
[section]
key2=
cmd1 []
cmd2 {posargs:{item2} \
other}
""")
reader = SectionReader("section", config._cfg)
posargs = ['hello', 'world']
reader.addsubstitutions(posargs, item2="value2")
# py.test.raises(tox.exception.ConfigError,
# "reader.getargvlist('key1')")
assert reader.getargvlist('key1') == []
argvlist = reader.getargvlist("key2")
assert argvlist[0] == ["cmd1"] + posargs
assert argvlist[1] == ["cmd2"] + posargs
reader = SectionReader("section", config._cfg)
reader.addsubstitutions([], item2="value2")
# py.test.raises(tox.exception.ConfigError,
# "reader.getargvlist('key1')")
assert reader.getargvlist('key1') == []
argvlist = reader.getargvlist("key2")
assert argvlist[0] == ["cmd1"]
assert argvlist[1] == ["cmd2", "value2", "other"]
def test_argvlist_quoted_posargs(self, tmpdir, newconfig):
config = newconfig("""
[section]
key2=
cmd1 --foo-args='{posargs}'
cmd2 -f '{posargs}'
cmd3 -f {posargs}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions(["foo", "bar"])
assert reader.getargvlist('key1') == []
x = reader.getargvlist("key2")
assert x == [["cmd1", "--foo-args=foo bar"],
["cmd2", "-f", "foo bar"],
["cmd3", "-f", "foo", "bar"]]
def test_argvlist_posargs_with_quotes(self, tmpdir, newconfig):
config = newconfig("""
[section]
key2=
cmd1 -f {posargs}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions(["foo", "'bar", "baz'"])
assert reader.getargvlist('key1') == []
x = reader.getargvlist("key2")
assert x == [["cmd1", "-f", "foo", "bar baz"]]
def test_positional_arguments_are_only_replaced_when_standing_alone(self, tmpdir, newconfig):
config = newconfig("""
[section]
key=
cmd0 []
cmd1 -m '[abc]'
cmd2 -m '\'something\'' []
cmd3 something[]else
""")
reader = SectionReader("section", config._cfg)
posargs = ['hello', 'world']
reader.addsubstitutions(posargs)
argvlist = reader.getargvlist('key')
assert argvlist[0] == ['cmd0'] + posargs
assert argvlist[1] == ['cmd1', '-m', '[abc]']
assert argvlist[2] == ['cmd2', '-m', "something"] + posargs
assert argvlist[3] == ['cmd3', 'something[]else']
def test_substitution_with_multiple_words(self, newconfig):
inisource = """
[section]
key = py.test -n5 --junitxml={envlogdir}/junit-{envname}.xml []
"""
config = newconfig(inisource)
reader = SectionReader("section", config._cfg)
posargs = ['hello', 'world']
reader.addsubstitutions(posargs, envlogdir='ENV_LOG_DIR', envname='ENV_NAME')
expected = [
'py.test', '-n5', '--junitxml=ENV_LOG_DIR/junit-ENV_NAME.xml', 'hello', 'world'
]
assert reader.getargvlist('key')[0] == expected
def test_getargv(self, newconfig):
config = newconfig("""
[section]
key=some command "with quoting"
""")
reader = SectionReader("section", config._cfg)
expected = ['some', 'command', 'with quoting']
assert reader.getargv('key') == expected
def test_getpath(self, tmpdir, newconfig):
config = newconfig("""
[section]
path1={HELLO}
""")
reader = SectionReader("section", config._cfg)
reader.addsubstitutions(toxinidir=tmpdir, HELLO="mypath")
x = reader.getpath("path1", tmpdir)
assert x == tmpdir.join("mypath")
def test_getbool(self, tmpdir, newconfig):
config = newconfig("""
[section]
key1=True
key2=False
key1a=true
key2a=falsE
key5=yes
""")
reader = SectionReader("section", config._cfg)
assert reader.getbool("key1") is True
assert reader.getbool("key1a") is True
assert reader.getbool("key2") is False
assert reader.getbool("key2a") is False
py.test.raises(KeyError, 'reader.getbool("key3")')
py.test.raises(tox.exception.ConfigError, 'reader.getbool("key5")')
class TestConfigTestEnv:
def test_commentchars_issue33(self, tmpdir, newconfig):
config = newconfig("""
[testenv] # hello
deps = http://abc#123
commands=
python -c "x ; y"
""")
envconfig = config.envconfigs["python"]
assert envconfig.deps[0].name == "http://abc#123"
assert envconfig.commands[0] == ["python", "-c", "x ; y"]
def test_defaults(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
commands=
xyz --abc
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
assert envconfig.commands == [["xyz", "--abc"]]
assert envconfig.changedir == config.setupdir
assert envconfig.sitepackages is False
assert envconfig.usedevelop is False
assert envconfig.ignore_errors is False
assert envconfig.envlogdir == envconfig.envdir.join("log")
assert list(envconfig.setenv.keys()) == ['PYTHONHASHSEED']
hashseed = envconfig.setenv['PYTHONHASHSEED']
assert isinstance(hashseed, str)
# The following line checks that hashseed parses to an integer.
int_hashseed = int(hashseed)
# hashseed is random by default, so we can't assert a specific value.
assert int_hashseed > 0
def test_sitepackages_switch(self, tmpdir, newconfig):
config = newconfig(["--sitepackages"], "")
envconfig = config.envconfigs['python']
assert envconfig.sitepackages is True
def test_installpkg_tops_develop(self, newconfig):
config = newconfig(["--installpkg=abc"], """
[testenv]
usedevelop = True
""")
assert not config.envconfigs["python"].usedevelop
def test_specific_command_overrides(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
commands=xyz
[testenv:py]
commands=abc
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['py']
assert envconfig.commands == [["abc"]]
def test_whitelist_externals(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
whitelist_externals = xyz
commands=xyz
[testenv:x]
[testenv:py]
whitelist_externals = xyz2
commands=abc
""")
assert len(config.envconfigs) == 2
envconfig = config.envconfigs['py']
assert envconfig.commands == [["abc"]]
assert envconfig.whitelist_externals == ["xyz2"]
envconfig = config.envconfigs['x']
assert envconfig.whitelist_externals == ["xyz"]
def test_changedir(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
changedir=xyz
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
assert envconfig.changedir.basename == "xyz"
assert envconfig.changedir == config.toxinidir.join("xyz")
def test_ignore_errors(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
ignore_errors=True
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
assert envconfig.ignore_errors is True
def test_envbindir(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
basepython=python
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
assert envconfig.envpython == envconfig.envbindir.join("python")
@pytest.mark.parametrize("bp", ["jython", "pypy", "pypy3"])
def test_envbindir_jython(self, tmpdir, newconfig, bp):
config = newconfig("""
[testenv]
basepython=%s
""" % bp)
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
# on win32 and linux virtualenv uses "bin" for pypy/jython
assert envconfig.envbindir.basename == "bin"
if bp == "jython":
assert envconfig.envpython == envconfig.envbindir.join(bp)
def test_setenv_overrides(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
setenv =
PYTHONPATH = something
ANOTHER_VAL=else
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
assert 'PYTHONPATH' in envconfig.setenv
assert 'ANOTHER_VAL' in envconfig.setenv
assert envconfig.setenv['PYTHONPATH'] == 'something'
assert envconfig.setenv['ANOTHER_VAL'] == 'else'
@pytest.mark.parametrize("plat", ["win32", "linux2"])
def test_passenv_as_multiline_list(self, tmpdir, newconfig, monkeypatch, plat):
monkeypatch.setattr(sys, "platform", plat)
monkeypatch.setenv("A123A", "a")
monkeypatch.setenv("A123B", "b")
monkeypatch.setenv("BX23", "0")
config = newconfig("""
[testenv]
passenv =
A123*
# isolated comment
B?23
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
if plat == "win32":
assert "PATHEXT" in envconfig.passenv
assert "SYSTEMDRIVE" in envconfig.passenv
assert "SYSTEMROOT" in envconfig.passenv
assert "TEMP" in envconfig.passenv
assert "TMP" in envconfig.passenv
else:
assert "TMPDIR" in envconfig.passenv
assert "PATH" in envconfig.passenv
assert "PIP_INDEX_URL" in envconfig.passenv
assert "LANG" in envconfig.passenv
assert "A123A" in envconfig.passenv
assert "A123B" in envconfig.passenv
@pytest.mark.parametrize("plat", ["win32", "linux2"])
def test_passenv_as_space_separated_list(self, tmpdir, newconfig, monkeypatch, plat):
monkeypatch.setattr(sys, "platform", plat)
monkeypatch.setenv("A123A", "a")
monkeypatch.setenv("A123B", "b")
monkeypatch.setenv("BX23", "0")
config = newconfig("""
[testenv]
passenv =
# comment
A123* B?23
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
if plat == "win32":
assert "PATHEXT" in envconfig.passenv
assert "SYSTEMDRIVE" in envconfig.passenv
assert "SYSTEMROOT" in envconfig.passenv
assert "TEMP" in envconfig.passenv
assert "TMP" in envconfig.passenv
else:
assert "TMPDIR" in envconfig.passenv
assert "PATH" in envconfig.passenv
assert "PIP_INDEX_URL" in envconfig.passenv
assert "LANG" in envconfig.passenv
assert "A123A" in envconfig.passenv
assert "A123B" in envconfig.passenv
def test_passenv_with_factor(self, tmpdir, newconfig, monkeypatch):
monkeypatch.setenv("A123A", "a")
monkeypatch.setenv("A123B", "b")
monkeypatch.setenv("A123C", "c")
monkeypatch.setenv("A123D", "d")
monkeypatch.setenv("BX23", "0")
monkeypatch.setenv("CCA43", "3")
monkeypatch.setenv("CB21", "4")
config = newconfig("""
[tox]
envlist = {x1,x2}
[testenv]
passenv =
x1: A123A CC*
x1: CB21
# passed to both environments
A123C
x2: A123B A123D
""")
assert len(config.envconfigs) == 2
assert "A123A" in config.envconfigs["x1"].passenv
assert "A123C" in config.envconfigs["x1"].passenv
assert "CCA43" in config.envconfigs["x1"].passenv
assert "CB21" in config.envconfigs["x1"].passenv
assert "A123B" not in config.envconfigs["x1"].passenv
assert "A123D" not in config.envconfigs["x1"].passenv
assert "BX23" not in config.envconfigs["x1"].passenv
assert "A123B" in config.envconfigs["x2"].passenv
assert "A123D" in config.envconfigs["x2"].passenv
assert "A123A" not in config.envconfigs["x2"].passenv
assert "A123C" in config.envconfigs["x2"].passenv
assert "CCA43" not in config.envconfigs["x2"].passenv
assert "CB21" not in config.envconfigs["x2"].passenv
assert "BX23" not in config.envconfigs["x2"].passenv
def test_passenv_from_global_env(self, tmpdir, newconfig, monkeypatch):
monkeypatch.setenv("A1", "a1")
monkeypatch.setenv("A2", "a2")
monkeypatch.setenv("TOX_TESTENV_PASSENV", "A1")
config = newconfig("""
[testenv]
passenv = A2
""")
env = config.envconfigs["python"]
assert "A1" in env.passenv
assert "A2" in env.passenv
def test_changedir_override(self, tmpdir, newconfig):
config = newconfig("""
[testenv]
changedir=xyz
[testenv:python]
changedir=abc
basepython=python2.6
""")
assert len(config.envconfigs) == 1
envconfig = config.envconfigs['python']
assert envconfig.changedir.basename == "abc"
assert envconfig.changedir == config.setupdir.join("abc")
def test_install_command_setting(self, newconfig):
config = newconfig("""
[testenv]
install_command=some_install {packages}
""")
envconfig = config.envconfigs['python']
assert envconfig.install_command == [
'some_install', '{packages}']
def test_install_command_must_contain_packages(self, newconfig):
py.test.raises(tox.exception.ConfigError, newconfig, """
[testenv]
install_command=pip install
""")
def test_install_command_substitutions(self, newconfig):
config = newconfig("""
[testenv]
install_command=some_install --arg={toxinidir}/foo \
{envname} {opts} {packages}
""")
envconfig = config.envconfigs['python']
assert envconfig.install_command == [
'some_install', '--arg=%s/foo' % config.toxinidir, 'python',
'{opts}', '{packages}']
def test_pip_pre(self, newconfig):
config = newconfig("""
[testenv]
pip_pre=true
""")
envconfig = config.envconfigs['python']
assert envconfig.pip_pre
def test_pip_pre_cmdline_override(self, newconfig):
config = newconfig(
['--pre'],
"""
[testenv]
pip_pre=false
""")
envconfig = config.envconfigs['python']
assert envconfig.pip_pre
def test_downloadcache(self, newconfig, monkeypatch):
monkeypatch.delenv("PIP_DOWNLOAD_CACHE", raising=False)
config = newconfig("""
[testenv]
downloadcache=thecache
""")
envconfig = config.envconfigs['python']
assert envconfig.downloadcache.basename == 'thecache'
def test_downloadcache_env_override(self, newconfig, monkeypatch):
monkeypatch.setenv("PIP_DOWNLOAD_CACHE", 'fromenv')
config = newconfig("""
[testenv]
downloadcache=somepath
""")
envconfig = config.envconfigs['python']
assert envconfig.downloadcache.basename == "fromenv"
def test_downloadcache_only_if_in_config(self, newconfig, tmpdir,
monkeypatch):
monkeypatch.setenv("PIP_DOWNLOAD_CACHE", tmpdir)
config = newconfig('')
envconfig = config.envconfigs['python']
assert not envconfig.downloadcache
def test_simple(tmpdir, newconfig):
config = newconfig("""
[testenv:py26]
basepython=python2.6
[testenv:py27]
basepython=python2.7
""")
assert len(config.envconfigs) == 2
assert "py26" in config.envconfigs
assert "py27" in config.envconfigs
def test_substitution_error(tmpdir, newconfig):
py.test.raises(tox.exception.ConfigError, newconfig, """
[testenv:py27]
basepython={xyz}
""")
def test_substitution_defaults(tmpdir, newconfig):
config = newconfig("""
[testenv:py27]
commands =
{toxinidir}
{toxworkdir}
{envdir}
{envbindir}
{envtmpdir}
{envpython}
{homedir}
{distshare}
{envlogdir}
""")
conf = config.envconfigs['py27']
argv = conf.commands
assert argv[0][0] == config.toxinidir
assert argv[1][0] == config.toxworkdir
assert argv[2][0] == conf.envdir
assert argv[3][0] == conf.envbindir
assert argv[4][0] == conf.envtmpdir
assert argv[5][0] == conf.envpython
assert argv[6][0] == str(config.homedir)
assert argv[7][0] == config.homedir.join(".tox", "distshare")
assert argv[8][0] == conf.envlogdir
def test_substitution_positional(self, newconfig):
inisource = """
[testenv:py27]
commands =
cmd1 [hello] \
world
cmd1 {posargs:hello} \
world
"""
conf = newconfig([], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1", "[hello]", "world"]
assert argv[1] == ["cmd1", "hello", "world"]
conf = newconfig(['brave', 'new'], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1", "[hello]", "world"]
assert argv[1] == ["cmd1", "brave", "new", "world"]
def test_substitution_noargs_issue240(self, newconfig):
inisource = """
[testenv]
commands = echo {posargs:foo}
"""
conf = newconfig([""], inisource).envconfigs['python']
argv = conf.commands
assert argv[0] == ["echo"]
def test_posargs_backslashed_or_quoted(self, tmpdir, newconfig):
inisource = """
[testenv:py27]
commands =
echo "\{posargs\}" = {posargs}
echo "posargs = " "{posargs}"
"""
conf = newconfig([], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ['echo', '\\{posargs\\}', '=']
assert argv[1] == ['echo', 'posargs = ', ""]
conf = newconfig(['dog', 'cat'], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ['echo', '\\{posargs\\}', '=', 'dog', 'cat']
assert argv[1] == ['echo', 'posargs = ', 'dog cat']
def test_rewrite_posargs(self, tmpdir, newconfig):
inisource = """
[testenv:py27]
args_are_paths = True
changedir = tests
commands = cmd1 {posargs:hello}
"""
conf = newconfig([], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1", "hello"]
conf = newconfig(["tests/hello"], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1", "tests/hello"]
tmpdir.ensure("tests", "hello")
conf = newconfig(["tests/hello"], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1", "hello"]
def test_rewrite_simple_posargs(self, tmpdir, newconfig):
inisource = """
[testenv:py27]
args_are_paths = True
changedir = tests
commands = cmd1 {posargs}
"""
conf = newconfig([], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1"]
conf = newconfig(["tests/hello"], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1", "tests/hello"]
tmpdir.ensure("tests", "hello")
conf = newconfig(["tests/hello"], inisource).envconfigs['py27']
argv = conf.commands
assert argv[0] == ["cmd1", "hello"]
def test_take_dependencies_from_other_testenv(self, newconfig):
inisource = """
[testenv]
deps=
pytest
pytest-cov
[testenv:py27]
deps=
{[testenv]deps}
fun
"""
conf = newconfig([], inisource).envconfigs['py27']
packages = [dep.name for dep in conf.deps]
assert packages == ['pytest', 'pytest-cov', 'fun']
def test_take_dependencies_from_other_section(self, newconfig):
inisource = """
[testing:pytest]
deps=
pytest
pytest-cov
[testing:mock]
deps=
mock
[testenv]
deps=
{[testing:pytest]deps}
{[testing:mock]deps}
fun
"""
conf = newconfig([], inisource)
env = conf.envconfigs['python']
packages = [dep.name for dep in env.deps]
assert packages == ['pytest', 'pytest-cov', 'mock', 'fun']
def test_multilevel_substitution(self, newconfig):
inisource = """
[testing:pytest]
deps=
pytest
pytest-cov
[testing:mock]
deps=
mock
[testing]
deps=
{[testing:pytest]deps}
{[testing:mock]deps}
[testenv]
deps=
{[testing]deps}
fun
"""
conf = newconfig([], inisource)
env = conf.envconfigs['python']
packages = [dep.name for dep in env.deps]
assert packages == ['pytest', 'pytest-cov', 'mock', 'fun']
def test_recursive_substitution_cycle_fails(self, newconfig):
inisource = """
[testing:pytest]
deps=
{[testing:mock]deps}
[testing:mock]
deps=
{[testing:pytest]deps}
[testenv]
deps=
{[testing:pytest]deps}
"""
py.test.raises(ValueError, newconfig, [], inisource)
def test_single_value_from_other_section(self, newconfig, tmpdir):
inisource = """
[common]
changedir = testing
[testenv]
changedir = {[common]changedir}
"""
conf = newconfig([], inisource).envconfigs['python']
assert conf.changedir.basename == 'testing'
assert conf.changedir.dirpath().realpath() == tmpdir.realpath()
def test_factors(self, newconfig):
inisource = """
[tox]
envlist = a-x,b
[testenv]
deps=
dep-all
a: dep-a
b: dep-b
x: dep-x
"""
conf = newconfig([], inisource)
configs = conf.envconfigs
assert [dep.name for dep in configs['a-x'].deps] == \
["dep-all", "dep-a", "dep-x"]
assert [dep.name for dep in configs['b'].deps] == ["dep-all", "dep-b"]
def test_factor_ops(self, newconfig):
inisource = """
[tox]
envlist = {a,b}-{x,y}
[testenv]
deps=
a,b: dep-a-or-b
a-x: dep-a-and-x
{a,b}-y: dep-ab-and-y
"""
configs = newconfig([], inisource).envconfigs
get_deps = lambda env: [dep.name for dep in configs[env].deps]
assert get_deps("a-x") == ["dep-a-or-b", "dep-a-and-x"]
assert get_deps("a-y") == ["dep-a-or-b", "dep-ab-and-y"]
assert get_deps("b-x") == ["dep-a-or-b"]
assert get_deps("b-y") == ["dep-a-or-b", "dep-ab-and-y"]
def test_default_factors(self, newconfig):
inisource = """
[tox]
envlist = py{26,27,33,34}-dep
[testenv]
deps=
dep: dep
"""
conf = newconfig([], inisource)
configs = conf.envconfigs
for name, config in configs.items():
assert config.basepython == 'python%s.%s' % (name[2], name[3])
@pytest.mark.issue188
def test_factors_in_boolean(self, newconfig):
inisource = """
[tox]
envlist = py{27,33}
[testenv]
recreate =
py27: True
"""
configs = newconfig([], inisource).envconfigs
assert configs["py27"].recreate
assert not configs["py33"].recreate
@pytest.mark.issue190
def test_factors_in_setenv(self, newconfig):
inisource = """
[tox]
envlist = py27,py26
[testenv]
setenv =
py27: X = 1
"""
configs = newconfig([], inisource).envconfigs
assert configs["py27"].setenv["X"] == "1"
assert "X" not in configs["py26"].setenv
@pytest.mark.issue191
def test_factor_use_not_checked(self, newconfig):
inisource = """
[tox]
envlist = py27-{a,b}
[testenv]
deps = b: test
"""
configs = newconfig([], inisource).envconfigs
assert set(configs.keys()) == set(['py27-a', 'py27-b'])
@pytest.mark.issue198
def test_factors_groups_touch(self, newconfig):
inisource = """
[tox]
envlist = {a,b}{-x,}
[testenv]
deps=
a,b,x,y: dep
"""
configs = newconfig([], inisource).envconfigs
assert set(configs.keys()) == set(['a', 'a-x', 'b', 'b-x'])
def test_period_in_factor(self, newconfig):
inisource = """
[tox]
envlist = py27-{django1.6,django1.7}
[testenv]
deps =
django1.6: Django==1.6
django1.7: Django==1.7
"""
configs = newconfig([], inisource).envconfigs
assert sorted(configs) == ["py27-django1.6", "py27-django1.7"]
assert [d.name for d in configs["py27-django1.6"].deps] \
== ["Django==1.6"]
class TestGlobalOptions:
def test_notest(self, newconfig):
config = newconfig([], "")
assert not config.option.notest
config = newconfig(["--notest"], "")
assert config.option.notest
def test_verbosity(self, newconfig):
config = newconfig([], "")
assert config.option.verbosity == 0
config = newconfig(["-v"], "")
assert config.option.verbosity == 1
config = newconfig(["-vv"], "")
assert config.option.verbosity == 2
def test_substitution_jenkins_default(self, tmpdir,
monkeypatch, newconfig):
monkeypatch.setenv("HUDSON_URL", "xyz")
config = newconfig("""
[testenv:py27]
commands =
{distshare}
""")
conf = config.envconfigs['py27']
argv = conf.commands
expect_path = config.toxworkdir.join("distshare")
assert argv[0][0] == expect_path
def test_substitution_jenkins_context(self, tmpdir, monkeypatch, newconfig):
monkeypatch.setenv("HUDSON_URL", "xyz")
monkeypatch.setenv("WORKSPACE", tmpdir)
config = newconfig("""
[tox:jenkins]
distshare = {env:WORKSPACE}/hello
[testenv:py27]
commands =
{distshare}
""")
conf = config.envconfigs['py27']
argv = conf.commands
assert argv[0][0] == config.distshare
assert config.distshare == tmpdir.join("hello")
def test_sdist_specification(self, tmpdir, newconfig):
config = newconfig("""
[tox]
sdistsrc = {distshare}/xyz.zip
""")
assert config.sdistsrc == config.distshare.join("xyz.zip")
config = newconfig([], "")
assert not config.sdistsrc
def test_env_selection(self, tmpdir, newconfig, monkeypatch):
inisource = """
[tox]
envlist = py26
[testenv:py26]
basepython=python2.6
[testenv:py31]
basepython=python3.1
[testenv:py27]
basepython=python2.7
"""
# py.test.raises(tox.exception.ConfigError,
# "newconfig(['-exyz'], inisource)")
config = newconfig([], inisource)
assert config.envlist == ["py26"]
config = newconfig(["-epy31"], inisource)
assert config.envlist == ["py31"]
monkeypatch.setenv("TOXENV", "py31,py26")
config = newconfig([], inisource)
assert config.envlist == ["py31", "py26"]
monkeypatch.setenv("TOXENV", "ALL")
config = newconfig([], inisource)
assert config.envlist == ['py26', 'py27', 'py31']
config = newconfig(["-eALL"], inisource)
assert config.envlist == ['py26', 'py27', 'py31']
def test_py_venv(self, tmpdir, newconfig, monkeypatch):
config = newconfig(["-epy"], "")
env = config.envconfigs['py']
assert str(env.basepython) == sys.executable
def test_default_environments(self, tmpdir, newconfig, monkeypatch):
envs = "py26,py27,py32,py33,py34,py35,py36,jython,pypy,pypy3"
inisource = """
[tox]
envlist = %s
""" % envs
config = newconfig([], inisource)
envlist = envs.split(",")
assert config.envlist == envlist
for name in config.envlist:
env = config.envconfigs[name]
if name == "jython":
assert env.basepython == "jython"
elif name.startswith("pypy"):
assert env.basepython == name
else:
assert name.startswith("py")
bp = "python%s.%s" % (name[2], name[3])
assert env.basepython == bp
def test_envlist_expansion(self, newconfig):
inisource = """
[tox]
envlist = py{26,27},docs
"""
config = newconfig([], inisource)
assert config.envlist == ["py26", "py27", "docs"]
def test_envlist_cross_product(self, newconfig):
inisource = """
[tox]
envlist = py{26,27}-dep{1,2}
"""
config = newconfig([], inisource)
assert config.envlist == \
["py26-dep1", "py26-dep2", "py27-dep1", "py27-dep2"]
def test_envlist_multiline(self, newconfig):
inisource = """
[tox]
envlist =
py27
py34
"""
config = newconfig([], inisource)
assert config.envlist == \
["py27", "py34"]
def test_minversion(self, tmpdir, newconfig, monkeypatch):
inisource = """
[tox]
minversion = 3.0
"""
config = newconfig([], inisource)
assert config.minversion == "3.0"
def test_skip_missing_interpreters_true(self, tmpdir, newconfig, monkeypatch):
inisource = """
[tox]
skip_missing_interpreters = True
"""
config = newconfig([], inisource)
assert config.option.skip_missing_interpreters
def test_skip_missing_interpreters_false(self, tmpdir, newconfig, monkeypatch):
inisource = """
[tox]
skip_missing_interpreters = False
"""
config = newconfig([], inisource)
assert not config.option.skip_missing_interpreters
def test_defaultenv_commandline(self, tmpdir, newconfig, monkeypatch):
config = newconfig(["-epy27"], "")
env = config.envconfigs['py27']
assert env.basepython == "python2.7"
assert not env.commands
def test_defaultenv_partial_override(self, tmpdir, newconfig, monkeypatch):
inisource = """
[tox]
envlist = py27
[testenv:py27]
commands= xyz
"""
config = newconfig([], inisource)
env = config.envconfigs['py27']
assert env.basepython == "python2.7"
assert env.commands == [['xyz']]
class TestHashseedOption:
def _get_envconfigs(self, newconfig, args=None, tox_ini=None,
make_hashseed=None):
if args is None:
args = []
if tox_ini is None:
tox_ini = """
[testenv]
"""
if make_hashseed is None:
make_hashseed = lambda: '123456789'
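# Temporarily swap out tox.config.make_hashseed so the generated PYTHONHASHSEED
# is deterministic, restoring the real function afterwards.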
original_make_hashseed = tox.config.make_hashseed
tox.config.make_hashseed = make_hashseed
try:
config = newconfig(args, tox_ini)
finally:
tox.config.make_hashseed = original_make_hashseed
return config.envconfigs
def _get_envconfig(self, newconfig, args=None, tox_ini=None):
envconfigs = self._get_envconfigs(newconfig, args=args,
tox_ini=tox_ini)
return envconfigs["python"]
def _check_hashseed(self, envconfig, expected):
assert envconfig.setenv == {'PYTHONHASHSEED': expected}
def _check_testenv(self, newconfig, expected, args=None, tox_ini=None):
envconfig = self._get_envconfig(newconfig, args=args, tox_ini=tox_ini)
self._check_hashseed(envconfig, expected)
def test_default(self, tmpdir, newconfig):
self._check_testenv(newconfig, '123456789')
def test_passing_integer(self, tmpdir, newconfig):
args = ['--hashseed', '1']
self._check_testenv(newconfig, '1', args=args)
def test_passing_string(self, tmpdir, newconfig):
args = ['--hashseed', 'random']
self._check_testenv(newconfig, 'random', args=args)
def test_passing_empty_string(self, tmpdir, newconfig):
args = ['--hashseed', '']
self._check_testenv(newconfig, '', args=args)
@pytest.mark.xfail(sys.version_info >= (3, 2),
reason="at least Debian python 3.2/3.3 have a bug: "
"http://bugs.python.org/issue11884")
def test_passing_no_argument(self, tmpdir, newconfig):
"""Test that passing no arguments to --hashseed is not allowed."""
args = ['--hashseed']
try:
self._check_testenv(newconfig, '', args=args)
except SystemExit:
e = sys.exc_info()[1]
assert e.code == 2
return
assert False # getting here means we failed the test.
def test_setenv(self, tmpdir, newconfig):
"""Check that setenv takes precedence."""
tox_ini = """
[testenv]
setenv =
PYTHONHASHSEED = 2
"""
self._check_testenv(newconfig, '2', tox_ini=tox_ini)
args = ['--hashseed', '1']
self._check_testenv(newconfig, '2', args=args, tox_ini=tox_ini)
def test_noset(self, tmpdir, newconfig):
args = ['--hashseed', 'noset']
envconfig = self._get_envconfig(newconfig, args=args)
assert envconfig.setenv == {}
def test_noset_with_setenv(self, tmpdir, newconfig):
tox_ini = """
[testenv]
setenv =
PYTHONHASHSEED = 2
"""
args = ['--hashseed', 'noset']
self._check_testenv(newconfig, '2', args=args, tox_ini=tox_ini)
def test_one_random_hashseed(self, tmpdir, newconfig):
"""Check that different testenvs use the same random seed."""
tox_ini = """
[testenv:hash1]
[testenv:hash2]
"""
next_seed = [1000]
# This function is guaranteed to generate a different value each time.
def make_hashseed():
next_seed[0] += 1
return str(next_seed[0])
# Check that make_hashseed() works.
assert make_hashseed() == '1001'
envconfigs = self._get_envconfigs(newconfig, tox_ini=tox_ini,
make_hashseed=make_hashseed)
self._check_hashseed(envconfigs["hash1"], '1002')
# Check that hash2's value is not '1003', for example.
self._check_hashseed(envconfigs["hash2"], '1002')
def test_setenv_in_one_testenv(self, tmpdir, newconfig):
"""Check using setenv in one of multiple testenvs."""
tox_ini = """
[testenv:hash1]
setenv =
PYTHONHASHSEED = 2
[testenv:hash2]
"""
envconfigs = self._get_envconfigs(newconfig, tox_ini=tox_ini)
self._check_hashseed(envconfigs["hash1"], '2')
self._check_hashseed(envconfigs["hash2"], '123456789')
class TestIndexServer:
def test_indexserver(self, tmpdir, newconfig):
config = newconfig("""
[tox]
indexserver =
name1 = XYZ
name2 = ABC
""")
assert config.indexserver['default'].url is None
assert config.indexserver['name1'].url == "XYZ"
assert config.indexserver['name2'].url == "ABC"
def test_parse_indexserver(self, newconfig):
inisource = """
[tox]
indexserver =
default = http://pypi.testrun.org
name1 = whatever
"""
config = newconfig([], inisource)
assert config.indexserver['default'].url == "http://pypi.testrun.org"
assert config.indexserver['name1'].url == "whatever"
config = newconfig(['-i', 'qwe'], inisource)
assert config.indexserver['default'].url == "qwe"
assert config.indexserver['name1'].url == "whatever"
config = newconfig(['-i', 'name1=abc', '-i', 'qwe2'], inisource)
assert config.indexserver['default'].url == "qwe2"
assert config.indexserver['name1'].url == "abc"
config = newconfig(["-i", "ALL=xzy"], inisource)
assert len(config.indexserver) == 2
assert config.indexserver["default"].url == "xzy"
assert config.indexserver["name1"].url == "xzy"
def test_multiple_homedir_relative_local_indexservers(self, newconfig):
inisource = """
[tox]
indexserver =
default = file://{homedir}/.pip/downloads/simple
local1 = file://{homedir}/.pip/downloads/simple
local2 = file://{toxinidir}/downloads/simple
pypi = http://pypi.python.org/simple
"""
config = newconfig([], inisource)
expected = "file://%s/.pip/downloads/simple" % config.homedir
assert config.indexserver['default'].url == expected
assert config.indexserver['local1'].url == config.indexserver['default'].url
class TestParseEnv:
def test_parse_recreate(self, newconfig):
inisource = ""
config = newconfig([], inisource)
assert not config.envconfigs['python'].recreate
config = newconfig(['--recreate'], inisource)
assert config.envconfigs['python'].recreate
config = newconfig(['-r'], inisource)
assert config.envconfigs['python'].recreate
inisource = """
[testenv:hello]
recreate = True
"""
config = newconfig([], inisource)
assert config.envconfigs['hello'].recreate
class TestCmdInvocation:
def test_help(self, cmd):
result = cmd.run("tox", "-h")
assert not result.ret
result.stdout.fnmatch_lines([
"*help*",
])
def test_version(self, cmd):
result = cmd.run("tox", "--version")
assert not result.ret
stdout = result.stdout.str()
assert tox.__version__ in stdout
assert "imported from" in stdout
def test_listenvs(self, cmd, initproj):
initproj('listenvs', filedefs={
'tox.ini': '''
[tox]
envlist=py26,py27,py33,pypy,docs
[testenv:notincluded]
changedir = whatever
[testenv:docs]
changedir = docs
''',
})
result = cmd.run("tox", "-l")
result.stdout.fnmatch_lines("""
*py26*
*py27*
*py33*
*pypy*
*docs*
""")
def test_config_specific_ini(self, tmpdir, cmd):
ini = tmpdir.ensure("hello.ini")
result = cmd.run("tox", "-c", ini, "--showconfig")
assert not result.ret
result.stdout.fnmatch_lines([
"*config-file*hello.ini*",
])
def test_no_tox_ini(self, cmd, initproj):
initproj("noini-0.5", )
result = cmd.run("tox")
assert result.ret
result.stderr.fnmatch_lines([
"*ERROR*tox.ini*not*found*",
])
def test_showconfig_with_force_dep_version(self, cmd, initproj):
initproj('force_dep_version', filedefs={
'tox.ini': '''
[tox]
[testenv]
deps=
dep1==2.3
dep2
''',
})
result = cmd.run("tox", "--showconfig")
assert result.ret == 0
result.stdout.fnmatch_lines([
r'*deps*dep1==2.3, dep2*',
])
# override dep1 specific version, and force version for dep2
result = cmd.run("tox", "--showconfig", "--force-dep=dep1",
"--force-dep=dep2==5.0")
assert result.ret == 0
result.stdout.fnmatch_lines([
r'*deps*dep1, dep2==5.0*',
])
@pytest.mark.parametrize("cmdline,envlist", [
("-e py26", ['py26']),
("-e py26,py33", ['py26', 'py33']),
("-e py26,py26", ['py26', 'py26']),
("-e py26,py33 -e py33,py27", ['py26', 'py33', 'py33', 'py27'])
])
def test_env_spec(cmdline, envlist):
args = cmdline.split()
config = parseconfig(args)
assert config.envlist == envlist
class TestCommandParser:
def test_command_parser_for_word(self):
p = CommandParser('word')
# import pytest; pytest.set_trace()
assert list(p.words()) == ['word']
def test_command_parser_for_posargs(self):
p = CommandParser('[]')
assert list(p.words()) == ['[]']
def test_command_parser_for_multiple_words(self):
p = CommandParser('w1 w2 w3 ')
assert list(p.words()) == ['w1', ' ', 'w2', ' ', 'w3']
def test_command_parser_for_substitution_with_spaces(self):
p = CommandParser('{sub:something with spaces}')
assert list(p.words()) == ['{sub:something with spaces}']
def test_command_parser_with_complex_word_set(self):
complex_case = (
'word [] [literal] {something} {some:other thing} w{ord} w{or}d w{ord} '
'w{o:rd} w{o:r}d {w:or}d w[]ord {posargs:{a key}}')
p = CommandParser(complex_case)
parsed = list(p.words())
expected = [
'word', ' ', '[]', ' ', '[literal]', ' ', '{something}', ' ', '{some:other thing}',
' ', 'w', '{ord}', ' ', 'w', '{or}', 'd', ' ', 'w', '{ord}', ' ', 'w', '{o:rd}', ' ',
'w', '{o:r}', 'd', ' ', '{w:or}', 'd',
' ', 'w[]ord', ' ', '{posargs:{a key}}',
]
assert parsed == expected
def test_command_with_runs_of_whitespace(self):
cmd = "cmd1 {item1}\n {item2}"
p = CommandParser(cmd)
parsed = list(p.words())
assert parsed == ['cmd1', ' ', '{item1}', '\n ', '{item2}']
def test_command_with_split_line_in_subst_arguments(self):
cmd = dedent(""" cmd2 {posargs:{item2}
other}""")
p = CommandParser(cmd)
parsed = list(p.words())
assert parsed == ['cmd2', ' ', '{posargs:{item2}\n other}']
def test_command_parsing_for_issue_10(self):
cmd = "nosetests -v -a !deferred --with-doctest []"
p = CommandParser(cmd)
parsed = list(p.words())
assert parsed == [
'nosetests', ' ', '-v', ' ', '-a', ' ', '!deferred', ' ',
'--with-doctest', ' ', '[]'
]
@pytest.mark.skipif("sys.platform != 'win32'")
def test_commands_with_backslash(self, newconfig):
config = newconfig([r"hello\world"], """
[testenv:py26]
commands = some {posargs}
""")
envconfig = config.envconfigs["py26"]
assert envconfig.commands[0] == ["some", r"hello\world"]
|
kimh/circleci-python-sandbox
|
tox/tests/test_config.py
|
Python
|
apache-2.0
| 60,658
|
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import json
import requests
from pypuppetdb.errors import (
ImproperlyConfiguredError,
EmptyResponseError,
UnsupportedVersionError,
APIError,
)
log = logging.getLogger(__name__)
API_VERSIONS = {
2: 'v2',
3: 'v3',
}
ENDPOINTS = {
2: {
'facts': 'facts',
'fact-names': 'fact-names',
'nodes': 'nodes',
'resources': 'resources',
'metrics': 'metrics',
'mbean': 'metrics/mbean',
},
3: {
'facts': 'facts',
'fact-names': 'fact-names',
'nodes': 'nodes',
'resources': 'resources',
'catalogs': 'catalogs',
'metrics': 'metrics',
'mbean': 'metrics/mbean',
'reports': 'reports',
'events': 'events',
'event-counts': 'event-counts',
'aggregate-event-counts': 'aggregate-event-counts',
'server-time': 'server-time',
'version': 'version',
},
}
ERROR_STRINGS = {
'timeout': 'Connection to PuppetDB timed out on',
'refused': 'Could not reach PuppetDB on',
}
class BaseAPI(object):
"""This is a Base or Abstract class and is not meant to be instantiated
or used directly.
The BaseAPI object defines a set of methods that can be
reused across different versions of the PuppetDB API. If querying for a
certain resource is done in an identical fashion across different versions
it will be implemented here and should be overridden in their respective
versions if they deviate.
If :attr:`ssl` is set to `True` but either :attr:`ssl_key` or\
:attr:`ssl_cert` are `None` this will raise an error.
When at initialisation :obj:`api_version` isn't found in\
:const:`API_VERSIONS` this will raise an error.
:param api_version: Version of the API we're initialising.
:type api_version: :obj:`int`
:param host: (optional) Hostname or IP of PuppetDB.
:type host: :obj:`string`
:param port: (optional) Port on which to talk to PuppetDB.
:type port: :obj:`int`
:param ssl_verify: (optional) Verify PuppetDB server certificate.
:type ssl_verify: :obj:`bool`
:param ssl_key: (optional) Path to our client secret key.
:type ssl_key: :obj:`None` or :obj:`string` representing a filesystem\
path.
:param ssl_cert: (optional) Path to our client certificate.
:type ssl_cert: :obj:`None` or :obj:`string` representing a filesystem\
path.
:param timeout: (optional) Number of seconds to wait for a response.
:type timeout: :obj:`int`
:param protocol: (optional) Explicitly specify the protocol to be used
(especially handy when using HTTPS with ssl_verify=False and
without certs)
:type protocol: :obj:`None` or :obj:`string`
:param url_path: (optional) The URL path where PuppetDB is served
(if not at the root / path)
:type url_path: :obj:`None` or :obj:`string`
:param username: (optional) The username to use for HTTP basic
authentication
:type username: :obj:`None` or :obj:`string`
:param password: (optional) The password to use for HTTP basic
authentication
:type password: :obj:`None` or :obj:`string`
:raises: :class:`~pypuppetdb.errors.ImproperlyConfiguredError`
:raises: :class:`~pypuppetdb.errors.UnsupportedVersionError`
"""
def __init__(self, api_version, host='localhost', port=8080,
ssl_verify=True, ssl_key=None, ssl_cert=None, timeout=10,
protocol=None, url_path=None, username=None, password=None):
"""Initialises our BaseAPI object passing the parameters needed in
order to be able to create the connection strings, set up SSL and
timeouts and so forth."""
if api_version in API_VERSIONS:
self.api_version = API_VERSIONS[api_version]
else:
raise UnsupportedVersionError
self.host = host
self.port = port
self.ssl_verify = ssl_verify
self.ssl_key = ssl_key
self.ssl_cert = ssl_cert
        self.timeout = timeout
        # Track the total-count header of the last request so the `total`
        # property is defined even before the first query is made.
        self.last_total = None
# Standardise the URL path to a format similar to /puppetdb
if url_path:
if not url_path.startswith('/'):
url_path = '/' + url_path
if url_path.endswith('/'):
url_path = url_path[:-1]
else:
url_path = ''
self.url_path = url_path
if username and password:
self.username = username
self.password = password
else:
self.username = None
self.password = None
self.endpoints = ENDPOINTS[api_version]
self._session = requests.Session()
self._session.headers = {
'content-type': 'application/json',
'accept': 'application/json',
'accept-charset': 'utf-8'
}
if protocol is not None:
protocol = protocol.lower()
if protocol not in ['http', 'https']:
raise ValueError('Protocol specified must be http or https')
self.protocol = protocol
elif self.ssl_key is not None and self.ssl_cert is not None:
self.protocol = 'https'
else:
self.protocol = 'http'
@property
def version(self):
"""The version of the API we're querying against.
:returns: Current API version.
:rtype: :obj:`string`"""
return self.api_version
@property
def base_url(self):
"""A base_url that will be used to construct the final
URL we're going to query against.
:returns: A URL of the form: ``proto://host:port``.
:rtype: :obj:`string`
"""
return '{proto}://{host}:{port}{url_path}'.format(
proto=self.protocol,
host=self.host,
port=self.port,
url_path=self.url_path,
)
@property
def total(self):
"""The total-count of the last request to PuppetDB
if enabled as parameter in _query method
:returns Number of total results
:rtype :obj:`int`
"""
if self.last_total is not None:
return int(self.last_total)
def _normalize_resource_type(self, type_):
"""Normalizes the type passed to the api by capitalizing each part
of the type. For example:
sysctl::value -> Sysctl::Value
user -> User
"""
return '::'.join([s.capitalize() for s in type_.split('::')])
def _url(self, endpoint, path=None):
"""The complete URL we will end up querying. Depending on the
endpoint we pass in this will result in different URL's with
different prefixes.
:param endpoint: The PuppetDB API endpoint we want to query.
:type endpoint: :obj:`string`
:param path: An additional path if we don't wish to query the\
bare endpoint.
:type path: :obj:`string`
:returns: A URL constructed from :func:`base_url` with the\
          appropriate API version/prefix and the rest of the path added\
to it.
:rtype: :obj:`string`
"""
log.debug('_url called with endpoint: {0} and path: {1}'.format(
endpoint, path))
if endpoint in self.endpoints:
api_prefix = self.api_version
endpoint = self.endpoints[endpoint]
else:
# If we reach this we're trying to query an endpoint that doesn't
# exist. This shouldn't happen unless someone made a booboo.
raise APIError
url = '{base_url}/{api_prefix}/{endpoint}'.format(
base_url=self.base_url,
api_prefix=api_prefix,
endpoint=endpoint,
)
if path is not None:
url = '{0}/{1}'.format(url, path)
return url
def _query(self, endpoint, path=None, query=None,
order_by=None, limit=None, offset=None, include_total=False,
summarize_by=None, count_by=None, count_filter=None):
"""This method actually querries PuppetDB. Provided an endpoint and an
optional path and/or query it will fire a request at PuppetDB. If
PuppetDB can be reached and answers within the timeout we'll decode
the response and give it back or raise for the HTTP Status Code
PuppetDB gave back.
:param endpoint: The PuppetDB API endpoint we want to query.
:type endpoint: :obj:`string`
:param path: An additional path if we don't wish to query the\
bare endpoint.
:type path: :obj:`string`
:param query: (optional) A query to further narrow down the resultset.
:type query: :obj:`string`
:param order_by: (optional) Set the order parameters for the resultset.
:type order_by: :obj:`string`
        :param limit: (optional) Tell PuppetDB to limit its response to this\
number of objects.
:type limit: :obj:`int`
        :param offset: (optional) Tell PuppetDB to start its response from\
the given offset. This is useful for implementing pagination\
but is not supported just yet.
:type offset: :obj:`string`
:param include_total: (optional) Include the total number of results
        :type include_total: :obj:`bool`
:param summarize_by: (optional) Specify what type of object you'd like\
to see counts at the event-counts and aggregate-event-counts \
endpoints
:type summarize_by: :obj:`string`
:param count_by: (optional) Specify what type of object is counted
:type count_by: :obj:`string`
:param count_filter: (optional) Specify a filter for the results
:type count_filter: :obj:`string`
:raises: :class:`~pypuppetdb.errors.EmptyResponseError`
:returns: The decoded response from PuppetDB
:rtype: :obj:`dict` or :obj:`list`
"""
log.debug('_query called with endpoint: {0}, path: {1}, query: {2}, '
'limit: {3}, offset: {4}, summarize_by {5}, count_by {6}, '
'count_filter: {7}'.format(endpoint, path, query, limit,
offset, summarize_by, count_by,
count_filter))
url = self._url(endpoint, path=path)
payload = {}
if query is not None:
payload['query'] = query
if order_by is not None:
payload['order-by'] = order_by
if limit is not None:
payload['limit'] = limit
if include_total is True:
payload['include-total'] = json.dumps(include_total)
if offset is not None:
payload['offset'] = offset
if summarize_by is not None:
payload['summarize-by'] = summarize_by
if count_by is not None:
payload['count-by'] = count_by
if count_filter is not None:
payload['count-filter'] = count_filter
        if not payload:
payload = None
try:
r = self._session.get(url, params=payload, verify=self.ssl_verify,
cert=(self.ssl_cert, self.ssl_key),
timeout=self.timeout,
auth=(self.username, self.password))
r.raise_for_status()
# get total number of results if requested with include-total
# just a quick hack - needs improvement
if 'X-Records' in r.headers:
self.last_total = r.headers['X-Records']
else:
self.last_total = None
json_body = r.json()
if json_body is not None:
return json_body
else:
del json_body
raise EmptyResponseError
except requests.exceptions.Timeout:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['timeout'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.ConnectionError:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['refused'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.HTTPError as err:
log.error("{0} {1}:{2} over {3}.".format(err.response.text,
self.host, self.port,
self.protocol.upper()))
raise
# Method stubs
def nodes(self):
raise NotImplementedError
def node(self):
raise NotImplementedError
def facts(self):
raise NotImplementedError
def resources(self):
raise NotImplementedError
def metric(self, metric):
"""Query for a specific metrc.
:param metric: The name of the metric we want.
:type metric: :obj:`string`
:returns: The return of :meth:`~pypuppetdb.api.BaseAPI._query`.
"""
return self._query('mbean', path=metric)
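# --- Editor's hypothetical usage sketch (not part of the original module) ---
# Illustrates how the BaseAPI plumbing above is normally exercised; the host
# name is a placeholder, and instantiating BaseAPI directly is done here only
# for illustration (the real entry points are the versioned API subclasses).
#
#     api = BaseAPI(3, host='puppetdb.example.com', port=8080)
#     api._url('nodes')      # -> 'http://puppetdb.example.com:8080/v3/nodes'
#     nodes = api._query('nodes')                    # GET, JSON-decoded response
#     mbean = api.metric('java.lang:type=Memory')    # convenience wrapper
#     api.total    # populated from the X-Records header when include_total=True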
|
amwilson/pypuppetdb
|
pypuppetdb/api/__init__.py
|
Python
|
apache-2.0
| 13,470
|
import copy, time
from threading import RLock
from . import ac_server_protocol
from .ac_server_helpers import DictToClass
class ACUdpMonitor:
def __init__(self):
# pluginId = 0 -> this plugin
# pluginId = 1 -> proxied plugin
self.HistoryInfo = DictToClass
self.InfoRequest = DictToClass
self.lock = RLock()
self.reset()
def reset(self, carId = None):
with self.lock:
if carId is None:
self.intervals = [0,0]
self.cu_history = [{},{}] # [pluginId][carId]
self.info_requests = []
else:
if carId in self.cu_history[0]: del self.cu_history[0][carId]
if carId in self.cu_history[1]: del self.cu_history[1][carId]
def calcRTInterval(self):
with self.lock:
res = self.intervals[0]
if 0 < self.intervals[1] < res or res == 0:
res = self.intervals[1]
return res
def setIntervals(self, pluginId, interval):
with self.lock:
oldInterval = self.calcRTInterval()
self.intervals[pluginId] = interval
newInterval = self.calcRTInterval()
return newInterval
def getInterval(self, pluginId):
with self.lock:
if self.intervals[pluginId] < 0:
return None
return self.intervals[pluginId]
def infoRequest(self, pluginId, cls, f_filter):
with self.lock:
if len(self.info_requests) < 64:
self.info_requests.append(self.InfoRequest(timestamp=time.time(), pluginId=pluginId, cls=cls, f_filter=f_filter))
def okToSend(self, pluginId, packet):
with self.lock:
if type(packet) == ac_server_protocol.CarUpdate:
# CarUpdate packets
if self.intervals[pluginId] == 0:
# no rt report configured, CarUpdate event will not be passed
return False
t = time.time()
threshold = t - max(0, (self.intervals[pluginId]-50)*0.001)
if not packet.carId in self.cu_history[pluginId]:
# create a history info object for the car if not already there
self.cu_history[pluginId][packet.carId] = self.HistoryInfo(lastSendTime = threshold, firstSendTime = t, count = 0)
lastT = self.cu_history[pluginId][packet.carId].lastSendTime
if t-lastT > self.intervals[pluginId]*10:
log_dbg("car %d has not been updated for a long time (the player probably left) - resetting statistics" % packet.carId)
self.cu_history[pluginId][packet.carId] = self.HistoryInfo(lastSendTime = threshold, firstSendTime = t, count = 0)
lastT = threshold
if ((self.intervals[pluginId] <= self.intervals[1-pluginId] or self.intervals[1-pluginId] <= 0) or
(lastT <= threshold)):
# this plugin has the quicker update rate
h = self.cu_history[pluginId][packet.carId]
h.lastSendTime = t
h.count += 1
# limit the history to 30s, intervals are in milliseconds
maxcnt = 30000./max(10,self.intervals[pluginId])
if h.count > maxcnt:
avg = (h.lastSendTime - h.firstSendTime)/h.count
h.count = maxcnt
h.firstSendTime = h.lastSendTime - avg*h.count
return True
return False
elif type(packet) in [ac_server_protocol.SessionInfo, ac_server_protocol.CarInfo]:
# Requested info packets
for ir in self.info_requests:
if ir.pluginId == pluginId:
if ir.cls == type(packet):
if ir.f_filter(packet):
self.info_requests.remove(ir)
return True
else:
pass
# no request found for this packet. Probably already sent to proxy
return False
# generic packet. Needs proxying
return True
def plausibilityCheck(self):
with self.lock:
t = time.time()
for ir in copy.copy(self.info_requests):
if t - ir.timestamp > 5.:
self.info_requests.remove(ir)
if ir.pluginId == 0:
log=log_err
else:
log=log_dbg
log("Timeout [pluginId=%d] while waiting for request (%.1fs) for request %s." % (ir.pluginId, t-ir.timestamp, ir.cls))
for pluginId in [0,1]:
if pluginId == 0:
log=log_err
else:
log=log_dbg
if self.intervals[pluginId] > 0:
for carId in list(self.cu_history[pluginId].keys()):
h = self.cu_history[pluginId][carId]
if h.count <= 10: continue
avgInterval = (h.lastSendTime - h.firstSendTime)/h.count*1000
if avgInterval > self.intervals[pluginId]*1.5 or avgInterval < self.intervals[pluginId]*0.5:
log("Realtime report interval mismatch [pluginId=%d, carId=%d]. Configured %d ms, measured %.1f ms. Resetting stats." % (pluginId, carId, self.intervals[pluginId], avgInterval))
del self.cu_history[pluginId][carId]
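# --- Editor's hypothetical usage sketch (not part of the original module) ---
# Shows how the monitor arbitrates realtime CarUpdate traffic between the
# local plugin (pluginId 0) and the proxied plugin (pluginId 1); the interval
# values and the forward_* callables are placeholders.
#
#     monitor = ACUdpMonitor()
#     monitor.setIntervals(0, 100)    # local plugin asks for 100 ms updates
#     monitor.setIntervals(1, 1000)   # proxied plugin asks for 1000 ms updates
#     # For each incoming ac_server_protocol.CarUpdate packet, okToSend()
#     # returns True only for the plugin whose configured rate entitles it to
#     # this update, based on the per-car send history:
#     if monitor.okToSend(0, packet):
#         forward_to_local_plugin(packet)
#     if monitor.okToSend(1, packet):
#         forward_to_proxied_plugin(packet)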
|
minolin/acplugins
|
acplugins4python/acplugins4python/ac_server_udp_monitor.py
|
Python
|
apache-2.0
| 5,801
|
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared constants across the VMware driver
"""
from nova.network import model as network_model
DISK_FORMAT_ISO = 'iso'
DISK_FORMAT_VMDK = 'vmdk'
DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK]
DISK_TYPE_SPARSE = 'sparse'
DISK_TYPE_PREALLOCATED = 'preallocated'
DEFAULT_VIF_MODEL = network_model.VIF_MODEL_E1000
DEFAULT_OS_TYPE = "otherGuest"
DEFAULT_ADAPTER_TYPE = "lsiLogic"
DEFAULT_DISK_TYPE = DISK_TYPE_PREALLOCATED
DEFAULT_DISK_FORMAT = DISK_FORMAT_VMDK
ADAPTER_TYPE_BUSLOGIC = "busLogic"
ADAPTER_TYPE_IDE = "ide"
ADAPTER_TYPE_LSILOGICSAS = "lsiLogicsas"
|
jumpstarter-io/nova
|
nova/virt/vmwareapi/constants.py
|
Python
|
apache-2.0
| 1,183
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tool to validate artifact definitions."""
import argparse
import glob
import logging
import os
import sys
from artifacts import definitions
from artifacts import errors
from artifacts import reader
from artifacts import registry
class ArtifactDefinitionsValidator(object):
"""Artifact definitions validator."""
LEGACY_PATH = os.path.join('data', 'legacy.yaml')
_MACOS_PRIVATE_SUB_PATHS = ('etc', 'tftpboot', 'tmp', 'var')
_SUPPORTED_POSIX_USERS_VARIABLES = [
'%%users.homedir%%']
_SUPPORTED_WINDOWS_ENVIRONMENT_VARIABLES = [
'%%environ_allusersappdata%%',
'%%environ_allusersprofile%%',
'%%environ_programfiles%%',
'%%environ_programfilesx86%%',
'%%environ_systemdrive%%',
'%%environ_systemroot%%',
'%%environ_windir%%']
_SUPPORTED_WINDOWS_USERS_VARIABLES = [
'%%users.appdata%%',
'%%users.localappdata%%',
'%%users.sid%%',
'%%users.temp%%',
'%%users.username%%',
'%%users.userprofile%%']
def __init__(self):
"""Initializes an artifact definitions validator."""
super(ArtifactDefinitionsValidator, self).__init__()
self._artifact_registry = registry.ArtifactDefinitionsRegistry()
self._artifact_registry_key_paths = set()
def _CheckGlobstarInPathSegment(
self, filename, artifact_definition, path, path_segment):
"""Checks if a globstar in a path segment is valid.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
path (str): path of which the path segment originated.
path_segment (str): path segment to validate.
Returns:
bool: True if the globstar is valid.
"""
if not path_segment.startswith('**'):
logging.warning((
          'Unsupported globstar with prefix: {0:s} for path: {1:s} defined by '
'artifact definition: {2:s} in file: {3:s}').format(
path_segment, path, artifact_definition.name, filename))
return False
if len(path_segment) > 2:
try:
recursion_depth = int(path_segment[2:], 10)
except (TypeError, ValueError):
logging.warning((
            'Unsupported globstar with suffix: {0:s} for path: {1:s} defined '
'by artifact definition: {2:s} in file: {3:s}').format(
path_segment, path, artifact_definition.name, filename))
return False
if recursion_depth <= 0 or recursion_depth > 10:
logging.warning((
'Globstar with unsupported recursion depth: {0:s} for path: {1:s} '
'defined by artifact definition: {2:s} in file: {3:s}').format(
path_segment, path, artifact_definition.name, filename))
return False
return True
def _CheckMacOSPaths(self, filename, artifact_definition, source, paths):
"""Checks if the paths are valid MacOS paths.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
paths (list[str]): paths to validate.
Returns:
      bool: True if the MacOS paths are valid.
"""
result = True
paths_with_private = []
paths_with_symbolic_link_to_private = []
for path in paths:
path_lower = path.lower()
path_segments = path_lower.split(source.separator)
if not path_segments:
logging.warning((
'Empty path defined by artifact definition: {0:s} in file: '
'{1:s}').format(artifact_definition.name, filename))
result = False
elif len(path_segments) == 1:
continue
elif path_segments[1] in self._MACOS_PRIVATE_SUB_PATHS:
paths_with_symbolic_link_to_private.append(path)
      elif path_segments[1] == 'private' and len(path_segments) >= 3:
if path_segments[2] in self._MACOS_PRIVATE_SUB_PATHS:
paths_with_private.append(path)
else:
logging.warning((
'Unsupported private path: {0:s} defined by artifact definition: '
'{1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
has_globstar = False
for path_segment in path_segments:
if '**' in path_segment:
if has_globstar:
logging.warning((
'Unsupported path: {0:s} with multiple globstars defined by '
'artifact definition: {1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
break
has_globstar = True
if not self._CheckGlobstarInPathSegment(
filename, artifact_definition, path, path_segment):
result = False
if has_globstar and path.endswith(source.separator):
logging.warning((
'Unsupported path: {0:s} with globstar and trailing path '
'separator defined by artifact definition: {1:s} in file: '
'{2:s}').format(path, artifact_definition.name, filename))
result = False
for private_path in paths_with_private:
if private_path[8:] not in paths_with_symbolic_link_to_private:
logging.warning((
'Missing symbolic link: {0:s} for path: {1:s} defined by artifact '
'definition: {2:s} in file: {3:s}').format(
private_path[8:], private_path, artifact_definition.name,
filename))
result = False
for path in paths_with_symbolic_link_to_private:
private_path = '/private{0:s}'.format(path)
if private_path not in paths_with_private:
logging.warning((
'Missing path: {0:s} for symbolic link: {1:s} defined by artifact '
'definition: {2:s} in file: {3:s}').format(
private_path, path, artifact_definition.name, filename))
result = False
return result
def _CheckPath(self, filename, artifact_definition, source, path):
"""Checks if a path is valid.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
path (str): path to validate.
Returns:
bool: True if the path is valid.
"""
result = True
path_segments = path.split(source.separator)
has_globstar = False
for path_segment in path_segments:
if '**' in path_segment:
if has_globstar:
logging.warning((
'Unsupported path: {0:s} with multiple globstars defined by '
'artifact definition: {1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
break
has_globstar = True
if not self._CheckGlobstarInPathSegment(
filename, artifact_definition, path, path_segment):
result = False
if has_globstar and path.endswith(source.separator):
logging.warning((
'Unsupported path: {0:s} with globstar and trailing path '
'separator defined by artifact definition: {1:s} in file: '
'{2:s}').format(path, artifact_definition.name, filename))
result = False
return result
def _CheckWindowsPath(self, filename, artifact_definition, source, path):
"""Checks if a path is a valid Windows path.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
path (str): path to validate.
Returns:
bool: True if the Windows path is valid.
"""
result = True
number_of_forward_slashes = path.count('/')
number_of_backslashes = path.count('\\')
if (number_of_forward_slashes < number_of_backslashes and
source.separator != '\\'):
logging.warning((
'Incorrect path separator: {0:s} in path: {1:s} defined '
'by artifact definition: {2:s} in file: {3:s}').format(
source.separator, path, artifact_definition.name,
filename))
result = False
if source.separator != '\\':
return result
path_lower = path.lower()
path_segments = path_lower.split(source.separator)
if not path_segments:
logging.warning((
'Empty path defined by artifact definition: {0:s} in file: '
'{1:s}').format(artifact_definition.name, filename))
result = False
elif path_segments[0].startswith('%%users.') and path_segments[0] not in (
'%%users.appdata%%', '%%users.homedir%%', '%%users.localappdata%%',
'%%users.temp%%', '%%users.username%%', '%%users.userprofile%%'):
logging.warning((
'Unsupported "{0:s}" in path: {1:s} defined by artifact '
'definition: {2:s} in file: {3:s}').format(
path_segments[0], path, artifact_definition.name, filename))
result = False
elif path_segments[0] == '%%users.homedir%%':
logging.warning((
'Replace "%%users.homedir%%" by "%%users.userprofile%%" in path: '
'{0:s} defined by artifact definition: {1:s} in file: '
'{2:s}').format(path, artifact_definition.name, filename))
result = False
elif path_lower.startswith('%%users.userprofile%%\\appdata\\local\\'):
logging.warning((
'Replace "%%users.userprofile%%\\AppData\\Local" by '
'"%%users.localappdata%%" in path: {0:s} defined by artifact '
'definition: {1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
elif path_lower.startswith('%%users.userprofile%%\\appdata\\roaming\\'):
logging.warning((
'Replace "%%users.userprofile%%\\AppData\\Roaming" by '
'"%%users.appdata%%" in path: {0:s} defined by artifact '
'definition: {1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
elif path_lower.startswith('%%users.userprofile%%\\application data\\'):
logging.warning((
'Replace "%%users.userprofile%%\\Application Data" by '
'"%%users.appdata%%" in path: {0:s} defined by artifact '
'definition: {1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
elif path_lower.startswith(
'%%users.userprofile%%\\local settings\\application data\\'):
logging.warning((
'Replace "%%users.userprofile%%\\Local Settings\\Application Data" '
'by "%%users.localappdata%%" in path: {0:s} defined by artifact '
'definition: {1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
has_globstar = False
for path_segment in path_segments:
if path_segment.startswith('%%') and path_segment.endswith('%%'):
if (path_segment.startswith('%%environ_') and
path_segment not in self._SUPPORTED_WINDOWS_ENVIRONMENT_VARIABLES):
result = False
logging.warning((
'Artifact definition: {0:s} in file: {1:s} contains Windows '
              'path that contains an unsupported environment variable: '
'"{2:s}".').format(
artifact_definition.name, filename, path_segment))
elif (path_segment.startswith('%%users.') and
path_segment not in self._SUPPORTED_WINDOWS_USERS_VARIABLES):
result = False
logging.warning((
'Artifact definition: {0:s} in file: {1:s} contains Windows '
'path that contains an unsupported users variable: '
'"{2:s}". ').format(
artifact_definition.name, filename, path_segment))
elif '**' in path_segment:
if has_globstar:
logging.warning((
'Unsupported path: {0:s} with multiple globstars defined by '
'artifact definition: {1:s} in file: {2:s}').format(
path, artifact_definition.name, filename))
result = False
break
has_globstar = True
if not self._CheckGlobstarInPathSegment(
filename, artifact_definition, path, path_segment):
result = False
if has_globstar and path.endswith(source.separator):
logging.warning((
'Unsupported path: {0:s} with globstar and trailing path '
'separator defined by artifact definition: {1:s} in file: '
'{2:s}').format(path, artifact_definition.name, filename))
result = False
return result
def _CheckWindowsRegistryKeyPath(
self, filename, artifact_definition, key_path):
"""Checks if a path is a valid Windows Registry key path.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
key_path (str): Windows Registry key path to validate.
Returns:
bool: True if the Windows Registry key path is valid.
"""
result = True
key_path_segments = key_path.lower().split('\\')
if key_path_segments[0] == '%%current_control_set%%':
result = False
logging.warning((
'Artifact definition: {0:s} in file: {1:s} contains Windows '
'Registry key path that starts with '
'%%CURRENT_CONTROL_SET%%. Replace %%CURRENT_CONTROL_SET%% with '
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet').format(
artifact_definition.name, filename))
for segment_index, key_path_segment in enumerate(key_path_segments):
if key_path_segment.startswith('%%') and key_path_segment.endswith('%%'):
if (segment_index == 1 and key_path_segment == '%%users.sid%%' and
key_path_segments[0] == 'hkey_users'):
continue
if key_path_segment.startswith('%%environ_'):
result = False
logging.warning((
'Artifact definition: {0:s} in file: {1:s} contains Windows '
'Registry key path that contains an environment variable: '
'"{2:s}". Usage of environment variables in key paths is not '
'encouraged at this time.').format(
artifact_definition.name, filename, key_path_segment))
elif key_path_segment.startswith('%%users.'):
result = False
logging.warning((
'Artifact definition: {0:s} in file: {1:s} contains Windows '
'Registry key path that contains a users variable: "{2:s}". '
'Usage of users variables in key paths, except for '
'"HKEY_USERS\\%%users.sid%%", is not encouraged at this '
'time.').format(
artifact_definition.name, filename, key_path_segment))
return result
def _HasDuplicateRegistryKeyPaths(
self, filename, artifact_definition, source):
"""Checks if Registry key paths are not already defined by other artifacts.
Note that at the moment this function will only find exact duplicate
Registry key paths.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
Returns:
bool: True if the Registry key paths defined by the source type
are used in other artifacts.
"""
result = False
intersection = self._artifact_registry_key_paths.intersection(
set(source.keys))
if intersection:
duplicate_key_paths = '\n'.join(intersection)
logging.warning((
'Artifact definition: {0:s} in file: {1:s} has duplicate '
'Registry key paths:\n{2:s}').format(
artifact_definition.name, filename, duplicate_key_paths))
result = True
self._artifact_registry_key_paths.update(source.keys)
return result
def CheckDirectory(self, path):
"""Validates the artifacts definition in a specific directory.
Args:
path (str): path of the directory containing the artifacts definition
files.
Returns:
bool: True if the file contains valid artifacts definitions.
"""
    result = True
    for filename in glob.glob(os.path.join(path, '*.yaml')):
result = self.CheckFile(filename)
if not result:
break
return result
def CheckFile(self, filename):
"""Validates the artifacts definition in a specific file.
Args:
filename (str): name of the artifacts definition file.
Returns:
bool: True if the file contains valid artifacts definitions.
"""
result = True
artifact_reader = reader.YamlArtifactsReader()
try:
for artifact_definition in artifact_reader.ReadFile(filename):
try:
self._artifact_registry.RegisterDefinition(artifact_definition)
except KeyError:
logging.warning(
'Duplicate artifact definition: {0:s} in file: {1:s}'.format(
artifact_definition.name, filename))
result = False
artifact_definition_supports_macos = (
definitions.SUPPORTED_OS_DARWIN in (
artifact_definition.supported_os))
artifact_definition_supports_windows = (
definitions.SUPPORTED_OS_WINDOWS in (
artifact_definition.supported_os))
for source in artifact_definition.sources:
if source.type_indicator in (
definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH):
if (definitions.SUPPORTED_OS_DARWIN in source.supported_os or (
artifact_definition_supports_macos and
not source.supported_os)):
if not self._CheckMacOSPaths(
filename, artifact_definition, source, source.paths):
result = False
elif (artifact_definition_supports_windows or
definitions.SUPPORTED_OS_WINDOWS in source.supported_os):
for path in source.paths:
if not self._CheckWindowsPath(
filename, artifact_definition, source, path):
result = False
else:
for path in source.paths:
if not self._CheckPath(
filename, artifact_definition, source, path):
result = False
elif source.type_indicator == (
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
# Exempt the legacy file from duplicate checking because it has
# duplicates intentionally.
if (filename != self.LEGACY_PATH and
self._HasDuplicateRegistryKeyPaths(
filename, artifact_definition, source)):
result = False
for key_path in source.keys:
if not self._CheckWindowsRegistryKeyPath(
filename, artifact_definition, key_path):
result = False
elif source.type_indicator == (
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
for key_value_pair in source.key_value_pairs:
if not self._CheckWindowsRegistryKeyPath(
filename, artifact_definition, key_value_pair['key']):
result = False
except errors.FormatError as exception:
logging.warning(
'Unable to validate file: {0:s} with error: {1!s}'.format(
filename, exception))
result = False
return result
def GetUndefinedArtifacts(self):
"""Retrieves the names of undefined artifacts used by artifact groups.
Returns:
set[str]: undefined artifacts names.
"""
return self._artifact_registry.GetUndefinedArtifacts()
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
args_parser = argparse.ArgumentParser(
description='Validates an artifact definitions file.')
args_parser.add_argument(
'definitions', nargs='?', action='store', metavar='PATH', default=None,
help=('path of the file or directory that contains the artifact '
'definitions.'))
options = args_parser.parse_args()
if not options.definitions:
print('Source value is missing.')
print('')
args_parser.print_help()
print('')
return False
if not os.path.exists(options.definitions):
print('No such file or directory: {0:s}'.format(options.definitions))
print('')
return False
validator = ArtifactDefinitionsValidator()
if os.path.isdir(options.definitions):
print('Validating definitions in: {0:s}/*.yaml'.format(options.definitions))
result = validator.CheckDirectory(options.definitions)
elif os.path.isfile(options.definitions):
print('Validating definitions in: {0:s}'.format(options.definitions))
result = validator.CheckFile(options.definitions)
if not result:
print('FAILURE')
return False
print('SUCCESS')
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
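# --- Editor's hypothetical usage sketch (not part of the original tool) ---
# The validator can also be driven programmatically instead of through Main();
# the 'data' directory is a placeholder path.
#
#     validator = ArtifactDefinitionsValidator()
#     if validator.CheckDirectory('data'):
#         for name in sorted(validator.GetUndefinedArtifacts()):
#             print('Artifact group references undefined artifact: '
#                   '{0:s}'.format(name))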
|
joachimmetz/artifacts
|
tools/validator.py
|
Python
|
apache-2.0
| 21,190
|
"""Core classes and exceptions for Simple-Salesforce"""
# has to be defined prior to login import
DEFAULT_API_VERSION = '38.0'
import logging
import warnings
import requests
import json
import re
from collections import namedtuple
try:
from urlparse import urlparse, urljoin
except ImportError:
# Python 3+
from urllib.parse import urlparse, urljoin
from simple_salesforce.login import SalesforceLogin
from simple_salesforce.util import date_to_iso8601, exception_handler
from simple_salesforce.exceptions import (
SalesforceGeneralError
)
from simple_salesforce.bulk import SFBulkHandler
try:
from collections import OrderedDict
except ImportError:
# Python < 2.7
from ordereddict import OrderedDict
#pylint: disable=invalid-name
logger = logging.getLogger(__name__)
def _warn_request_deprecation():
"""Deprecation for (Salesforce/SFType).request attribute"""
warnings.warn(
'The request attribute has been deprecated and will be removed in a '
'future version. Please use Salesforce.session instead.',
DeprecationWarning
)
Usage = namedtuple('Usage', 'used total')
PerAppUsage = namedtuple('PerAppUsage', 'used total name')
# pylint: disable=too-many-instance-attributes
class Salesforce(object):
"""Salesforce Instance
An instance of Salesforce is a handy way to wrap a Salesforce session
for easy use of the Salesforce REST API.
"""
# pylint: disable=too-many-arguments
def __init__(
self, username=None, password=None, security_token=None,
session_id=None, instance=None, instance_url=None,
organizationId=None, sandbox=None, version=DEFAULT_API_VERSION,
proxies=None, session=None, client_id=None, domain=None):
"""Initialize the instance with the given parameters.
Available kwargs
Password Authentication:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* sandbox -- DEPRECATED: Use domain instead.
        * domain -- The domain to use when connecting to Salesforce. Use
                    common domains, such as 'login' or 'test', or a
                    Salesforce My Domain. If not provided, defaults to
                    'login'.
Direct Session and Instance Access:
* session_id -- Access token for this session
Then either
* instance -- Domain of your Salesforce instance, i.e.
`na1.salesforce.com`
OR
* instance_url -- Full URL of your instance i.e.
                          `https://na1.salesforce.com`
Universal Kwargs:
* version -- the version of the Salesforce API to use, for example
`29.0`
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
if (sandbox is not None) and (domain is not None):
raise ValueError("Both 'sandbox' and 'domain' arguments were "
"supplied. Either may be supplied, but not "
"both.")
if sandbox is not None:
warnings.warn("'sandbox' argument is deprecated. Use "
"'domain' instead. Overriding 'domain' "
"with 'sandbox' value.",
DeprecationWarning)
domain = 'test' if sandbox else 'login'
if domain is None:
domain = 'login'
# Determine if the user passed in the optional version and/or
# domain kwargs
self.sf_version = version
self.domain = domain
self.session = session or requests.Session()
self.proxies = self.session.proxies
# override custom session proxies dance
if proxies is not None:
if not session:
self.session.proxies = self.proxies = proxies
else:
logger.warning(
'Proxies must be defined on custom session object, '
'ignoring proxies: %s', proxies
)
# Determine if the user wants to use our username/password auth or pass
# in their own information
if all(arg is not None for arg in (
username, password, security_token)):
self.auth_type = "password"
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=self.session,
username=username,
password=password,
security_token=security_token,
sf_version=self.sf_version,
proxies=self.proxies,
client_id=client_id,
domain=self.domain)
elif all(arg is not None for arg in (
session_id, instance or instance_url)):
self.auth_type = "direct"
self.session_id = session_id
# If the user provides the full url (as returned by the OAuth
# interface for example) extract the hostname (which we rely on)
if instance_url is not None:
self.sf_instance = urlparse(instance_url).hostname
else:
self.sf_instance = instance
elif all(arg is not None for arg in (
username, password, organizationId)):
self.auth_type = 'ipfilter'
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=self.session,
username=username,
password=password,
organizationId=organizationId,
sf_version=self.sf_version,
proxies=self.proxies,
client_id=client_id,
domain=self.domain)
else:
raise TypeError(
'You must provide login information or an instance and token'
)
self.auth_site = ('https://{domain}.salesforce.com'
.format(domain=self.domain))
self.headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
self.base_url = ('https://{instance}/services/data/v{version}/'
.format(instance=self.sf_instance,
version=self.sf_version))
self.apex_url = ('https://{instance}/services/apexrest/'
.format(instance=self.sf_instance))
self.bulk_url = ('https://{instance}/services/async/{version}/'
.format(instance=self.sf_instance,
version=self.sf_version))
self.api_usage = {}
def describe(self):
"""Describes all available objects
"""
url = self.base_url + "sobjects"
result = self._call_salesforce('GET', url, name='describe')
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
return json_result
# SObject Handler
def __getattr__(self, name):
"""Returns an `SFType` instance for the given Salesforce object type
(given in `name`).
The magic part of the SalesforceAPI, this function translates
calls such as `salesforce_api_instance.Lead.metadata()` into fully
constituted `SFType` instances to make a nice Python API wrapper
for the REST API.
Arguments:
* name -- the name of a Salesforce object type, e.g. Lead or Contact
"""
# fix to enable serialization
# (https://github.com/heroku/simple-salesforce/issues/60)
if name.startswith('__'):
return super(Salesforce, self).__getattr__(name)
if name == 'bulk':
# Deal with bulk API functions
return SFBulkHandler(self.session_id, self.bulk_url, self.proxies,
self.session)
return SFType(
name, self.session_id, self.sf_instance, sf_version=self.sf_version,
proxies=self.proxies, session=self.session)
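    # --- Editor's hypothetical usage sketch (not part of the original code) ---
    # The attribute lookup above is what makes calls like the following work;
    # the credentials and record Id are placeholders.
    #
    #     sf = Salesforce(username='user@example.com', password='...',
    #                     security_token='...')
    #     sf.Lead.metadata()                    # SFType('Lead', ...) under the hood
    #     sf.Contact.get('003xx000004TmiQAAS')  # SFType('Contact', ...).get(...)
    #     sf.bulk                               # SFBulkHandler for bulk operations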
# User utility methods
def set_password(self, user, password):
"""Sets the password of a user
salesforce dev documentation link:
https://www.salesforce.com/us/developer/docs/api_rest/Content/dome_sobject_user_password.htm
Arguments:
* user: the userID of the user to set
* password: the new password
"""
url = self.base_url + 'sobjects/User/%s/password' % user
params = {'NewPassword': password}
result = self._call_salesforce('POST', url, data=json.dumps(params))
        # Salesforce returns 204 No Content when the request is successful
if result.status_code != 200 and result.status_code != 204:
raise SalesforceGeneralError(url,
result.status_code,
'User',
result.content)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
return json_result
# pylint: disable=invalid-name
def setPassword(self, user, password):
# pylint: disable=line-too-long
"""Sets the password of a user
salesforce dev documentation link:
https://www.salesforce.com/us/developer/docs/api_rest/Content/dome_sobject_user_password.htm
Arguments:
* user: the userID of the user to set
* password: the new password
"""
warnings.warn(
"This method has been deprecated."
"Please use set_password instead.",
DeprecationWarning)
return self.set_password(user, password)
# Generic Rest Function
def restful(self, path, params=None, method='GET', **kwargs):
"""Allows you to make a direct REST call if you know the path
Arguments:
* path: The path of the request
Example: sobjects/User/ABC123/password'
* params: dict of parameters to pass to the path
* method: HTTP request method, default GET
* other arguments supported by requests.request (e.g. json, timeout)
"""
url = self.base_url + path
result = self._call_salesforce(method, url, name=path, params=params,
**kwargs)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
return json_result
# Search Functions
def search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the fully formatted SOSL search string, e.g.
`FIND {Waldo}`
"""
url = self.base_url + 'search/'
# `requests` will correctly encode the query string passed as `params`
params = {'q': search}
result = self._call_salesforce('GET', url, name='search', params=params)
json_result = result.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
return json_result
def quick_search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the non-SOSL search string, e.g. `Waldo`. This search
string will be wrapped to read `FIND {Waldo}` before being
sent to Salesforce
"""
search_string = u'FIND {{{search_string}}}'.format(search_string=search)
return self.search(search_string)
def limits(self, **kwargs):
"""Return the result of a Salesforce request to list Organization
limits.
"""
url = self.base_url + 'limits/'
result = self._call_salesforce('GET', url, **kwargs)
if result.status_code != 200:
exception_handler(result)
return result.json(object_pairs_hook=OrderedDict)
# Query Handler
def query(self, query, include_deleted=False, **kwargs):
"""Return the result of a Salesforce SOQL query as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* query -- the SOQL query to send to Salesforce, e.g.
SELECT Id FROM Lead WHERE Email = "waldo@somewhere.com"
* include_deleted -- True if deleted records should be included
"""
url = self.base_url + ('queryAll/' if include_deleted else 'query/')
params = {'q': query}
# `requests` will correctly encode the query string passed as `params`
result = self._call_salesforce('GET', url, name='query',
params=params, **kwargs)
return result.json(object_pairs_hook=OrderedDict)
def query_more(
self, next_records_identifier, identifier_is_url=False,
include_deleted=False, **kwargs):
"""Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
Arguments:
* next_records_identifier -- either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
* identifier_is_url -- True if `next_records_identifier` should be
treated as a URL, False if
`next_records_identifier` should be treated as
an Id.
* include_deleted -- True if the `next_records_identifier` refers to a
query that includes deleted records. Only used if
`identifier_is_url` is False
"""
if identifier_is_url:
# Don't use `self.base_url` here because the full URI is provided
url = (u'https://{instance}{next_record_url}'
.format(instance=self.sf_instance,
next_record_url=next_records_identifier))
else:
endpoint = 'queryAll' if include_deleted else 'query'
url = self.base_url + '{query_endpoint}/{next_record_id}'
url = url.format(query_endpoint=endpoint,
next_record_id=next_records_identifier)
result = self._call_salesforce('GET', url, name='query_more', **kwargs)
return result.json(object_pairs_hook=OrderedDict)
def query_all(self, query, include_deleted=False, **kwargs):
"""Returns the full set of results for the `query`. This is a
convenience
wrapper around `query(...)` and `query_more(...)`.
The returned dict is the decoded JSON payload from the final call to
Salesforce, but with the `totalSize` field representing the full
number of results retrieved and the `records` list representing the
full list of records retrieved.
Arguments
* query -- the SOQL query to send to Salesforce, e.g.
SELECT Id FROM Lead WHERE Email = "waldo@somewhere.com"
* include_deleted -- True if the query should include deleted records.
"""
result = self.query(query, include_deleted=include_deleted, **kwargs)
all_records = []
while True:
all_records.extend(result['records'])
# fetch next batch if we're not done else break out of loop
if not result['done']:
result = self.query_more(result['nextRecordsUrl'],
identifier_is_url=True)
else:
break
result['records'] = all_records
return result
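    # --- Editor's hypothetical usage sketch (not part of the original code) ---
    # query_all() follows nextRecordsUrl pagination transparently; the SOQL
    # statement is a placeholder.
    #
    #     result = sf.query_all(
    #         "SELECT Id, Email FROM Lead WHERE IsConverted = false")
    #     print(result['totalSize'], len(result['records']))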
def apexecute(self, action, method='GET', data=None, **kwargs):
"""Makes an HTTP request to an APEX REST endpoint
Arguments:
* action -- The REST endpoint for the request.
* method -- HTTP method for the request (default GET)
* data -- A dict of parameters to send in a POST / PUT request
* kwargs -- Additional kwargs to pass to `requests.request`
"""
result = self._call_salesforce(
method,
self.apex_url + action,
name="apexexcute",
data=json.dumps(data), **kwargs
)
try:
response_content = result.json()
# pylint: disable=broad-except
except Exception:
response_content = result.text
return response_content
def _call_salesforce(self, method, url, name="", **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
result = self.session.request(
method, url, headers=self.headers, **kwargs)
if result.status_code >= 300:
exception_handler(result, name=name)
sforce_limit_info = result.headers.get('Sforce-Limit-Info')
if sforce_limit_info:
self.api_usage = self.parse_api_usage(sforce_limit_info)
return result
@property
def request(self):
"""Deprecated access to self.session for backwards compatibility"""
_warn_request_deprecation()
return self.session
@request.setter
def request(self, session):
"""Deprecated setter for self.session"""
_warn_request_deprecation()
self.session = session
@staticmethod
def parse_api_usage(sforce_limit_info):
"""parse API usage and limits out of the Sforce-Limit-Info header
Arguments:
* sforce_limit_info: The value of response header 'Sforce-Limit-Info'
Example 1: 'api-usage=18/5000'
Example 2: 'api-usage=25/5000;
per-app-api-usage=17/250(appName=sample-connected-app)'
"""
result = {}
api_usage = re.match(r'[^-]?api-usage=(?P<used>\d+)/(?P<tot>\d+)',
sforce_limit_info)
pau = r'.+per-app-api-usage=(?P<u>\d+)/(?P<t>\d+)\(appName=(?P<n>.+)\)'
per_app_api_usage = re.match(pau, sforce_limit_info)
if api_usage and api_usage.groups():
groups = api_usage.groups()
result['api-usage'] = Usage(used=int(groups[0]),
total=int(groups[1]))
if per_app_api_usage and per_app_api_usage.groups():
groups = per_app_api_usage.groups()
result['per-app-api-usage'] = PerAppUsage(used=int(groups[0]),
total=int(groups[1]),
name=groups[2])
return result
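    # --- Editor's hypothetical illustration (not part of the original code) ---
    # What parse_api_usage() returns for the header examples in its docstring:
    #
    #     header = ('api-usage=25/5000; '
    #               'per-app-api-usage=17/250(appName=sample-connected-app)')
    #     usage = Salesforce.parse_api_usage(header)
    #     # usage['api-usage']          -> Usage(used=25, total=5000)
    #     # usage['per-app-api-usage']  -> PerAppUsage(used=17, total=250,
    #     #                                            name='sample-connected-app')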
class SFType(object):
"""An interface to a specific type of SObject"""
# pylint: disable=too-many-arguments
def __init__(
self, object_name, session_id, sf_instance,
sf_version=DEFAULT_API_VERSION, proxies=None, session=None):
"""Initialize the instance with the given parameters.
Arguments:
* object_name -- the name of the type of SObject this represents,
e.g. `Lead` or `Contact`
* session_id -- the session ID for authenticating to Salesforce
* sf_instance -- the domain of the instance of Salesforce to use
* sf_version -- the version of the Salesforce API to use
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
"""
self.session_id = session_id
self.name = object_name
self.session = session or requests.Session()
# don't wipe out original proxies with None
if not session and proxies is not None:
self.session.proxies = proxies
self.api_usage = {}
self.base_url = (
u'https://{instance}/services/data/v{sf_version}/sobjects'
'/{object_name}/'.format(instance=sf_instance,
object_name=object_name,
sf_version=sf_version))
def metadata(self, headers=None):
"""Returns the result of a GET to `.../{object_name}/` as a dict
decoded from the JSON payload returned by Salesforce.
Arguments:
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce('GET', self.base_url, headers=headers)
return result.json(object_pairs_hook=OrderedDict)
def describe(self, headers=None):
"""Returns the result of a GET to `.../{object_name}/describe` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='GET', url=urljoin(self.base_url, 'describe'),
headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def describe_layout(self, record_id, headers=None):
"""Returns the layout of the object
Returns the result of a GET to
`.../{object_name}/describe/layouts/<recordid>` as a dict decoded from
the JSON payload returned by Salesforce.
Arguments:
* record_id -- the Id of the SObject to get
* headers -- a dict with additional request headers.
"""
custom_url_part = 'describe/layouts/{record_id}'.format(
record_id=record_id
)
result = self._call_salesforce(
method='GET',
url=urljoin(self.base_url, custom_url_part),
headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def get(self, record_id, headers=None):
"""Returns the result of a GET to `.../{object_name}/{record_id}` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* record_id -- the Id of the SObject to get
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='GET', url=urljoin(self.base_url, record_id),
headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def get_by_custom_id(self, custom_id_field, custom_id, headers=None):
"""Return an ``SFType`` by custom ID
Returns the result of a GET to
`.../{object_name}/{custom_id_field}/{custom_id}` as a dict decoded
from the JSON payload returned by Salesforce.
Arguments:
* custom_id_field -- the API name of a custom field that was defined
as an External ID
* custom_id - the External ID value of the SObject to get
* headers -- a dict with additional request headers.
"""
custom_url = urljoin(
self.base_url, '{custom_id_field}/{custom_id}'.format(
custom_id_field=custom_id_field, custom_id=custom_id
)
)
result = self._call_salesforce(
method='GET', url=custom_url, headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def create(self, data, headers=None):
"""Creates a new SObject using a POST to `.../{object_name}/`.
Returns a dict decoded from the JSON payload returned by Salesforce.
Arguments:
* data -- a dict of the data to create the SObject from. It will be
JSON-encoded before being transmitted.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='POST', url=self.base_url,
data=json.dumps(data), headers=headers
)
return result.json(object_pairs_hook=OrderedDict)
def upsert(self, record_id, data, raw_response=False, headers=None):
"""Creates or updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- an identifier for the SObject as described in the
Salesforce documentation
* data -- a dict of the data to create or update the SObject from. It
will be JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='PATCH', url=urljoin(self.base_url, record_id),
data=json.dumps(data), headers=headers
)
return self._raw_response(result, raw_response)
def update(self, record_id, data, raw_response=False, headers=None):
"""Updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to update
* data -- a dict of the data to update the SObject from. It will be
JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='PATCH', url=urljoin(self.base_url, record_id),
data=json.dumps(data), headers=headers
)
return self._raw_response(result, raw_response)
def delete(self, record_id, raw_response=False, headers=None):
"""Deletes an SObject using a DELETE to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to delete
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='DELETE', url=urljoin(self.base_url, record_id),
headers=headers
)
return self._raw_response(result, raw_response)
def deleted(self, start, end, headers=None):
# pylint: disable=line-too-long
"""Gets a list of deleted records
Use the SObject Get Deleted resource to get a list of deleted records
for the specified object.
.../deleted/?start=2013-05-05T00:00:00+00:00&end=2013-05-10T00:00:00+00:00
* start -- start datetime object
* end -- end datetime object
* headers -- a dict with additional request headers.
"""
url = urljoin(
self.base_url, 'deleted/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end)
)
)
result = self._call_salesforce(method='GET', url=url, headers=headers)
return result.json(object_pairs_hook=OrderedDict)
def updated(self, start, end, headers=None):
# pylint: disable=line-too-long
"""Gets a list of updated records
Use the SObject Get Updated resource to get a list of updated
(modified or added) records for the specified object.
.../updated/?start=2014-03-20T00:00:00+00:00&end=2014-03-22T00:00:00+00:00
* start -- start datetime object
* end -- end datetime object
* headers -- a dict with additional request headers.
"""
url = urljoin(
self.base_url, 'updated/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end)
)
)
result = self._call_salesforce(method='GET', url=url, headers=headers)
return result.json(object_pairs_hook=OrderedDict)
def _call_salesforce(self, method, url, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
        Returns a `requests.Response` object.
"""
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
additional_headers = kwargs.pop('headers', dict())
headers.update(additional_headers or dict())
result = self.session.request(method, url, headers=headers, **kwargs)
if result.status_code >= 300:
exception_handler(result, self.name)
sforce_limit_info = result.headers.get('Sforce-Limit-Info')
if sforce_limit_info:
self.api_usage = Salesforce.parse_api_usage(sforce_limit_info)
return result
# pylint: disable=no-self-use
def _raw_response(self, response, body_flag):
"""Utility method for processing the response and returning either the
status code or the response object.
Returns either an `int` or a `requests.Response` object.
"""
if not body_flag:
return response.status_code
return response
@property
def request(self):
"""Deprecated access to self.session for backwards compatibility"""
_warn_request_deprecation()
return self.session
@request.setter
def request(self, session):
"""Deprecated setter for self.session"""
_warn_request_deprecation()
self.session = session
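# Hedged usage sketch (not part of the original module): how the SFType helpers
# above are typically reached from an authenticated ``Salesforce`` client. The
# credentials, record Id and external-Id field below are made-up placeholders.
def _demo_sftype_crud():
    sf = Salesforce(username='user@example.com', password='password',
                    security_token='token')
    sf.Contact.create({'LastName': 'Smith', 'Email': 'smith@example.com'})   # POST
    record = sf.Contact.get('003e0000003GuNXAA0')                            # GET by Id
    sf.Contact.upsert('customExtIdField__c/11999', {'LastName': 'Jones'})    # PATCH
    sf.Contact.delete('003e0000003GuNXAA0')                                  # DELETE
    # Audit-style queries take timezone-aware datetimes, e.g.
    # sf.Contact.deleted(start_dt, end_dt) and sf.Contact.updated(start_dt, end_dt).
    return record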
class SalesforceAPI(Salesforce):
"""Deprecated SalesforceAPI Instance
    This class implements the username/password authentication mechanism using
    positional arguments. It has since been superseded by the 'Salesforce'
    class, which relies on kwargs.
"""
# pylint: disable=too-many-arguments
def __init__(self, username, password, security_token, sandbox=False,
sf_version='27.0'):
"""Initialize the instance with the given parameters.
Arguments:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* sandbox -- True if you want to login to `test.salesforce.com`, False
if you want to login to `login.salesforce.com`.
* sf_version -- the version of the Salesforce API to use, for example
"27.0"
"""
warnings.warn(
"Use of login arguments has been deprecated. Please use kwargs",
DeprecationWarning
)
super(SalesforceAPI, self).__init__(username=username,
password=password,
security_token=security_token,
sandbox=sandbox,
version=sf_version)
|
kawamon/hue
|
desktop/core/ext-py/simple-salesforce-0.74.2/simple_salesforce/api.py
|
Python
|
apache-2.0
| 32,017
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
from tools.toolchains import TOOLCHAINS
from tools.targets import TARGET_NAMES
from utils import argparse_force_uppercase_type, argparse_lowercase_hyphen_type, argparse_many
def get_default_options_parser(add_clean=True, add_options=True):
parser = ArgumentParser()
targetnames = TARGET_NAMES
targetnames.sort()
toolchainlist = list(TOOLCHAINS)
toolchainlist.sort()
parser.add_argument("-m", "--mcu",
help="build for the given MCU (%s)" % ', '.join(targetnames),
metavar="MCU",
type=argparse_many(argparse_force_uppercase_type(targetnames, "MCU")))
parser.add_argument("-t", "--tool",
help="build using the given TOOLCHAIN (%s)" % ', '.join(toolchainlist),
metavar="TOOLCHAIN",
type=argparse_many(argparse_force_uppercase_type(toolchainlist, "toolchain")))
parser.add_argument("--color",
help="print Warnings, and Errors in color",
action="store_true", default=False)
if add_clean:
parser.add_argument("-c", "--clean", action="store_true", default=False,
help="clean the build directory")
if add_options:
parser.add_argument("-o", "--options", action="append",
                            help='Add a build argument ("save-asm": save the asm generated by the compiler, "debug-info": generate debugging information, "analyze": run Goanna static code analyzer)',
type=argparse_lowercase_hyphen_type(['save-asm', 'debug-info', 'analyze'], "build option"))
return parser
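# Hedged usage sketch (not part of the original file): building the parser and
# parsing a typical invocation. "K64F" and "GCC_ARM" are illustrative values
# and must exist in the local target/toolchain tables for parsing to succeed.
def _demo_options_parser():
    parser = get_default_options_parser()
    options = parser.parse_args(["-m", "K64F", "-t", "GCC_ARM", "--color", "-c"])
    return options.mcu, options.tool, options.color, options.clean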
|
rgrover/mbed
|
tools/options.py
|
Python
|
apache-2.0
| 2,276
|
# -*- coding: utf-8 -*-
from nose import tools as nt
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
class TestFileFiltering(ApiTestCase):
def setUp(self):
super(TestFileFiltering, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.file1 = api_utils.create_test_file(
self.node, self.user, filename='file1')
self.file2 = api_utils.create_test_file(
self.node, self.user, filename='file2')
self.file3 = api_utils.create_test_file(
self.node, self.user, filename='file3')
self.file4 = api_utils.create_test_file(
self.node, self.user, filename='file4')
def test_get_all_files(self):
res = self.app.get(
'/{}nodes/{}/files/osfstorage/'.format(API_BASE, self.node._id),
auth=self.user.auth
)
data = res.json.get('data')
nt.assert_equal(len(data), 4)
def test_filter_on_tag(self):
self.file1.add_tag('new', Auth(self.user))
self.file2.add_tag('new', Auth(self.user))
res = self.app.get(
'/{}nodes/{}/files/osfstorage/?filter[tags]=new'.format(
API_BASE, self.node._id
),
auth=self.user.auth
)
data = res.json.get('data')
nt.assert_equal(len(data), 2)
names = [f['attributes']['name'] for f in data]
nt.assert_in('file1', names)
nt.assert_in('file2', names)
def test_filtering_tags_exact(self):
self.file1.add_tag('cats', Auth(self.user))
self.file2.add_tag('cats', Auth(self.user))
self.file1.add_tag('cat', Auth(self.user))
res = self.app.get(
'/{}nodes/{}/files/osfstorage/?filter[tags]=cat'.format(
API_BASE, self.node._id
),
auth=self.user.auth
)
nt.assert_equal(len(res.json.get('data')), 1)
def test_filtering_tags_capitalized_query(self):
self.file1.add_tag('cat', Auth(self.user))
res = self.app.get(
'/{}nodes/{}/files/osfstorage/?filter[tags]=CAT'.format(
API_BASE, self.node._id
),
auth=self.user.auth
)
nt.assert_equal(len(res.json.get('data')), 1)
def test_filtering_tags_capitalized_tag(self):
self.file1.add_tag('CAT', Auth(self.user))
res = self.app.get(
'/{}nodes/{}/files/osfstorage/?filter[tags]=cat'.format(
API_BASE, self.node._id
),
auth=self.user.auth
)
nt.assert_equal(len(res.json.get('data')), 1)
def test_filtering_on_multiple_tags(self):
self.file1.add_tag('cat', Auth(self.user))
self.file1.add_tag('sand', Auth(self.user))
res = self.app.get(
'/{}nodes/{}/files/osfstorage/?filter[tags]=cat&filter[tags]=sand'.format(
API_BASE, self.node._id
),
auth=self.user.auth
)
nt.assert_equal(len(res.json.get('data')), 1)
def test_filtering_on_multiple_tags_must_match_both(self):
self.file1.add_tag('cat', Auth(self.user))
res = self.app.get(
'/{}nodes/{}/files/osfstorage/?filter[tags]=cat&filter[tags]=sand'.format(
API_BASE, self.node._id
),
auth=self.user.auth
)
nt.assert_equal(len(res.json.get('data')), 0)
def test_filtering_by_tags_returns_distinct(self):
# regression test for returning multiple of the same file
self.file1.add_tag('cat', Auth(self.user))
self.file1.add_tag('cAt', Auth(self.user))
self.file1.add_tag('caT', Auth(self.user))
self.file1.add_tag('CAT', Auth(self.user))
res = self.app.get(
'/{}nodes/{}/files/osfstorage/?filter[tags]=cat'.format(
API_BASE, self.node._id
),
auth=self.user.auth
)
nt.assert_equal(len(res.json.get('data')), 1)
|
monikagrabowska/osf.io
|
api_tests/files/views/test_file_list.py
|
Python
|
apache-2.0
| 4,210
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``dag_id``/``state`` index on ``dag_run`` table
Revision ID: 127d2bf2dfa7
Revises: 5e7d17757c7a
Create Date: 2017-01-25 11:43:51.635667
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '127d2bf2dfa7'
down_revision = '5e7d17757c7a'
branch_labels = None
depends_on = None
airflow_version = '1.7.1.3'
def upgrade():
op.create_index('dag_id_state', 'dag_run', ['dag_id', 'state'], unique=False)
def downgrade():
op.drop_index('dag_id_state', table_name='dag_run')
|
apache/airflow
|
airflow/migrations/versions/127d2bf2dfa7_add_dag_id_state_index_on_dag_run_table.py
|
Python
|
apache-2.0
| 1,300
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sequences_lib."""
import copy
# internal imports
import tensorflow as tf
from magenta.common import testing_lib as common_testing_lib
from magenta.music import sequences_lib
from magenta.music import testing_lib
from magenta.protobuf import music_pb2
class SequencesLibTest(tf.test.TestCase):
def setUp(self):
self.maxDiff = None
self.steps_per_quarter = 4
self.note_sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
tempos: {
qpm: 60}""")
def testTrimNoteSequence(self):
sequence = copy.copy(self.note_sequence)
testing_lib.add_track_to_sequence(
sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
expected_subsequence = copy.copy(self.note_sequence)
testing_lib.add_track_to_sequence(
expected_subsequence, 0,
[(40, 45, 2.50, 3.50), (55, 120, 4.0, 4.01)])
expected_subsequence.total_time = 4.75
subsequence = sequences_lib.trim_note_sequence(sequence, 2.5, 4.75)
self.assertProtoEquals(expected_subsequence, subsequence)
def testExtractSubsequence(self):
sequence = copy.copy(self.note_sequence)
testing_lib.add_track_to_sequence(
sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
testing_lib.add_chords_to_sequence(
sequence, [('C', 1.5), ('G7', 3.0), ('F', 4.8)])
expected_subsequence = copy.copy(self.note_sequence)
testing_lib.add_track_to_sequence(
expected_subsequence, 0,
[(40, 45, 0.0, 1.0), (55, 120, 1.5, 1.51)])
testing_lib.add_chords_to_sequence(
expected_subsequence, [('C', 0.0), ('G7', 0.5)])
expected_subsequence.total_time = 2.25
expected_subsequence.subsequence_info.start_time_offset = 2.5
expected_subsequence.subsequence_info.end_time_offset = 5.25
subsequence = sequences_lib.extract_subsequence(sequence, 2.5, 4.75)
self.assertProtoEquals(expected_subsequence, subsequence)
def testSplitNoteSequenceNoTimeChanges(self):
sequence = copy.copy(self.note_sequence)
testing_lib.add_track_to_sequence(
sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
testing_lib.add_chords_to_sequence(
sequence, [('C', 1.5), ('G7', 3.0), ('F', 4.8)])
expected_subsequence = music_pb2.NoteSequence()
expected_subsequence.CopyFrom(sequence)
expected_subsequence.subsequence_info.start_time_offset = 0.0
expected_subsequence.subsequence_info.end_time_offset = 0.0
subsequences = sequences_lib.split_note_sequence_on_time_changes(sequence)
self.assertEquals(1, len(subsequences))
self.assertProtoEquals(expected_subsequence, subsequences[0])
def testSplitNoteSequenceDuplicateTimeChanges(self):
sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
time_signatures: {
time: 2.0
numerator: 4
denominator: 4}
tempos: {
qpm: 60}""")
testing_lib.add_track_to_sequence(
sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
testing_lib.add_chords_to_sequence(
sequence, [('C', 1.5), ('G7', 3.0), ('F', 4.8)])
expected_subsequence = music_pb2.NoteSequence()
expected_subsequence.CopyFrom(sequence)
expected_subsequence.subsequence_info.start_time_offset = 0.0
expected_subsequence.subsequence_info.end_time_offset = 0.0
subsequences = sequences_lib.split_note_sequence_on_time_changes(sequence)
self.assertEquals(1, len(subsequences))
self.assertProtoEquals(expected_subsequence, subsequences[0])
def testSplitNoteSequenceCoincidentTimeChanges(self):
sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
time_signatures: {
time: 2.0
numerator: 3
denominator: 4}
tempos: {
qpm: 60}
tempos: {
time: 2.0
qpm: 80}""")
testing_lib.add_track_to_sequence(
sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
testing_lib.add_chords_to_sequence(
sequence, [('C', 1.5), ('G7', 3.0), ('F', 4.8)])
expected_subsequence_1 = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
tempos: {
qpm: 60}""")
testing_lib.add_track_to_sequence(
expected_subsequence_1, 0,
[(12, 100, 0.01, 2.0), (11, 55, 0.22, 0.50)])
testing_lib.add_chords_to_sequence(
expected_subsequence_1, [('C', 1.5)])
expected_subsequence_1.total_time = 2.0
expected_subsequence_1.subsequence_info.end_time_offset = 8.0
expected_subsequence_2 = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 3
denominator: 4}
tempos: {
qpm: 80}""")
testing_lib.add_track_to_sequence(
expected_subsequence_2, 0,
[(40, 45, 0.50, 1.50), (55, 120, 2.0, 2.01), (52, 99, 2.75, 3.0)])
testing_lib.add_chords_to_sequence(
expected_subsequence_2, [('C', 0.0), ('G7', 1.0), ('F', 2.8)])
expected_subsequence_2.total_time = 8.0
expected_subsequence_2.subsequence_info.start_time_offset = 2.0
subsequences = sequences_lib.split_note_sequence_on_time_changes(sequence)
self.assertEquals(2, len(subsequences))
self.assertProtoEquals(expected_subsequence_1, subsequences[0])
self.assertProtoEquals(expected_subsequence_2, subsequences[1])
def testSplitNoteSequenceMultipleTimeChangesNoSplitNotes(self):
sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
time_signatures: {
time: 2.0
numerator: 3
denominator: 4}
tempos: {
qpm: 60}
tempos: {
time: 4.25
qpm: 80}""")
testing_lib.add_track_to_sequence(
sequence, 0,
[(12, 100, 0.01, 3.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
testing_lib.add_chords_to_sequence(
sequence, [('C', 1.5), ('G7', 3.0), ('F', 4.8)])
expected_subsequence_1 = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
time_signatures: {
time: 2.0
numerator: 3
denominator: 4}
tempos: {
qpm: 60}""")
testing_lib.add_track_to_sequence(
expected_subsequence_1, 0,
[(12, 100, 0.01, 3.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01)])
testing_lib.add_chords_to_sequence(
expected_subsequence_1, [('C', 1.5), ('G7', 3.0)])
expected_subsequence_1.total_time = 4.25
expected_subsequence_1.subsequence_info.end_time_offset = 0.75
expected_subsequence_2 = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 3
denominator: 4}
tempos: {
qpm: 80}""")
testing_lib.add_track_to_sequence(
expected_subsequence_2, 0, [(52, 99, 0.5, 0.75)])
testing_lib.add_chords_to_sequence(expected_subsequence_2, [
('G7', 0.0), ('F', 0.55)])
expected_subsequence_2.total_time = 0.75
expected_subsequence_2.subsequence_info.start_time_offset = 4.25
subsequences = sequences_lib.split_note_sequence_on_time_changes(
sequence, split_notes=False)
self.assertEquals(2, len(subsequences))
self.assertProtoEquals(expected_subsequence_1, subsequences[0])
self.assertProtoEquals(expected_subsequence_2, subsequences[1])
def testSplitNoteSequenceMultipleTimeChanges(self):
sequence = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
time_signatures: {
time: 2.0
numerator: 3
denominator: 4}
tempos: {
qpm: 60}
tempos: {
time: 4.25
qpm: 80}""")
testing_lib.add_track_to_sequence(
sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
testing_lib.add_chords_to_sequence(
sequence, [('C', 1.5), ('G7', 3.0), ('F', 4.8)])
expected_subsequence_1 = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 4
denominator: 4}
tempos: {
qpm: 60}""")
testing_lib.add_track_to_sequence(
expected_subsequence_1, 0,
[(12, 100, 0.01, 2.0), (11, 55, 0.22, 0.50)])
testing_lib.add_chords_to_sequence(
expected_subsequence_1, [('C', 1.5)])
expected_subsequence_1.total_time = 2.0
expected_subsequence_1.subsequence_info.end_time_offset = 8.0
expected_subsequence_2 = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 3
denominator: 4}
tempos: {
qpm: 60}""")
testing_lib.add_track_to_sequence(
expected_subsequence_2, 0,
[(40, 45, 0.50, 1.50), (55, 120, 2.0, 2.01)])
testing_lib.add_chords_to_sequence(
expected_subsequence_2, [('C', 0.0), ('G7', 1.0)])
expected_subsequence_2.total_time = 2.25
expected_subsequence_2.subsequence_info.start_time_offset = 2.0
expected_subsequence_2.subsequence_info.end_time_offset = 5.75
expected_subsequence_3 = common_testing_lib.parse_test_proto(
music_pb2.NoteSequence,
"""
time_signatures: {
numerator: 3
denominator: 4}
tempos: {
qpm: 80}""")
testing_lib.add_track_to_sequence(
expected_subsequence_3, 0,
[(52, 99, 0.5, 0.75)])
testing_lib.add_chords_to_sequence(
expected_subsequence_3, [('G7', 0.0), ('F', 0.55)])
expected_subsequence_3.total_time = 5.75
expected_subsequence_3.subsequence_info.start_time_offset = 4.25
subsequences = sequences_lib.split_note_sequence_on_time_changes(sequence)
self.assertEquals(3, len(subsequences))
self.assertProtoEquals(expected_subsequence_1, subsequences[0])
self.assertProtoEquals(expected_subsequence_2, subsequences[1])
self.assertProtoEquals(expected_subsequence_3, subsequences[2])
def testQuantizeNoteSequence(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
testing_lib.add_chords_to_sequence(
self.note_sequence,
[('B7', 0.22), ('Em9', 4.0)])
expected_quantized_sequence = copy.deepcopy(self.note_sequence)
expected_quantized_sequence.quantization_info.steps_per_quarter = (
self.steps_per_quarter)
testing_lib.add_quantized_steps_to_sequence(
expected_quantized_sequence,
[(0, 40), (1, 2), (10, 14), (16, 17), (19, 20)])
testing_lib.add_quantized_chord_steps_to_sequence(
expected_quantized_sequence, [1, 16])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, steps_per_quarter=self.steps_per_quarter)
self.assertProtoEquals(expected_quantized_sequence, quantized_sequence)
def testAssertIsQuantizedNoteSequence(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, steps_per_quarter=self.steps_per_quarter)
sequences_lib.assert_is_quantized_sequence(quantized_sequence)
with self.assertRaises(sequences_lib.QuantizationStatusException):
sequences_lib.assert_is_quantized_sequence(self.note_sequence)
def testQuantizeNoteSequence_TimeSignatureChange(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
del self.note_sequence.time_signatures[:]
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Single time signature.
self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=0)
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Multiple time signatures with no change.
self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=1)
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Time signature change.
self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
def testQuantizeNoteSequence_ImplicitTimeSignatureChange(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
del self.note_sequence.time_signatures[:]
# No time signature.
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Implicit time signature change.
self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
def testQuantizeNoteSequence_NoImplicitTimeSignatureChangeOutOfOrder(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
del self.note_sequence.time_signatures[:]
# No time signature.
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# No implicit time signature change, but time signatures are added out of
# order.
self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=0)
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
def testStepsPerQuarterToStepsPerSecond(self):
self.assertEqual(
4.0, sequences_lib.steps_per_quarter_to_steps_per_second(4, 60.0))
def testQuantizeToStep(self):
self.assertEqual(
32, sequences_lib.quantize_to_step(8.0001, 4))
self.assertEqual(
34, sequences_lib.quantize_to_step(8.4999, 4))
self.assertEqual(
33, sequences_lib.quantize_to_step(8.4999, 4, quantize_cutoff=1.0))
def testFromNoteSequence_TempoChange(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
del self.note_sequence.tempos[:]
# No tempos.
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Single tempo.
self.note_sequence.tempos.add(qpm=60, time=0)
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Multiple tempos with no change.
self.note_sequence.tempos.add(qpm=60, time=1)
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Tempo change.
self.note_sequence.tempos.add(qpm=120, time=2)
with self.assertRaises(sequences_lib.MultipleTempoException):
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
def testFromNoteSequence_ImplicitTempoChange(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
del self.note_sequence.tempos[:]
# No tempo.
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# Implicit tempo change.
self.note_sequence.tempos.add(qpm=60, time=2)
with self.assertRaises(sequences_lib.MultipleTempoException):
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
def testFromNoteSequence_NoImplicitTempoChangeOutOfOrder(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
(55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
del self.note_sequence.tempos[:]
# No tempo.
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
# No implicit tempo change, but tempos are added out of order.
self.note_sequence.tempos.add(qpm=60, time=2)
self.note_sequence.tempos.add(qpm=60, time=0)
sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
def testRounding(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 1,
[(12, 100, 0.01, 0.24), (11, 100, 0.22, 0.55), (40, 100, 0.50, 0.75),
(41, 100, 0.689, 1.18), (44, 100, 1.19, 1.69), (55, 100, 4.0, 4.01)])
expected_quantized_sequence = copy.deepcopy(self.note_sequence)
expected_quantized_sequence.quantization_info.steps_per_quarter = (
self.steps_per_quarter)
testing_lib.add_quantized_steps_to_sequence(
expected_quantized_sequence,
[(0, 1), (1, 2), (2, 3), (3, 5), (5, 7), (16, 17)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
self.assertProtoEquals(expected_quantized_sequence, quantized_sequence)
def testMultiTrack(self):
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)])
testing_lib.add_track_to_sequence(
self.note_sequence, 3,
[(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)])
testing_lib.add_track_to_sequence(
self.note_sequence, 7,
[(12, 100, 1.0, 5.0), (19, 100, 2.0, 4.0), (24, 100, 3.0, 3.5)])
expected_quantized_sequence = copy.deepcopy(self.note_sequence)
expected_quantized_sequence.quantization_info.steps_per_quarter = (
self.steps_per_quarter)
testing_lib.add_quantized_steps_to_sequence(
expected_quantized_sequence,
[(4, 16), (4, 12), (4, 16), (8, 20), (4, 20), (8, 16), (12, 14)])
quantized_sequence = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
self.assertProtoEquals(expected_quantized_sequence, quantized_sequence)
def testStepsPerBar(self):
qns = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
self.assertEqual(16, sequences_lib.steps_per_bar_in_quantized_sequence(qns))
self.note_sequence.time_signatures[0].numerator = 6
self.note_sequence.time_signatures[0].denominator = 8
qns = sequences_lib.quantize_note_sequence(
self.note_sequence, self.steps_per_quarter)
self.assertEqual(12.0,
sequences_lib.steps_per_bar_in_quantized_sequence(qns))
def testApplySustainControlChanges(self):
sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(
sequence, 0,
[(0.0, 64, 127), (0.75, 64, 0), (2.0, 64, 127), (3.0, 64, 0),
(3.75, 64, 127), (4.5, 64, 127), (4.8, 64, 0), (4.9, 64, 127),
(6.0, 64, 0)])
testing_lib.add_track_to_sequence(
sequence, 1,
[(12, 100, 0.01, 10.0), (52, 99, 4.75, 5.0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(
sequence, 0,
[(11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50), (55, 120, 4.0, 4.01)])
testing_lib.add_track_to_sequence(
expected_sequence, 0,
[(11, 55, 0.22, 0.75), (40, 45, 2.50, 3.50), (55, 120, 4.0, 4.8)])
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(expected_sequence, sus_sequence)
def testTranspositionPipeline(self):
tp = sequences_lib.TranspositionPipeline(range(0, 2))
testing_lib.add_track_to_sequence(
self.note_sequence, 0,
[(12, 100, 1.0, 4.0)])
transposed = tp.transform(self.note_sequence)
self.assertEqual(2, len(transposed))
self.assertEqual(12, transposed[0].notes[0].pitch)
self.assertEqual(13, transposed[1].notes[0].pitch)
if __name__ == '__main__':
tf.test.main()
|
YoshikawaMasashi/magenta
|
magenta/music/sequences_lib_test.py
|
Python
|
apache-2.0
| 22,295
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import time
import numpy as np
os.environ[str("FLAGS_check_nan_inf")] = str("1")
os.environ[str("GLOG_vmodule")] = str("nan_inf_utils_detail=10")
import paddle
import paddle.nn as nn
np.random.seed(0)
def generator():
batch_size = 5
for i in range(5):
curr_train_x = np.random.randint(
batch_size, size=(batch_size, 3)).astype("float32")
if i >= 2:
curr_train_x[0, :] = np.nan
curr_train_x[-1, :] = np.inf
res = []
for i in range(batch_size):
y = i % 3
res.append([y])
y_label = np.array(res).astype('int64')
yield [curr_train_x, y_label]
class TestLayer(nn.Layer):
def __init__(self):
super(TestLayer, self).__init__()
self.linear1 = nn.Linear(3, 400)
self.linear2 = nn.Linear(400, 400)
self.linear3 = nn.Linear(400, 3)
def forward(self, x):
x = self.linear1(x)
x = nn.functional.sigmoid(x)
x = self.linear2(x)
x = nn.functional.sigmoid(x)
x = self.linear3(x)
x = nn.functional.softmax(x)
return x
def check(use_cuda):
paddle.set_device('gpu' if use_cuda else 'cpu')
net = TestLayer()
sgd = paddle.optimizer.SGD(learning_rate=0.05, parameters=net.parameters())
for step, (x, y) in enumerate(generator()):
x = paddle.to_tensor(x)
y = paddle.to_tensor(y)
zero = paddle.zeros(shape=[1], dtype='int64')
fp16_zero = paddle.cast(zero, dtype='float16')
y = y + zero
y_pred = net(x)
cost = nn.functional.cross_entropy(y_pred, y, use_softmax=False)
avg_cost = paddle.mean(cost)
acc_top1 = paddle.metric.accuracy(input=y_pred, label=y, k=1)
print('iter={:.0f}, cost={}, acc1={}'.format(
step, avg_cost.numpy(), acc_top1.numpy()))
sgd.step()
sgd.clear_grad()
if __name__ == '__main__':
if paddle.is_compiled_with_cuda():
try:
check(use_cuda=True)
assert False
except Exception as e:
print(e)
print(type(e))
            # Note: an enforce failure raised inside a CUDA kernel may not be
            # caught by Paddle as an OSError; in that case the exception type
            # is RuntimeError.
assert type(e) == OSError or type(e) == RuntimeError
try:
check(use_cuda=False)
assert False
except Exception as e:
print(e)
print(type(e))
assert type(e) == RuntimeError
|
luotao1/Paddle
|
python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py
|
Python
|
apache-2.0
| 3,187
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, Optional, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.batch_client import BatchClientHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class BatchSensor(BaseSensorOperator):
"""
Asks for the state of the Batch Job execution until it reaches a failure state or success state.
If the job fails, the task will fail.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:BatchSensor`
:param job_id: Batch job_id to check the state for
:param aws_conn_id: aws connection to use, defaults to 'aws_default'
"""
template_fields: Sequence[str] = ('job_id',)
template_ext: Sequence[str] = ()
ui_color = '#66c3ff'
def __init__(
self,
*,
job_id: str,
aws_conn_id: str = 'aws_default',
region_name: Optional[str] = None,
**kwargs,
):
super().__init__(**kwargs)
self.job_id = job_id
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.hook: Optional[BatchClientHook] = None
def poke(self, context: 'Context') -> bool:
job_description = self.get_hook().get_job_description(self.job_id)
state = job_description['status']
if state == BatchClientHook.SUCCESS_STATE:
return True
if state in BatchClientHook.INTERMEDIATE_STATES:
return False
if state == BatchClientHook.FAILURE_STATE:
raise AirflowException(f'Batch sensor failed. AWS Batch job status: {state}')
raise AirflowException(f'Batch sensor failed. Unknown AWS Batch job status: {state}')
def get_hook(self) -> BatchClientHook:
"""Create and return a BatchClientHook"""
if self.hook:
return self.hook
self.hook = BatchClientHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
)
return self.hook
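# Hedged usage sketch (not part of the original provider module): instantiating
# the sensor as it would appear inside a DAG definition. The task_id and job_id
# below are placeholders; see the how-to guide linked in the class docstring.
def _demo_batch_sensor() -> BatchSensor:
    return BatchSensor(
        task_id='wait_for_batch_job',
        job_id='00000000-0000-0000-0000-000000000000',  # placeholder AWS Batch job id
        aws_conn_id='aws_default',
    )
    # When scheduled, poke() polls the job description until it reports success,
    # returns False while the job is in an intermediate state, and raises on failure.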
|
apache/airflow
|
airflow/providers/amazon/aws/sensors/batch.py
|
Python
|
apache-2.0
| 2,895
|
from django import template
register = template.Library()
@register.filter(name='re_poject_vector')
def re_poject_vector(lists):
    print(lists)  # debug output only; the filter currently returns None
    # return dict['file_name']
@register.filter(name='file_format')
def re_rpoject_raster(dict):
return dict['file_format']
|
gismanli/gisweb
|
gisweb_app/templatetags/re-project.py
|
Python
|
apache-2.0
| 275
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_name(name: str) -> str:
# Sample function parameter name in get_model_evaluation_text_sentiment_analysis_sample
name = name
return name
|
googleapis/python-aiplatform
|
.sample_configs/param_handlers/get_model_evaluation_text_sentiment_analysis_sample.py
|
Python
|
apache-2.0
| 736
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Functions for converting python distributions to rez packages.
"""
from __future__ import print_function
from rez.exceptions import RezSystemError
import pkg_resources
import shutil
import sys
import os
import os.path
import textwrap
def _mkdirs(*dirs):
path = os.path.join(*dirs)
if not os.path.exists(path):
os.makedirs(path)
return path
def convert_name(name):
""" Convert a python distribution name into a rez-safe package name."""
return name.replace('-', '_')
# TODO: change this when version submod is rewritten
# This is just a temporary simplistic implementation for now
def convert_version(version):
"""Convert a python distribution version into a rez-safe version string."""
"""
version = version.replace('-','.')
version = version.lower()
version = re.sub("[a-z]", "", version)
version = version.replace("..", '.')
version = version.replace("..", '.')
version = version.replace("..", '.')
return version
"""
return str(version)
# TODO: add native Requirement conversion support into new version submod
def convert_requirement(req):
"""
Converts a pkg_resources.Requirement object into a list of Rez package
request strings.
"""
pkg_name = convert_name(req.project_name)
if not req.specs:
return [pkg_name]
req_strs = []
for spec in req.specs:
op, ver = spec
ver = convert_version(ver)
if op == "<":
r = "%s-0+<%s" % (pkg_name, ver)
req_strs.append(r)
elif op == "<=":
r = "%s-0+<%s|%s" % (pkg_name, ver, ver)
req_strs.append(r)
elif op == "==":
r = "%s-%s" % (pkg_name, ver)
req_strs.append(r)
elif op == ">=":
r = "%s-%s+" % (pkg_name, ver)
req_strs.append(r)
elif op == ">":
r1 = "%s-%s+" % (pkg_name, ver)
r2 = "!%s-%s" % (pkg_name, ver)
req_strs.append(r1)
req_strs.append(r2)
elif op == "!=":
r = "!%s-%s" % (pkg_name, ver)
req_strs.append(r)
else:
print("Warning: Can't understand op '%s', just depending on "
"unversioned package..." % op,
file=sys.stderr)
req_strs.append(pkg_name)
return req_strs
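# Hedged worked example (not part of the original module): how the operator
# mapping above translates a typical requirement. The package name and version
# numbers are made up; spec ordering follows pkg_resources' parse order.
def _demo_convert_requirement():
    demo_req = pkg_resources.Requirement.parse("Foo-Bar>=1.2,<2.0")
    # Expected to yield something like ['Foo_Bar-1.2+', 'Foo_Bar-0+<2.0']:
    # '>=' becomes a lower-bounded range and '<' an upper bound, with the '-'
    # in the distribution name rewritten to '_' by convert_name().
    return convert_requirement(demo_req)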
def get_dist_dependencies(name, recurse=True):
"""
Get the dependencies of the given, already installed distribution.
@param recurse If True, recursively find all dependencies.
@returns A set of package names.
@note The first entry in the list is always the top-level package itself.
"""
dist = pkg_resources.get_distribution(name)
pkg_name = convert_name(dist.project_name)
reqs = set()
working = set([dist])
depth = 0
while working:
deps = set()
for distname in working:
dist = pkg_resources.get_distribution(distname)
pkg_name = convert_name(dist.project_name)
reqs.add(pkg_name)
for req in dist.requires():
reqs_ = convert_requirement(req)
deps |= set(x.split('-', 1)[0] for x in reqs_
if not x.startswith('!'))
working = deps - reqs
depth += 1
if (not recurse) and (depth >= 2):
break
return reqs
# TODO: doesn't deal with executable scripts yet
def convert_dist(name, dest_path, make_variant=True, ignore_dirs=None,
python_requirement="major_minor"):
"""Convert an already installed python distribution into a rez package.
Args:
dest_path (str): Where to put the rez package. The package will be
created under dest_path/<NAME>/<VERSION>/.
make_variant (bool): If True, makes a single variant in the rez package
based on the MAJOR.MINOR version of python.
        ignore_dirs (list of str): List of directory names not to copy from the dist.
python_requirement (str): How the package should depend on python.
One of:
- "major": depend on python-X
- "major_minor": depend on python-X.X
- any other value: this string is used as the literal version
range string.
Returns:
Install path of the new Rez package.
"""
dist = pkg_resources.get_distribution(name)
pkg_name = convert_name(dist.project_name)
pkg_version = convert_version(dist.version)
if python_requirement == "major":
pyver = str(sys.version_info[0])
elif python_requirement == "major_minor":
pyver = '.'.join(str(x) for x in sys.version_info[:2])
else:
pyver = python_requirement
pypkg = "python-%s" % pyver
pkg_requires = []
if not make_variant:
pkg_requires.append(pypkg)
for req in dist.requires():
pkg_requires += convert_requirement(req)
pkg_path = _mkdirs(dest_path, pkg_name, pkg_version)
pkg_file = os.path.join(pkg_path, "package.py")
root_path = _mkdirs(pkg_path, pypkg) if make_variant else pkg_path
basename = os.path.basename(dist.location)
is_egg = (os.path.splitext(basename)[1] == ".egg")
if os.path.isdir(dist.location):
if is_egg:
# this is an egg-dir
for file in os.listdir(dist.location):
fpath = os.path.join(dist.location, file)
if os.path.isfile(fpath):
shutil.copy(fpath, root_path)
else:
                    shutil.copytree(fpath, os.path.join(root_path, file),
                                    # expand the list so each name is its own
                                    # pattern, and tolerate ignore_dirs=None
                                    ignore=shutil.ignore_patterns(*(ignore_dirs or [])))
else:
# this is a site dir
egginfo_dir = "%s.egg-info" % dist.egg_name()
eggpath = os.path.join(dist.location, egginfo_dir)
file = os.path.join(eggpath, "installed-files.txt")
if not os.path.isfile(file):
raise RezSystemError(
"There is not enough information on disk to convert the "
"python distribution '%s' into a Rez package. The distribution "
"is installed to a common site, but the installed file "
"information is not present." % name)
with open(file) as f:
installed_files = f.read().strip().split()
dirs = set()
files = set()
for file in installed_files:
path = os.path.join(eggpath, file)
path = os.path.realpath(path)
if os.path.isfile(path) and path.startswith(dist.location + os.sep):
dir_ = os.path.dirname(path)
if ignore_dirs:
reldir = os.path.relpath(dir_, dist.location)
if set(reldir.split(os.sep)) & set(ignore_dirs):
continue
files.add(path)
dirs.add(dir_)
def _dst(p):
dst = os.path.relpath(p, dist.location)
dst = os.path.join(root_path, dst)
return os.path.realpath(dst)
for dir_ in dirs:
dst_dir = _dst(dir_)
_mkdirs(dst_dir)
for file in files:
dst_file = _dst(file)
shutil.copy(file, dst_file)
else:
# this is an egg-file
import zipfile
assert(is_egg and os.path.isfile(dist.location))
assert(zipfile.is_zipfile(dist.location))
z = zipfile.ZipFile(dist.location)
z.extractall(root_path)
variants_str = "[['%s']]" % pypkg if make_variant else ''
content = textwrap.dedent(
"""
config_version = 0
name = '%(name)s'
version = '%(version)s'
%(variants)s
requires = %(requires)s
def commands():
env.PYTHONPATH.append('{this.root}')
""" % dict(
name=pkg_name,
version=pkg_version,
variants=variants_str,
requires=str(pkg_requires)))
content = content.strip() + '\n'
with open(pkg_file, 'w') as f:
f.write(content)
return pkg_path
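# Hedged usage sketch (not part of the original module): converting an installed
# distribution into a rez package. The distribution name and destination path
# are illustrative assumptions; the distribution must already be installed.
def _demo_convert_dist():
    return convert_dist(
        "requests",                  # any pip-installed distribution visible to pkg_resources
        "/tmp/rez_packages",         # packages land under <dest_path>/<NAME>/<VERSION>/
        make_variant=True,           # add a python-X.X variant directory
        ignore_dirs=["tests"],
        python_requirement="major_minor",
    )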
|
instinct-vfx/rez
|
src/rez/utils/py_dist.py
|
Python
|
apache-2.0
| 8,287
|
# sql/operators.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines operators used in SQL expressions."""
from operator import add
from operator import and_
from operator import contains
from operator import eq
from operator import ge
from operator import getitem
from operator import gt
from operator import inv
from operator import le
from operator import lshift
from operator import lt
from operator import mod
from operator import mul
from operator import ne
from operator import neg
from operator import or_
from operator import rshift
from operator import sub
from operator import truediv
from .. import util
if util.py2k:
from operator import div
else:
div = truediv
class Operators(object):
"""Base of comparison and logical operators.
Implements base methods
:meth:`~sqlalchemy.sql.operators.Operators.operate` and
:meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as
:meth:`~sqlalchemy.sql.operators.Operators.__and__`,
:meth:`~sqlalchemy.sql.operators.Operators.__or__`,
:meth:`~sqlalchemy.sql.operators.Operators.__invert__`.
Usually is used via its most common subclass
:class:`.ColumnOperators`.
"""
__slots__ = ()
def __and__(self, other):
"""Implement the ``&`` operator.
When used with SQL expressions, results in an
AND operation, equivalent to
:func:`_expression.and_`, that is::
a & b
is equivalent to::
from sqlalchemy import and_
and_(a, b)
Care should be taken when using ``&`` regarding
operator precedence; the ``&`` operator has the highest precedence.
The operands should be enclosed in parenthesis if they contain
further sub expressions::
(a == 2) & (b == 4)
"""
return self.operate(and_, other)
def __or__(self, other):
"""Implement the ``|`` operator.
When used with SQL expressions, results in an
OR operation, equivalent to
:func:`_expression.or_`, that is::
a | b
is equivalent to::
from sqlalchemy import or_
or_(a, b)
Care should be taken when using ``|`` regarding
operator precedence; the ``|`` operator has the highest precedence.
The operands should be enclosed in parenthesis if they contain
further sub expressions::
(a == 2) | (b == 4)
"""
return self.operate(or_, other)
def __invert__(self):
"""Implement the ``~`` operator.
When used with SQL expressions, results in a
NOT operation, equivalent to
:func:`_expression.not_`, that is::
~a
is equivalent to::
from sqlalchemy import not_
not_(a)
"""
return self.operate(inv)
def op(
self, opstring, precedence=0, is_comparison=False, return_type=None
):
"""produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in ``somecolumn``.
:param operator: a string which will be output as the infix operator
between this element and the expression passed to the
generated function.
:param precedence: precedence to apply to the operator, when
parenthesizing expressions. A lower number will cause the expression
to be parenthesized when applied against another operator with
higher precedence. The default value of ``0`` is lower than all
operators except for the comma (``,``) and ``AS`` operators.
A value of 100 will be higher or equal to all operators, and -100
will be lower than or equal to all operators.
:param is_comparison: if True, the operator will be considered as a
"comparison" operator, that is which evaluates to a boolean
true/false value, like ``==``, ``>``, etc. This flag should be set
so that ORM relationships can establish that the operator is a
comparison operator when used in a custom join condition.
.. versionadded:: 0.9.2 - added the
:paramref:`.Operators.op.is_comparison` flag.
:param return_type: a :class:`.TypeEngine` class or object that will
force the return type of an expression produced by this operator
to be of that type. By default, operators that specify
:paramref:`.Operators.op.is_comparison` will resolve to
:class:`.Boolean`, and those that do not will be of the same
type as the left-hand operand.
.. versionadded:: 1.2.0b3 - added the
:paramref:`.Operators.op.return_type` argument.
.. seealso::
:ref:`types_operators`
:ref:`relationship_custom_operator`
"""
operator = custom_op(opstring, precedence, is_comparison, return_type)
def against(other):
return operator(self, other)
return against
def bool_op(self, opstring, precedence=0):
"""Return a custom boolean operator.
This method is shorthand for calling
:meth:`.Operators.op` and passing the
:paramref:`.Operators.op.is_comparison`
flag with True.
.. versionadded:: 1.2.0b3
.. seealso::
:meth:`.Operators.op`
"""
return self.op(opstring, precedence=precedence, is_comparison=True)
def operate(self, op, *other, **kwargs):
r"""Operate on an argument.
This is the lowest level of operation, raises
:class:`NotImplementedError` by default.
Overriding this on a subclass can allow common
behavior to be applied to all operations.
For example, overriding :class:`.ColumnOperators`
to apply ``func.lower()`` to the left and right
side::
class MyComparator(ColumnOperators):
def operate(self, op, other):
return op(func.lower(self), func.lower(other))
:param op: Operator callable.
:param \*other: the 'other' side of the operation. Will
be a single scalar for most operations.
:param \**kwargs: modifiers. These may be passed by special
operators such as :meth:`ColumnOperators.contains`.
"""
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
"""Reverse operate on an argument.
Usage is the same as :meth:`operate`.
"""
raise NotImplementedError(str(op))
class custom_op(object):
"""Represent a 'custom' operator.
:class:`.custom_op` is normally instantiated when the
:meth:`.Operators.op` or :meth:`.Operators.bool_op` methods
are used to create a custom operator callable. The class can also be
used directly when programmatically constructing expressions. E.g.
to represent the "factorial" operation::
from sqlalchemy.sql import UnaryExpression
from sqlalchemy.sql import operators
from sqlalchemy import Numeric
unary = UnaryExpression(table.c.somecolumn,
modifier=operators.custom_op("!"),
type_=Numeric)
.. seealso::
:meth:`.Operators.op`
:meth:`.Operators.bool_op`
"""
__name__ = "custom_op"
def __init__(
self,
opstring,
precedence=0,
is_comparison=False,
return_type=None,
natural_self_precedent=False,
eager_grouping=False,
):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
self.natural_self_precedent = natural_self_precedent
self.eager_grouping = eager_grouping
self.return_type = (
return_type._to_instance(return_type) if return_type else None
)
def __eq__(self, other):
return isinstance(other, custom_op) and other.opstring == self.opstring
def __hash__(self):
return id(self)
def __call__(self, left, right, **kw):
return left.operate(self, right, **kw)
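# Hedged sketch (not part of the original module): the public entry points
# above used against a Core column. Table and column names are illustrative,
# and the rendered SQL depends on the dialect in use.
def _demo_custom_operators():
    from sqlalchemy import Column, Integer, MetaData, Table
    t = Table("demo", MetaData(), Column("x", Integer))
    starred = t.c.x.op("*")(5)                    # generic infix operator via Operators.op()
    masked = t.c.x.op("&", precedence=3)(0xFF)    # explicit bitwise AND with chosen precedence
    flagged = t.c.x.bool_op(">>")(2)              # comparison-flavoured custom operator
    return starred, masked, flagged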
class ColumnOperators(Operators):
"""Defines boolean, comparison, and other operators for
:class:`_expression.ColumnElement` expressions.
By default, all methods call down to
:meth:`.operate` or :meth:`.reverse_operate`,
passing in the appropriate operator function from the
Python builtin ``operator`` module or
a SQLAlchemy-specific operator function from
:mod:`sqlalchemy.expression.operators`. For example
the ``__eq__`` function::
def __eq__(self, other):
return self.operate(operators.eq, other)
Where ``operators.eq`` is essentially::
def eq(a, b):
return a == b
The core column expression unit :class:`_expression.ColumnElement`
overrides :meth:`.Operators.operate` and others
to return further :class:`_expression.ColumnElement` constructs,
so that the ``==`` operation above is replaced by a clause
construct.
.. seealso::
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
:class:`.ColumnOperators`
:class:`.PropComparator`
"""
__slots__ = ()
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
"""Implement the ``<`` operator.
In a column context, produces the clause ``a < b``.
"""
return self.operate(lt, other)
def __le__(self, other):
"""Implement the ``<=`` operator.
In a column context, produces the clause ``a <= b``.
"""
return self.operate(le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
"""Implement the ``==`` operator.
In a column context, produces the clause ``a = b``.
If the target is ``None``, produces ``a IS NULL``.
"""
return self.operate(eq, other)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a column context, produces the clause ``a != b``.
If the target is ``None``, produces ``a IS NOT NULL``.
"""
return self.operate(ne, other)
def is_distinct_from(self, other):
"""Implement the ``IS DISTINCT FROM`` operator.
Renders "a IS DISTINCT FROM b" on most platforms;
on some such as SQLite may render "a IS NOT b".
.. versionadded:: 1.1
"""
return self.operate(is_distinct_from, other)
def isnot_distinct_from(self, other):
"""Implement the ``IS NOT DISTINCT FROM`` operator.
Renders "a IS NOT DISTINCT FROM b" on most platforms;
on some such as SQLite may render "a IS b".
.. versionadded:: 1.1
"""
return self.operate(isnot_distinct_from, other)
def __gt__(self, other):
"""Implement the ``>`` operator.
In a column context, produces the clause ``a > b``.
"""
return self.operate(gt, other)
def __ge__(self, other):
"""Implement the ``>=`` operator.
In a column context, produces the clause ``a >= b``.
"""
return self.operate(ge, other)
def __neg__(self):
"""Implement the ``-`` operator.
In a column context, produces the clause ``-a``.
"""
return self.operate(neg)
def __contains__(self, other):
return self.operate(contains, other)
def __getitem__(self, index):
"""Implement the [] operator.
This can be used by some database-specific types
such as PostgreSQL ARRAY and HSTORE.
"""
return self.operate(getitem, index)
def __lshift__(self, other):
"""implement the << operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
<< as an extension point.
"""
return self.operate(lshift, other)
def __rshift__(self, other):
"""implement the >> operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
>> as an extension point.
"""
return self.operate(rshift, other)
def concat(self, other):
"""Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
"""
return self.operate(concat_op, other)
def like(self, other, escape=None):
r"""Implement the ``like`` operator.
In a column context, produces the expression::
a LIKE other
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.like("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.like("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(like_op, other, escape=escape)
def ilike(self, other, escape=None):
r"""Implement the ``ilike`` operator, e.g. case insensitive LIKE.
In a column context, produces an expression either of the form::
lower(a) LIKE lower(other)
Or on backends that support the ILIKE operator::
a ILIKE other
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.ilike("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.ilike("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(ilike_op, other, escape=escape)
def in_(self, other):
"""Implement the ``in`` operator.
In a column context, produces the clause ``column IN <other>``.
The given parameter ``other`` may be:
* A list of literal values, e.g.::
stmt.where(column.in_([1, 2, 3]))
In this calling form, the list of items is converted to a set of
bound parameters the same length as the list given::
WHERE COL IN (?, ?, ?)
* A list of tuples may be provided if the comparison is against a
:func:`.tuple_` containing multiple expressions::
from sqlalchemy import tuple_
stmt.where(tuple_(col1, col2).in_([(1, 10), (2, 20), (3, 30)]))
* An empty list, e.g.::
stmt.where(column.in_([]))
In this calling form, the expression renders a "false" expression,
e.g.::
WHERE 1 != 1
This "false" expression has historically had different behaviors
in older SQLAlchemy versions, see
:paramref:`_sa.create_engine.empty_in_strategy`
for behavioral options.
.. versionchanged:: 1.2 simplified the behavior of "empty in"
expressions
* A bound parameter, e.g. :func:`.bindparam`, may be used if it
includes the :paramref:`.bindparam.expanding` flag::
stmt.where(column.in_(bindparam('value', expanding=True)))
In this calling form, the expression renders a special non-SQL
placeholder expression that looks like::
WHERE COL IN ([EXPANDING_value])
This placeholder expression is intercepted at statement execution
time to be converted into the variable number of bound parameter
form illustrated earlier. If the statement were executed as::
connection.execute(stmt, {"value": [1, 2, 3]})
The database would be passed a bound parameter for each value::
WHERE COL IN (?, ?, ?)
.. versionadded:: 1.2 added "expanding" bound parameters
If an empty list is passed, a special "empty list" expression,
which is specific to the database in use, is rendered. On
SQLite this would be::
WHERE COL IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
.. versionadded:: 1.3 "expanding" bound parameters now support
empty lists
* a :func:`_expression.select` construct,
which is usually a correlated
scalar select::
stmt.where(
column.in_(
select([othertable.c.y]).
where(table.c.x == othertable.c.x)
)
)
In this calling form, :meth:`.ColumnOperators.in_` renders as given::
WHERE COL IN (SELECT othertable.y
FROM othertable WHERE othertable.x = table.x)
:param other: a list of literals, a :func:`_expression.select`
construct,
or a :func:`.bindparam` construct that includes the
:paramref:`.bindparam.expanding` flag set to True.
"""
return self.operate(in_op, other)
def notin_(self, other):
"""implement the ``NOT IN`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``.
In the case that ``other`` is an empty sequence, the compiler
produces an "empty not in" expression. This defaults to the
expression "1 = 1" to produce true in all cases. The
:paramref:`_sa.create_engine.empty_in_strategy` may be used to
alter this behavior.
.. versionchanged:: 1.2 The :meth:`.ColumnOperators.in_` and
:meth:`.ColumnOperators.notin_` operators
now produce a "static" expression for an empty IN sequence
by default.
.. seealso::
:meth:`.ColumnOperators.in_`
"""
return self.operate(notin_op, other)
def notlike(self, other, escape=None):
"""implement the ``NOT LIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``.
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(notlike_op, other, escape=escape)
def notilike(self, other, escape=None):
"""implement the ``NOT ILIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``.
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(notilike_op, other, escape=escape)
def is_(self, other):
"""Implement the ``IS`` operator.
Normally, ``IS`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS`` may be desirable if comparing to boolean values
on certain platforms.
.. seealso:: :meth:`.ColumnOperators.isnot`
"""
return self.operate(is_, other)
def isnot(self, other):
"""Implement the ``IS NOT`` operator.
Normally, ``IS NOT`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS NOT`` may be desirable if comparing to boolean values
on certain platforms.
.. seealso:: :meth:`.ColumnOperators.is_`
"""
return self.operate(isnot, other)
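    # Editor's note -- illustrative sketch, not part of the SQLAlchemy source.
    # Assuming a table ``sometable`` with a nullable column, the implicit and
    # explicit forms below would typically render the same SQL:
    #
    #     sometable.c.column == None          # renders: sometable.column IS NULL
    #     sometable.c.column.is_(None)        # explicit IS, same rendering
    #     sometable.c.column.isnot(None)      # renders: sometable.column IS NOT NULL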
def startswith(self, other, **kwargs):
r"""Implement the ``startswith`` operator.
Produces a LIKE expression that tests against a match for the start
of a string value::
column LIKE <other> || '%'
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.startswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.startswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.startswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.startswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.startswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE :param || '%' ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.startswith.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forwards
slash, which itself can be customized using the
:paramref:`.ColumnOperators.startswith.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.startswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.startswith.autoescape`::
somecolumn.startswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
r"""Implement the 'endswith' operator.
Produces a LIKE expression that tests against a match for the end
of a string value::
column LIKE '%' || <other>
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.endswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.endswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.endswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.endswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.endswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.endswith.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forwards
slash, which itself can be customized using the
:paramref:`.ColumnOperators.endswith.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.endswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.endswith.autoescape`::
somecolumn.endswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
r"""Implement the 'contains' operator.
Produces a LIKE expression that tests against a match for the middle
of a string value::
column LIKE '%' || <other> || '%'
E.g.::
stmt = select([sometable]).\
where(sometable.c.column.contains("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.contains.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.contains.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.contains.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.contains("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '/'
With the value of :param as ``"foo/%bar"``.
.. versionadded:: 1.2
.. versionchanged:: 1.2.0 The
:paramref:`.ColumnOperators.contains.autoescape` parameter is
now a simple boolean rather than a character; the escape
character itself is also escaped, and defaults to a forwards
slash, which itself can be customized using the
:paramref:`.ColumnOperators.contains.escape` parameter.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.contains("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.contains.autoescape`::
somecolumn.contains("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.like`
"""
return self.operate(contains_op, other, **kwargs)
def match(self, other, **kwargs):
"""Implements a database-specific 'match' operator.
:meth:`~.ColumnOperators.match` attempts to resolve to
a MATCH-like function or operator provided by the backend.
Examples include:
* PostgreSQL - renders ``x @@ to_tsquery(y)``
* MySQL - renders ``MATCH (x) AGAINST (y IN BOOLEAN MODE)``
* Oracle - renders ``CONTAINS(x, y)``
* other backends may provide special implementations.
* Backends without any special implementation will emit
the operator as "MATCH". This is compatible with SQLite, for
example.
"""
return self.operate(match_op, other, **kwargs)
def desc(self):
"""Produce a :func:`_expression.desc` clause against the
parent object."""
return self.operate(desc_op)
def asc(self):
"""Produce a :func:`_expression.asc` clause against the
parent object."""
return self.operate(asc_op)
def nullsfirst(self):
"""Produce a :func:`_expression.nullsfirst` clause against the
parent object."""
return self.operate(nullsfirst_op)
def nullslast(self):
"""Produce a :func:`_expression.nullslast` clause against the
parent object."""
return self.operate(nullslast_op)
def collate(self, collation):
"""Produce a :func:`_expression.collate` clause against
the parent object, given the collation string.
.. seealso::
:func:`_expression.collate`
"""
return self.operate(collate, collation)
def __radd__(self, other):
"""Implement the ``+`` operator in reverse.
See :meth:`.ColumnOperators.__add__`.
"""
return self.reverse_operate(add, other)
def __rsub__(self, other):
"""Implement the ``-`` operator in reverse.
See :meth:`.ColumnOperators.__sub__`.
"""
return self.reverse_operate(sub, other)
def __rmul__(self, other):
"""Implement the ``*`` operator in reverse.
See :meth:`.ColumnOperators.__mul__`.
"""
return self.reverse_operate(mul, other)
def __rdiv__(self, other):
"""Implement the ``/`` operator in reverse.
See :meth:`.ColumnOperators.__div__`.
"""
return self.reverse_operate(div, other)
def __rmod__(self, other):
"""Implement the ``%`` operator in reverse.
See :meth:`.ColumnOperators.__mod__`.
"""
return self.reverse_operate(mod, other)
def between(self, cleft, cright, symmetric=False):
"""Produce a :func:`_expression.between` clause against
the parent object, given the lower and upper range.
"""
return self.operate(between_op, cleft, cright, symmetric=symmetric)
def distinct(self):
"""Produce a :func:`_expression.distinct` clause against the
parent object.
"""
return self.operate(distinct_op)
def any_(self):
"""Produce a :func:`_expression.any_` clause against the
parent object.
This operator is only appropriate against a scalar subquery
        object, or for some backends a column expression that is
against the ARRAY type, e.g.::
# postgresql '5 = ANY (somearray)'
expr = 5 == mytable.c.somearray.any_()
# mysql '5 = ANY (SELECT value FROM table)'
expr = 5 == select([table.c.value]).as_scalar().any_()
.. seealso::
:func:`_expression.any_` - standalone version
:func:`_expression.all_` - ALL operator
.. versionadded:: 1.1
"""
return self.operate(any_op)
def all_(self):
"""Produce a :func:`_expression.all_` clause against the
parent object.
This operator is only appropriate against a scalar subquery
        object, or for some backends a column expression that is
against the ARRAY type, e.g.::
# postgresql '5 = ALL (somearray)'
expr = 5 == mytable.c.somearray.all_()
# mysql '5 = ALL (SELECT value FROM table)'
expr = 5 == select([table.c.value]).as_scalar().all_()
.. seealso::
:func:`_expression.all_` - standalone version
:func:`_expression.any_` - ANY operator
.. versionadded:: 1.1
"""
return self.operate(all_op)
def __add__(self, other):
"""Implement the ``+`` operator.
In a column context, produces the clause ``a + b``
if the parent object has non-string affinity.
If the parent object has a string affinity,
produces the concatenation operator, ``a || b`` -
see :meth:`.ColumnOperators.concat`.
"""
return self.operate(add, other)
def __sub__(self, other):
"""Implement the ``-`` operator.
In a column context, produces the clause ``a - b``.
"""
return self.operate(sub, other)
def __mul__(self, other):
"""Implement the ``*`` operator.
In a column context, produces the clause ``a * b``.
"""
return self.operate(mul, other)
def __div__(self, other):
"""Implement the ``/`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(div, other)
def __mod__(self, other):
"""Implement the ``%`` operator.
In a column context, produces the clause ``a % b``.
"""
return self.operate(mod, other)
def __truediv__(self, other):
"""Implement the ``//`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(truediv, other)
def __rtruediv__(self, other):
"""Implement the ``//`` operator in reverse.
See :meth:`.ColumnOperators.__truediv__`.
"""
return self.reverse_operate(truediv, other)
_commutative = {eq, ne, add, mul}
_comparison = {eq, ne, lt, gt, ge, le}
def commutative_op(fn):
_commutative.add(fn)
return fn
def comparison_op(fn):
_comparison.add(fn)
return fn
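# Editor's note -- illustrative sketch, not part of the SQLAlchemy source. The
# two decorators above only register an operator function in the module-level
# sets, so that predicates such as is_comparison() and is_commutative() later
# recognize it, e.g. for a hypothetical operator function:
#
#     @comparison_op
#     def my_custom_op(a, b):
#         return a.op("~~~")(b)
#
#     # is_comparison(my_custom_op) would then return True.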
def from_():
raise NotImplementedError()
@comparison_op
def function_as_comparison_op():
raise NotImplementedError()
def as_():
raise NotImplementedError()
def exists():
raise NotImplementedError()
def istrue(a):
raise NotImplementedError()
def isfalse(a):
raise NotImplementedError()
@comparison_op
def is_distinct_from(a, b):
return a.is_distinct_from(b)
@comparison_op
def isnot_distinct_from(a, b):
return a.isnot_distinct_from(b)
@comparison_op
def is_(a, b):
return a.is_(b)
@comparison_op
def isnot(a, b):
return a.isnot(b)
def collate(a, b):
return a.collate(b)
def op(a, opstring, b):
return a.op(opstring)(b)
@comparison_op
def like_op(a, b, escape=None):
return a.like(b, escape=escape)
@comparison_op
def notlike_op(a, b, escape=None):
return a.notlike(b, escape=escape)
@comparison_op
def ilike_op(a, b, escape=None):
return a.ilike(b, escape=escape)
@comparison_op
def notilike_op(a, b, escape=None):
return a.notilike(b, escape=escape)
@comparison_op
def between_op(a, b, c, symmetric=False):
return a.between(b, c, symmetric=symmetric)
@comparison_op
def notbetween_op(a, b, c, symmetric=False):
return a.notbetween(b, c, symmetric=symmetric)
@comparison_op
def in_op(a, b):
return a.in_(b)
@comparison_op
def notin_op(a, b):
return a.notin_(b)
def distinct_op(a):
return a.distinct()
def any_op(a):
return a.any_()
def all_op(a):
return a.all_()
def _escaped_like_impl(fn, other, escape, autoescape):
if autoescape:
if autoescape is not True:
util.warn(
"The autoescape parameter is now a simple boolean True/False"
)
if escape is None:
escape = "/"
if not isinstance(other, util.compat.string_types):
raise TypeError("String value expected when autoescape=True")
if escape not in ("%", "_"):
other = other.replace(escape, escape + escape)
other = other.replace("%", escape + "%").replace("_", escape + "_")
return fn(other, escape=escape)
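# Editor's note -- illustrative sketch, not part of the SQLAlchemy source.
# With autoescape=True and no explicit escape character, the helper above
# rewrites a literal comparison value before handing it to LIKE, roughly:
#
#     other = "foo%bar"
#     other = other.replace("/", "//")                        # escape the escape char itself
#     other = other.replace("%", "/%").replace("_", "/_")     # escape LIKE wildcards
#     # other is now "foo/%bar" and the rendered SQL appends ESCAPE '/'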
@comparison_op
def startswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.startswith, b, escape, autoescape)
@comparison_op
def notstartswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.startswith, b, escape, autoescape)
@comparison_op
def endswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.endswith, b, escape, autoescape)
@comparison_op
def notendswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.endswith, b, escape, autoescape)
@comparison_op
def contains_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.contains, b, escape, autoescape)
@comparison_op
def notcontains_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.contains, b, escape, autoescape)
@comparison_op
def match_op(a, b, **kw):
return a.match(b, **kw)
@comparison_op
def notmatch_op(a, b, **kw):
return a.notmatch(b, **kw)
def comma_op(a, b):
raise NotImplementedError()
@comparison_op
def empty_in_op(a, b):
raise NotImplementedError()
@comparison_op
def empty_notin_op(a, b):
raise NotImplementedError()
def filter_op(a, b):
raise NotImplementedError()
def concat_op(a, b):
return a.concat(b)
def desc_op(a):
return a.desc()
def asc_op(a):
return a.asc()
def nullsfirst_op(a):
return a.nullsfirst()
def nullslast_op(a):
return a.nullslast()
def json_getitem_op(a, b):
raise NotImplementedError()
def json_path_getitem_op(a, b):
raise NotImplementedError()
def is_comparison(op):
return op in _comparison or isinstance(op, custom_op) and op.is_comparison
def is_commutative(op):
return op in _commutative
def is_ordering_modifier(op):
return op in (asc_op, desc_op, nullsfirst_op, nullslast_op)
def is_natural_self_precedent(op):
return (
op in _natural_self_precedent
or isinstance(op, custom_op)
and op.natural_self_precedent
)
_booleans = (inv, istrue, isfalse, and_, or_)
def is_boolean(op):
return is_comparison(op) or op in _booleans
_mirror = {gt: lt, ge: le, lt: gt, le: ge}
def mirror(op):
"""rotate a comparison operator 180 degrees.
Note this is not the same as negation.
"""
return _mirror.get(op, op)
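# Editor's note -- illustrative sketch, not part of the SQLAlchemy source.
# mirror() swaps the operand order of an inequality without negating it:
#
#     mirror(gt) is lt     # a > b   is equivalent to  b < a
#     mirror(ge) is le     # a >= b  is equivalent to  b <= a
#     mirror(eq) is eq     # operators without a mirror are returned unchanged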
_associative = _commutative.union([concat_op, and_, or_]).difference([eq, ne])
_natural_self_precedent = _associative.union(
[getitem, json_getitem_op, json_path_getitem_op]
)
"""Operators where if we have (a op b) op c, we don't want to
parenthesize (a op b).
"""
_asbool = util.symbol("_asbool", canonical=-10)
_smallest = util.symbol("_smallest", canonical=-100)
_largest = util.symbol("_largest", canonical=100)
_PRECEDENCE = {
from_: 15,
function_as_comparison_op: 15,
any_op: 15,
all_op: 15,
getitem: 15,
json_getitem_op: 15,
json_path_getitem_op: 15,
mul: 8,
truediv: 8,
div: 8,
mod: 8,
neg: 8,
add: 7,
sub: 7,
concat_op: 6,
filter_op: 6,
match_op: 5,
notmatch_op: 5,
ilike_op: 5,
notilike_op: 5,
like_op: 5,
notlike_op: 5,
in_op: 5,
notin_op: 5,
is_: 5,
isnot: 5,
eq: 5,
ne: 5,
is_distinct_from: 5,
isnot_distinct_from: 5,
empty_in_op: 5,
empty_notin_op: 5,
gt: 5,
lt: 5,
ge: 5,
le: 5,
between_op: 5,
notbetween_op: 5,
distinct_op: 5,
inv: 5,
istrue: 5,
isfalse: 5,
and_: 3,
or_: 2,
comma_op: -1,
desc_op: 3,
asc_op: 3,
collate: 4,
as_: -1,
exists: 0,
_asbool: -10,
_smallest: _smallest,
_largest: _largest,
}
def is_precedent(operator, against):
if operator is against and is_natural_self_precedent(operator):
return False
else:
return _PRECEDENCE.get(
operator, getattr(operator, "precedence", _smallest)
) <= _PRECEDENCE.get(against, getattr(against, "precedence", _largest))
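# Editor's note -- illustrative sketch, not part of the SQLAlchemy source.
# is_precedent() answers "does the inner operator need parentheses when nested
# under the outer one?" using the _PRECEDENCE table above, e.g.:
#
#     is_precedent(add, mul)    # True:  7 <= 8, so (a + b) * c keeps its parentheses
#     is_precedent(mul, add)    # False: 8 <= 7 fails, so a * b + c needs none
#     is_precedent(or_, and_)   # True:  2 <= 3, so (x OR y) AND z is parenthesized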
|
kawamon/hue
|
desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/sql/operators.py
|
Python
|
apache-2.0
| 42,548
|
import ast
import json
import hashlib
import urllib
import base64
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from lrs import models, views
class ActivityProfileTests(TestCase):
test_activityId1 = 'act:act-1'
test_activityId2 = 'act:act-2'
test_activityId3 = 'act:act-3'
other_activityId = 'act:act-other'
content_type = "application/json"
testprofileId1 = "http://profile.test.id/test/1"
testprofileId2 = "http://profile.test.id/test/2"
testprofileId3 = "http://profile.test.id/test/3"
otherprofileId1 = "http://profile.test.id/other/1"
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
def setUp(self):
self.username = "tester"
self.email = "test@tester.com"
self.password = "test"
self.auth = "Basic %s" % base64.b64encode("%s:%s" % (self.username, self.password))
form = {'username':self.username, 'email': self.email,'password':self.password,'password2':self.password}
self.client.post(reverse(views.register),form, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams1 = {"profileId": self.testprofileId1, "activityId": self.test_activityId1}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams1))
self.testprofile1 = {"test":"put profile 1","obj":{"activity":"test"}}
self.put1 = self.client.put(path, json.dumps(self.testprofile1), content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams2 = {"profileId": self.testprofileId2, "activityId": self.test_activityId2}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams2))
self.testprofile2 = {"test":"put profile 2","obj":{"activity":"test"}}
self.put2 = self.client.put(path, json.dumps(self.testprofile2), content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams3 = {"profileId": self.testprofileId3, "activityId": self.test_activityId3}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams3))
self.testprofile3 = {"test":"put profile 3","obj":{"activity":"test"}}
self.put3 = self.client.put(path, json.dumps(self.testprofile3), content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams4 = {"profileId": self.otherprofileId1, "activityId": self.other_activityId}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams4))
self.otherprofile1 = {"test":"put profile other","obj":{"activity":"other"}}
self.put4 = self.client.put(path, json.dumps(self.otherprofile1), content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams5 = {"profileId": self.otherprofileId1, "activityId": self.test_activityId1}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams5))
self.anotherprofile1 = {"test":"put another profile 1","obj":{"activity":"other"}}
self.put5 = self.client.put(path, json.dumps(self.anotherprofile1), content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def tearDown(self):
self.client.delete(reverse(views.activity_profile), self.testparams1, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse(views.activity_profile), self.testparams2, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse(views.activity_profile), self.testparams3, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse(views.activity_profile), self.testparams4, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse(views.activity_profile), self.testparams5, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
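    # Editor's note -- illustrative sketch, not part of the original test suite.
    # The ETag assertions below all follow the same convention: the LRS is
    # expected to return the SHA-1 hex digest of the stored document, wrapped in
    # double quotes, e.g. for a hypothetical profile dict ``doc``:
    #
    #     expected_etag = '"%s"' % hashlib.sha1(json.dumps(doc)).hexdigest()
    #     # response['etag'] == expected_etag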
def test_put(self):
#Test the puts
self.assertEqual(self.put1.status_code, 204)
self.assertEqual(self.put2.status_code, 204)
self.assertEqual(self.put3.status_code, 204)
self.assertEqual(self.put4.status_code, 204)
self.assertEqual(self.put5.status_code, 204)
#Make sure profiles have correct activities
self.assertEqual(models.ActivityProfile.objects.filter(profileId=self.testprofileId1)[0].activityId, self.test_activityId1)
self.assertEqual(models.ActivityProfile.objects.filter(profileId=self.testprofileId2)[0].activityId, self.test_activityId2)
self.assertEqual(models.ActivityProfile.objects.filter(profileId=self.testprofileId3)[0].activityId, self.test_activityId3)
def test_put_no_params(self):
put = self.client.put(reverse(views.activity_profile) ,content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEquals(put.content, 'Error -- activity_profile - method = PUT, but activityId parameter missing..')
def test_put_no_activityId(self):
put = self.client.put(reverse(views.activity_profile), {'profileId':'10'},content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEquals(put.content, 'Error -- activity_profile - method = PUT, but activityId parameter missing..')
def test_put_no_profileId(self):
testparams = {'activityId':'act:act:act'}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(testparams))
put = self.client.put(path, content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEquals(put.content, 'Error -- activity_profile - method = PUT, but profileId parameter missing..')
def test_put_etag_missing_on_change(self):
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams1))
profile = {"test":"error - trying to put new profile w/o etag header","obj":{"activity":"test"}}
response = self.client.put(path, profile, content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 409)
self.assertIn('If-Match and If-None-Match headers were missing', response.content)
r = self.client.get(reverse(views.activity_profile), self.testparams1, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['activity'], self.testprofile1['obj']['activity'])
def test_put_etag_right_on_change(self):
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams1))
profile = {"test":"good - trying to put new profile w/ etag header","obj":{"activity":"act:test"}}
thehash = '"%s"' % hashlib.sha1(json.dumps(self.testprofile1)).hexdigest()
response = self.client.put(path, json.dumps(profile), content_type=self.content_type, If_Match=thehash, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
r = self.client.get(reverse(views.activity_profile), self.testparams1, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.content, json.dumps(profile))
def test_put_etag_wrong_on_change(self):
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams1))
profile = {"test":"error - trying to put new profile w/ wrong etag value","obj":{"activity":"act:test"}}
thehash = '"%s"' % hashlib.sha1('%s' % 'wrong hash').hexdigest()
response = self.client.put(path, profile, content_type=self.content_type, If_Match=thehash, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 412)
self.assertIn('No resources matched', response.content)
r = self.client.get(reverse(views.activity_profile), self.testparams1, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['activity'], self.testprofile1['obj']['activity'])
def test_put_etag_if_none_match_good(self):
params = {"profileId": 'http://etag.nomatch.good', "activityId": self.test_activityId1}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
profile = {"test":"good - trying to put new profile w/ if none match etag header","obj":{"activity":"act:test"}}
response = self.client.put(path, json.dumps(profile), content_type=self.content_type, if_none_match='*', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
r = self.client.get(reverse(views.activity_profile), params, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], profile['test'])
self.assertEqual(robj['obj']['activity'], profile['obj']['activity'])
r = self.client.delete(reverse(views.activity_profile), params, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_put_etag_if_none_match_bad(self):
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams1))
profile = {"test":"error - trying to put new profile w/ if none match etag but one exists","obj":{"activity":"act:test"}}
response = self.client.put(path, profile, content_type=self.content_type, If_None_Match='*', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 412)
self.assertEqual(response.content, 'Resource detected')
r = self.client.get(reverse(views.activity_profile), self.testparams1, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['activity'], self.testprofile1['obj']['activity'])
def test_get_activity_only(self):
response = self.client.get(reverse(views.activity_profile), {'activityId':self.test_activityId2}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.testprofileId2)
params = {'activityId': self.test_activityId2, 'profileId': self.testprofileId2}
self.client.delete(reverse(views.activity_profile), params, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_activity_profileId(self):
response = self.client.get(reverse(views.activity_profile), {'activityId':self.test_activityId1,'profileId':self.testprofileId1},
X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(response.status_code, 200)
robj = ast.literal_eval(response.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['activity'], self.testprofile1['obj']['activity'])
resp_hash = hashlib.sha1(response.content).hexdigest()
self.assertEqual(response['etag'], '"%s"' % resp_hash)
params = {'activityId': self.test_activityId1, 'profileId': self.testprofileId1}
self.client.delete(reverse(views.activity_profile), params, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_activity_profileId_no_auth(self):
response = self.client.get(reverse(views.activity_profile), {'activityId':self.test_activityId1,'profileId':self.testprofileId1}, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
def test_get_activity_profileId_activity_dne(self):
response = self.client.get(reverse(views.activity_profile), {'activityId':'http://actID','profileId':self.testprofileId1}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(response.status_code, 404)
def test_get_activity_since_tz(self):
actid = "test:activity"
profid = "test://test/tz"
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id': actid}})
st_post = self.client.post(reverse(views.statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
params = {"profileId": profid, "activityId": actid}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"test":"timezone since","obj":{"activity":"other"}}
r = self.client.put(path, json.dumps(prof), content_type=self.content_type, updated="2012-11-11T12:00:00+00:00", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 204)
since = "2012-11-11T12:00:00-02:00"
response = self.client.get(reverse(views.activity_profile), {'activityId': actid,'since':since}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(response.status_code, 200)
self.assertNotIn(profid, response.content)
params = {"activityId": actid, "profileId": profid}
self.client.delete(reverse(views.activity_profile), params, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_activity_bad_since(self):
actid = "test:activity"
profid = "test://test/tz"
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id': actid}})
st_post = self.client.post(reverse(views.statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
params = {"profileId": profid, "activityId": actid}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"test":"timezone since","obj":{"activity":"other"}}
r = self.client.put(path, json.dumps(prof), content_type=self.content_type, updated="2012-11-11T12:00:00+00:00", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 204)
since = "2012-11-1112:00:00-02:00"
response = self.client.get(reverse(views.activity_profile), {'activityId': actid,'since':since}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, "Since parameter was not a valid ISO8601 timestamp")
params = {"activityId": actid, "profileId": profid}
self.client.delete(reverse(views.activity_profile), params, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_no_activityId_with_profileId(self):
response = self.client.get(reverse(views.activity_profile), {'profileId': self.testprofileId3}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, 'Error -- activity_profile - method = GET, but activityId parameter missing..')
def test_get_no_activityId_with_since(self):
since = "2012-07-01T13:30:00+04:00"
response = self.client.get(reverse(views.activity_profile), {'since':since}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, 'Error -- activity_profile - method = GET, but activityId parameter missing..')
def test_delete(self):
response = self.client.delete(reverse(views.activity_profile), {'activityId':self.other_activityId, 'profileId':self.otherprofileId1}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, '')
def test_cors_put(self):
profileid = 'http://test.cors.put'
activityid = 'act:test_cors_put-activity'
testparams1 = {"profileId": profileid, "activityId": activityid}
content = {"test":"put profile 1","obj":{"activity":"act:test"}}
params = "profileId=%s&activityId=%s&Authorization=%s&content=%s&X-Experience-API-Version=1.0" % (profileid, activityid,self.auth,content)
        path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode({"method":"PUT"}))
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id': activityid}})
st_post = self.client.post(reverse(views.statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
thedata = urllib.quote_plus(params)
put1 = self.client.post(path, thedata, content_type="application/x-www-form-urlencoded")
self.assertEqual(put1.status_code, 204)
get1 = self.client.get(reverse(views.activity_profile), testparams1, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get1.status_code, 200)
c = ast.literal_eval(get1.content)
self.assertEqual(c['test'], content['test'])
self.client.delete(reverse(views.activity_profile), testparams1, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_cors_put_etag(self):
pid = 'http://ie.cors.etag/test'
aid = 'act:ie.cors.etag/test'
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id': aid}})
st_post = self.client.post(reverse(views.statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(self.testparams1))
tp = {"test":"put example profile for test_cors_put_etag","obj":{"activity":"this should be replaced -- ie cors post/put"}}
thehash = '"%s"' % hashlib.sha1(json.dumps(self.testprofile1)).hexdigest()
put1 = self.client.put(path, tp, content_type=self.content_type, If_Match=thehash, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put1.status_code, 204)
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode({"method":"PUT"}))
content = {"test":"good - trying to put new profile w/ etag header - IE cors","obj":{"activity":"test IE cors etag"}}
thehash = '"%s"' % hashlib.sha1('%s' % tp).hexdigest()
thedata = "profileId=%s&activityId=%s&If-Match=%s&Authorization=%s&Content-Type=application/x-www-form-urlencoded&content=%s&X-Experience-API-Version=1.0.0" % (pid, aid, thehash, self.auth, content)
response = self.client.post(path, thedata, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
r = self.client.get(reverse(views.activity_profile), {'activityId': aid, 'profileId': pid}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(r.status_code, 200)
c = ast.literal_eval(r.content)
self.assertEqual(c['test'], content['test'])
self.client.delete(reverse(views.activity_profile), {'activityId': aid, 'profileId': pid}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_tetris_snafu(self):
params = {"profileId": "http://test.tetris/", "activityId": "act:tetris.snafu"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
profile = {"test":"put profile 1","obj":{"activity":"test"}}
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id': "act:tetris.snafu"}})
st_post = self.client.post(reverse(views.statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
p_r = self.client.put(path, json.dumps(profile), content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(p_r.status_code, 204)
r = self.client.get(reverse(views.activity_profile), {'activityId': "act:tetris.snafu", 'profileId': "http://test.tetris/"}, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(r.status_code, 200)
self.assertEqual(r['Content-Type'], self.content_type)
self.assertIn("\"", r.content)
self.client.delete(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_post_new_profile(self):
params = {"profileId": "prof:test_post_new_profile", "activityId": "act:test.post.new.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"test":"post new profile","obj":{"activity":"act:test.post.new.prof"}}
post = self.client.post(path, json.dumps(prof), content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
self.assertEqual(ast.literal_eval(get.content), prof)
self.assertEqual(get.get('etag'), '"%s"' % hashlib.sha1(get.content).hexdigest())
self.client.delete(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_post_blank_profile(self):
params = {"profileId": "prof:test_post_new_profile", "activityId": "act:test.post.new.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = ""
post = self.client.post(path, prof, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 400)
self.assertEqual(post.content, 'No body in request')
def test_post_update_profile(self):
params = {"profileId": "prof:test_post_update_profile", "activityId": "act:test.post.update.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"test":"post updated profile","obj":{"activity":"act:test.post.update.prof"}}
post = self.client.post(path, json.dumps(prof), content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
what = ast.literal_eval(get.content)
self.assertEqual(what, prof)
etag = '"%s"' % hashlib.sha1(get.content).hexdigest()
self.assertEqual(get.get('etag'), etag)
params = {"profileId": "prof:test_post_update_profile", "activityId": "act:test.post.update.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"obj":{"activity":"act:test.post.update.prof_changed", "new":"thing"}, "added":"yes"}
post = self.client.post(path, json.dumps(prof), content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 409)
post = self.client.post(path, json.dumps(prof), content_type="application/json", If_Match=etag, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
ret_json = ast.literal_eval(get.content)
self.assertEqual(ret_json['added'], prof['added'])
self.assertEqual(ret_json['test'], "post updated profile")
self.assertEqual(ret_json['obj']['activity'], prof['obj']['activity'])
self.assertEqual(ret_json['obj']['new'], prof['obj']['new'])
self.assertEqual(get.get('etag'), '"%s"' % hashlib.sha1(get.content).hexdigest())
self.client.delete(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_post_and_put_profile(self):
params = {"profileId": "prof:test_post_and_put_profile", "activityId": "act:test.post.put.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"test":"post and put profile","obj":{"activity":"act:test.post.put.prof"}}
post = self.client.post(path, json.dumps(prof), content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
self.assertEqual(ast.literal_eval(get.content), prof)
self.assertEqual(get.get('etag'), '"%s"' % hashlib.sha1(get.content).hexdigest())
params = {"profileId": "prof:test_post_and_put_profile", "activityId": "act:test.post.put.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"wipe":"new data"}
thehash = get.get('etag')
put = self.client.put(path, json.dumps(prof), content_type="application/json", If_Match=thehash, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
self.assertEqual(ast.literal_eval(get.content), prof)
etag = '"%s"' % hashlib.sha1(get.content).hexdigest()
self.assertEqual(get.get('etag'), etag)
params = {"profileId": "prof:test_post_and_put_profile", "activityId": "act:test.post.put.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
prof = {"test":"post updated profile","obj":{"activity":"act:test.post.update.prof_changed", "new":"thing"}, "added":"yes"}
post = self.client.post(path, json.dumps(prof), content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 409)
post = self.client.post(path, json.dumps(prof), content_type="application/json", If_Match=etag, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
ret_json = ast.literal_eval(get.content)
self.assertEqual(ret_json['wipe'], "new data")
self.assertEqual(ret_json['added'], prof['added'])
self.assertEqual(ret_json['test'], prof['test'])
self.assertEqual(ret_json['obj']['activity'], prof['obj']['activity'])
self.assertEqual(ret_json['obj']['new'], prof['obj']['new'])
self.assertEqual(get.get('etag'), '"%s"' % hashlib.sha1(get.content).hexdigest())
self.client.delete(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_put_wrong_activityId(self):
params = {'activityId':'foo','profileId':'10'}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
put = self.client.put(path, '{test:body}', content_type=self.content_type, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEquals(put.content, 'activityId param for activity profile with value foo was not a valid URI')
def test_current_tetris(self):
params = {"profileId":"profile:highscores","activityId":"act:adlnet.gov/JsTetris_TCAPI"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
put = self.client.put(path, '[{"actor":{"name":"tom","mbox":"mailto:tom@tom.com"},"score":802335,"date":"2013-07-26T13:42:13.465Z"},{"actor":{"name":"tom","mbox":"mailto:tom@tom.com"},"score":159482,"date":"2013-07-26T13:49:14.011Z"},{"actor":{"name":"lou","mbox":"mailto:l@l.com"},"score":86690,"date":"2013-07-26T13:27:29.083Z"},{"actor":{"name":"tom","mbox":"mailto:tom@tom.com"},"score":15504,"date":"2013-07-26T13:27:30.763Z"},{"actor":{"name":"tom","mbox":"mailto:tom@tom.com"},"score":1982,"date":"2013-07-26T13:29:46.067Z"},{"actor":{"name":"unknown","mbox":"mailto:unknown@example.com"},"score":348,"date":"2013-07-26T13:51:08.043Z"}]', content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put.status_code, 204)
theget = self.client.get(path, Authorization=self.auth, X_Experience_API_Version="1.0")
self.assertEqual(theget['ETag'], '"d4827d99a5cc3510d3847baa341ba5a3b477fdfc"')
def test_json_merge(self):
prof = '{"test": { "goal": "ensure proper json parse", "attempt": 1, "result": null } }'
params = {"profileId": "prof:test_json_merge", "activityId": "act:test.json.merge.prof"}
path = '%s?%s' % (reverse(views.activity_profile), urllib.urlencode(params))
post = self.client.post(path, prof, content_type="application/json", If_None_Match='*', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
returned = json.loads(get.content)
sent = json.loads(prof)
self.assertEqual(returned['test']['goal'], sent['test']['goal'])
self.assertEqual(returned['test']['attempt'], sent['test']['attempt'])
self.assertEqual(returned['test']['result'], sent['test']['result'])
etag = '"%s"' % hashlib.sha1(get.content).hexdigest()
self.assertEqual(get.get('etag'), etag)
sent['test']['result'] = True
sent['test']['attempt'] = sent['test']['attempt'] + 1
prof = json.dumps(sent)
post = self.client.post(path, prof, content_type="application/json", If_Match=etag, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
returned = json.loads(get.content)
sent = json.loads(prof)
self.assertEqual(returned['test']['goal'], sent['test']['goal'])
self.assertEqual(returned['test']['attempt'], sent['test']['attempt'])
self.assertEqual(returned['test']['result'], sent['test']['result'])
etag = '"%s"' % hashlib.sha1(get.content).hexdigest()
self.assertEqual(get.get('etag'), etag)
|
ELSUru/ADL_LRS
|
lrs/tests/ActivityProfileTests.py
|
Python
|
apache-2.0
| 33,532
|
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.api.v1.validators.json_schema import base_types
from nailgun.api.v1.validators.json_schema import tasks
from nailgun import consts
from nailgun.errors import errors
from nailgun import objects
from nailgun.orchestrator import deployment_graph
class GraphTasksValidator(BasicValidator):
@classmethod
def validate_update(cls, data, instance):
parsed = cls.validate(data)
cls.validate_schema(parsed, tasks.TASKS_SCHEMA)
graph = deployment_graph.DeploymentGraph()
graph.add_tasks(parsed)
if not graph.is_acyclic():
raise errors.InvalidData(
"Tasks can not be processed because it contains cycles in it.")
return parsed
class TaskDeploymentValidator(BasicValidator):
@classmethod
def validate_tasks(cls, tasks, cluster):
"""Check that passed tasks are present in deployment graph
:param tasks: list of tasks
:param cluster: Cluster DB object
:returns: list of tasks
"""
cls.validate_schema(tasks, base_types.STRINGS_ARRAY)
deployment_tasks = objects.Cluster.get_deployment_tasks(cluster)
graph = deployment_graph.DeploymentGraph()
graph.add_tasks(deployment_tasks)
non_existent_tasks = set(tasks) - set(graph.nodes())
if non_existent_tasks:
raise errors.InvalidData(
'Tasks {0} are not present in deployment graph'.format(
','.join(non_existent_tasks)))
return tasks
@classmethod
def validate_tasks_types(cls, types):
"""Check that passed types are actuall tasks types
:param types: list of types
"""
cls.validate_schema(types, base_types.STRINGS_ARRAY)
non_existent_types = set(types) - set(consts.INTERNAL_TASKS)
if non_existent_types:
raise errors.InvalidData("Task types {0} do not exist".format(
','.join(non_existent_types)))
return types
class GraphVisualizationValidator(TaskDeploymentValidator):
@classmethod
def validate(cls, data, cluster):
"""Check that passed tasks are present in deployment graph
:param data: list of tasks in string representation.
Example: "hiera,controller"
:param cluster: Cluster DB object
"""
tasks = list(set(data.split(',')))
return cls.validate_tasks(tasks, cluster)
@classmethod
def validate_task_presence(cls, task, graph):
"""Checks if task is present in graph.
:param task: task name to check
        :param graph: graph where task presence will be checked
"""
if not graph.has_node(task):
raise errors.InvalidData(
'Task {0} is not present in graph'.format(task))
return task
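# Editor's note -- illustrative usage sketch, not part of the original module.
# A handler would typically chain these validators roughly as follows, where
# ``cluster`` is a Cluster DB object, ``graph`` a DeploymentGraph instance and
# ``requested_types`` a list of task type names (all hypothetical here):
#
#     tasks = GraphVisualizationValidator.validate("hiera,controller", cluster)
#     GraphVisualizationValidator.validate_task_presence("hiera", graph)
#     TaskDeploymentValidator.validate_tasks_types(requested_types)  # each must be in consts.INTERNAL_TASKS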
|
prmtl/fuel-web
|
nailgun/nailgun/api/v1/validators/graph.py
|
Python
|
apache-2.0
| 3,521
|
__author__ = 'greg'
import random
import math
import sys
import gc
steps = ["centered_in_crosshairs", "subtracted", "circular", "centered_in_host"]
gc.set_debug(gc.DEBUG_LEAK)
def create_annotations():
annotations = []
end_point = random.randint(0,4)
for i in range(end_point):
annotations.append({"task":steps[i],"value":1})
try:
annotations.append({"task":steps[end_point],"value":0})
except IndexError:
pass
return annotations
def score_index(annotations):
assert annotations[0]["task"] == "centered_in_crosshairs"
if annotations[0]["value"] == 0:
return 0 #-1
# they should have answered yes
assert annotations[1]["task"] == "subtracted"
if annotations[1]["value"] == 0:
return 0 #-1
assert annotations[2]["task"] == "circular"
if annotations[2]["value"] == 0:
return 0 #-1
assert annotations[3]["task"] == "centered_in_host"
if annotations[3]["value"] == 0:
return 2 #3
else:
return 1 #1
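# Editor's note -- illustrative sketch, not part of the original script.
# score_index() walks the yes/no chain in order; e.g. a hypothetical
# classification that answers "yes" to every step lands in bucket 1:
#
#     example = [{"task": "centered_in_crosshairs", "value": 1},
#                {"task": "subtracted", "value": 1},
#                {"task": "circular", "value": 1},
#                {"task": "centered_in_host", "value": 1}]
#     # score_index(example) == 1; a 0 at any of the first three steps gives
#     # bucket 0, while a 0 at centered_in_host gives bucket 2.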
l = []
#@profile
def create_list():
for classification_count in range(1000000):
subject_id = random.randint(0,50000)
annotations = create_annotations()
l.append((subject_id,annotations[:]))
create_list()
print sys.getsizeof(l)
scores = {}
#@profile
def test():
for subject_id,annotations in l:
#print classification
# if this is the first time we have encountered this subject, add it to the dictionary
if not(subject_id in scores):
scores[subject_id] = [0,0,0]
# get the score index and increment that "box"
scores[subject_id][score_index(annotations)] += 1
for subject_id,values in scores.items():
        avg_score = (values[0]*-1 + values[1]*1 + values[2]*3)/float(sum(values))
        std = math.sqrt((-1-avg_score)**2*(values[0]/float(sum(values))) + (1-avg_score)**2*(values[1]/float(sum(values))) + (3-avg_score)**2*(values[2]/float(sum(values))))
aggregation = {"mean":avg_score,"std":std,"count":values}
test()
|
zooniverse/aggregation
|
Stargazing_2015/loadtesting.py
|
Python
|
apache-2.0
| 2,065
|
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cache for the accessors."""
from __future__ import absolute_import
from __future__ import print_function
import sys
import abc
import hashlib
import cachetools
class AccessorCache(object):
"""A cache that can be given to an accessor.
It looks like Django's cache.
https://docs.djangoproject.com/en/1.11/topics/cache/#the-low-level-cache-api
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def set(self, key, value, timeout=None, version=None):
"""Set a key in the cache."""
pass
@abc.abstractmethod
def get(self, key, default=None, version=None):
"""Get a single key."""
pass
def set_many(self, data, timeout=None, version=None):
"""Set a bunch of keys in the cache."""
for key, value in data.items():
self.set(key, value, timeout=timeout, version=version)
def get_many(self, keys, version=None):
"""Fetch a bunch of keys from the cache.
Args:
keys: a list of keys.
version: an optional version.
"""
d = {}
for k in keys:
val = self.get(k, version=version)
if val is not None:
d[k] = val
return d
class MemoryCache(AccessorCache):
"""A per-process memory cache."""
def __init__(self, size, ttl):
"""Initialize the memory cache."""
super(MemoryCache, self).__init__()
self.__size = size
self.__ttl = ttl
self.__cache = cachetools.TTLCache(maxsize=self.__size, ttl=self.__ttl)
def _make_key(self, key, version):
return str(version) + "-" + key
def set(self, key, value, timeout=None, version=None):
"""Set a key in the cache."""
self.__cache[self._make_key(key, version)] = value
def get(self, key, default=None, version=None):
"""Get a single key."""
return self.__cache.get(self._make_key(key, version), default=default)
class DjangoCache(AccessorCache):
"""Django cache, but safe."""
def __init__(self, django_cache):
"""Initialize the cache."""
self.__cache = django_cache
def _make_key(self, key):
"""Construct a clean key from a key."""
if sys.version_info > (3, 0):
key = key.encode('utf-8')
return hashlib.md5(key).hexdigest()
def set(self, key, value, timeout=None, version=None):
"""Set a key."""
key = self._make_key(key)
return self.__cache.set(key, value, timeout=timeout, version=version)
    def get(self, key, default=None, version=None):
        """Get a key."""
        key = self._make_key(key)
        return self.__cache.get(key, default=default, version=version)
def set_many(self, data, timeout=None, version=None):
"""Set a bunch of keys in the cache."""
new_data = {self._make_key(key): value for key, value in data.items()}
        return self.__cache.set_many(new_data, timeout=timeout, version=version)
def get_many(self, keys, version=None):
"""Fetch a bunch of keys from the cache."""
keymap = {self._make_key(key): key for key in keys}
        data = self.__cache.get_many(keymap.keys(), version=version)
return {keymap[key]: value for key, value in data.items()}
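# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): exercising the
# MemoryCache API defined above. The size/ttl values are arbitrary example
# assumptions.
def _memory_cache_example():
    cache = MemoryCache(size=100, ttl=60)
    cache.set("metric:a", 1, version=1)
    cache.set_many({"metric:b": 2, "metric:c": 3}, version=1)
    assert cache.get("metric:a", version=1) == 1
    assert cache.get("missing", default="n/a", version=1) == "n/a"
    # get_many() only returns the keys that were actually found.
    assert cache.get_many(["metric:b", "missing"], version=1) == {"metric:b": 2}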
|
Thib17/biggraphite
|
biggraphite/accessor_cache.py
|
Python
|
apache-2.0
| 3,879
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from openstackclient.tests.functional import base
class AggregateTests(base.TestCase):
"""Functional tests for aggregate"""
def test_aggregate_crud(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
self.addCleanup(
self.openstack,
'aggregate delete ' + name1,
fail_ok=True,
)
cmd_output = json.loads(self.openstack(
'aggregate create -f json ' +
'--zone nova ' +
'--property a=b ' +
name1
))
self.assertEqual(
name1,
cmd_output['name']
)
self.assertEqual(
'nova',
cmd_output['availability_zone']
)
self.assertIn(
'a',
cmd_output['properties']
)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' + name1))
self.assertEqual(name1, cmd_output['name'])
name2 = uuid.uuid4().hex
self.addCleanup(
self.openstack,
'aggregate delete ' + name2,
fail_ok=True,
)
cmd_output = json.loads(self.openstack(
'aggregate create -f json ' +
'--zone external ' +
name2
))
self.assertEqual(
name2,
cmd_output['name']
)
self.assertEqual(
'external',
cmd_output['availability_zone']
)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' + name2))
self.assertEqual(name2, cmd_output['name'])
# Test aggregate set
name3 = uuid.uuid4().hex
self.addCleanup(
self.openstack,
'aggregate delete ' + name3,
fail_ok=True,
)
raw_output = self.openstack(
'aggregate set ' +
'--name ' + name3 + ' ' +
'--zone internal ' +
'--no-property ' +
'--property c=d ' +
name1
)
self.assertOutput('', raw_output)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' +
name3
))
self.assertEqual(
name3,
cmd_output['name']
)
self.assertEqual(
'internal',
cmd_output['availability_zone']
)
self.assertIn(
'c',
cmd_output['properties']
)
self.assertNotIn(
'a',
cmd_output['properties']
)
# Test aggregate list
cmd_output = json.loads(self.openstack(
'aggregate list -f json'
))
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
# Test aggregate list --long
cmd_output = json.loads(self.openstack(
'aggregate list --long -f json'
))
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
zones = [x['Availability Zone'] for x in cmd_output]
self.assertIn('external', zones)
self.assertIn('internal', zones)
properties = [x['Properties'] for x in cmd_output]
self.assertNotIn({'a': 'b'}, properties)
self.assertIn({'c': 'd'}, properties)
# Test unset
raw_output = self.openstack(
'aggregate unset ' +
'--property c ' +
name3
)
self.assertOutput('', raw_output)
cmd_output = json.loads(self.openstack(
'aggregate show -f json ' +
name3
))
self.assertNotIn(
"c='d'",
cmd_output['properties']
)
# test aggregate delete
del_output = self.openstack(
'aggregate delete ' +
name3 + ' ' +
name2
)
self.assertOutput('', del_output)
def test_aggregate_add_and_remove_host(self):
"""Test aggregate add and remove host"""
# Get a host
cmd_output = json.loads(self.openstack(
'host list -f json'
))
host_name = cmd_output[0]['Host Name']
# NOTE(dtroyer): Cells v1 is not operable with aggregates. Hostnames
# are returned as rrr@host or ccc!rrr@host.
if '@' in host_name:
self.skipTest("Skip aggregates in a Nova cells v1 configuration")
name = uuid.uuid4().hex
self.addCleanup(self.openstack, 'aggregate delete ' + name)
self.openstack(
'aggregate create ' +
name
)
# Test add host
cmd_output = json.loads(self.openstack(
'aggregate add host -f json ' +
name + ' ' +
host_name
))
self.assertIn(
host_name,
cmd_output['hosts']
)
# Test remove host
cmd_output = json.loads(self.openstack(
'aggregate remove host -f json ' +
name + ' ' +
host_name
))
self.assertNotIn(
host_name,
cmd_output['hosts']
)
|
openstack/python-openstackclient
|
openstackclient/tests/functional/compute/v2/test_aggregate.py
|
Python
|
apache-2.0
| 5,960
|
import datetime
from unittest import mock
from django.conf import settings
from django.core import mail
from django.utils.timezone import now
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.lib.actions import (
do_deactivate_realm,
do_deactivate_user,
do_set_realm_property,
do_start_email_change_process,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import (
EmailChangeStatus,
Realm,
UserProfile,
get_realm,
get_user,
get_user_by_delivery_email,
get_user_profile_by_id,
)
class EmailChangeTestCase(ZulipTestCase):
def test_confirm_email_change_with_non_existent_key(self) -> None:
self.login("hamlet")
key = generate_key()
url = confirmation_url(key, None, Confirmation.EMAIL_CHANGE)
response = self.client_get(url)
self.assertEqual(response.status_code, 404)
self.assert_in_response(
"Whoops. We couldn't find your confirmation link in the system.", response
)
def test_confirm_email_change_with_invalid_key(self) -> None:
self.login("hamlet")
key = "invalid_key"
url = confirmation_url(key, None, Confirmation.EMAIL_CHANGE)
response = self.client_get(url)
self.assertEqual(response.status_code, 404)
self.assert_in_response("Whoops. The confirmation link is malformed.", response)
def test_confirm_email_change_when_time_exceeded(self) -> None:
user_profile = self.example_user("hamlet")
old_email = user_profile.email
new_email = "hamlet-new@zulip.com"
self.login("hamlet")
obj = EmailChangeStatus.objects.create(
new_email=new_email,
old_email=old_email,
user_profile=user_profile,
realm=user_profile.realm,
)
date_sent = now() - datetime.timedelta(days=2)
with mock.patch("confirmation.models.timezone_now", return_value=date_sent):
url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
response = self.client_get(url)
self.assertEqual(response.status_code, 404)
self.assert_in_response("The confirmation link has expired or been deactivated.", response)
def test_confirm_email_change(self) -> None:
user_profile = self.example_user("hamlet")
do_set_realm_property(
user_profile.realm,
"email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
acting_user=None,
)
old_email = user_profile.delivery_email
new_email = "hamlet-new@zulip.com"
new_realm = get_realm("zulip")
self.login("hamlet")
obj = EmailChangeStatus.objects.create(
new_email=new_email,
old_email=old_email,
user_profile=user_profile,
realm=user_profile.realm,
)
url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
response = self.client_get(url)
self.assertEqual(response.status_code, 200)
self.assert_in_success_response(
["This confirms that the email address for your Zulip"], response
)
user_profile = get_user_by_delivery_email(new_email, new_realm)
self.assertTrue(bool(user_profile))
obj.refresh_from_db()
self.assertEqual(obj.status, 1)
def test_change_email_deactivated_user_realm(self) -> None:
data = {"email": "hamlet-new@zulip.com"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
url = "/json/settings"
self.assert_length(mail.outbox, 0)
result = self.client_patch(url, data)
self.assert_length(mail.outbox, 1)
self.assert_json_success(result)
email_message = mail.outbox[0]
self.assertEqual(
email_message.subject,
"Verify your new email address",
)
body = email_message.body
self.assertIn("We received a request to change the email", body)
activation_url = [s for s in body.split("\n") if s][2]
do_deactivate_user(user_profile, acting_user=None)
response = self.client_get(activation_url)
self.assertEqual(response.status_code, 401)
do_deactivate_realm(user_profile.realm, acting_user=None)
response = self.client_get(activation_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(response["Location"].endswith("/accounts/deactivated/"))
def test_start_email_change_process(self) -> None:
user_profile = self.example_user("hamlet")
do_start_email_change_process(user_profile, "hamlet-new@zulip.com")
self.assertEqual(EmailChangeStatus.objects.count(), 1)
def test_end_to_end_flow(self) -> None:
data = {"email": "hamlet-new@zulip.com"}
self.login("hamlet")
url = "/json/settings"
self.assert_length(mail.outbox, 0)
result = self.client_patch(url, data)
self.assert_json_success(result)
self.assert_length(mail.outbox, 1)
email_message = mail.outbox[0]
self.assertEqual(
email_message.subject,
"Verify your new email address",
)
body = email_message.body
self.assertIn("We received a request to change the email", body)
self.assertEqual(self.email_envelope_from(email_message), settings.NOREPLY_EMAIL_ADDRESS)
self.assertRegex(
self.email_display_from(email_message),
rf"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
)
self.assertEqual(email_message.extra_headers["List-Id"], "Zulip Dev <zulip.testserver>")
activation_url = [s for s in body.split("\n") if s][2]
response = self.client_get(activation_url)
self.assert_in_success_response(["This confirms that the email address"], response)
# Now confirm trying to change your email back doesn't throw an immediate error
result = self.client_patch(url, {"email": "hamlet@zulip.com"})
self.assert_json_success(result)
def test_unauthorized_email_change(self) -> None:
data = {"email": "hamlet-new@zulip.com"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
do_set_realm_property(
user_profile.realm,
"email_changes_disabled",
True,
acting_user=None,
)
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_length(mail.outbox, 0)
self.assertEqual(result.status_code, 400)
self.assert_in_response("Email address changes are disabled in this organization.", result)
# Realm admins can change their email address even setting is disabled.
data = {"email": "iago-new@zulip.com"}
self.login("iago")
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_json_success(result)
def test_email_change_already_taken(self) -> None:
data = {"email": "cordelia@zulip.com"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_length(mail.outbox, 0)
self.assertEqual(result.status_code, 400)
self.assert_in_response("Already has an account", result)
def test_unauthorized_email_change_from_email_confirmation_link(self) -> None:
data = {"email": "hamlet-new@zulip.com"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
url = "/json/settings"
self.assert_length(mail.outbox, 0)
result = self.client_patch(url, data)
self.assert_length(mail.outbox, 1)
self.assert_json_success(result)
email_message = mail.outbox[0]
self.assertEqual(
email_message.subject,
"Verify your new email address",
)
body = email_message.body
self.assertIn("We received a request to change the email", body)
do_set_realm_property(
user_profile.realm,
"email_changes_disabled",
True,
acting_user=None,
)
activation_url = [s for s in body.split("\n") if s][2]
response = self.client_get(activation_url)
self.assertEqual(response.status_code, 400)
self.assert_in_response(
"Email address changes are disabled in this organization.", response
)
def test_post_invalid_email(self) -> None:
data = {"email": "hamlet-new"}
self.login("hamlet")
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_in_response("Invalid address", result)
def test_post_same_email(self) -> None:
data = {"email": self.example_email("hamlet")}
self.login("hamlet")
url = "/json/settings"
result = self.client_patch(url, data)
self.assertEqual("success", result.json()["result"])
self.assertEqual("", result.json()["msg"])
def test_change_delivery_email_end_to_end_with_admins_visibility(self) -> None:
user_profile = self.example_user("hamlet")
do_set_realm_property(
user_profile.realm,
"email_address_visibility",
Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
acting_user=None,
)
self.login_user(user_profile)
old_email = user_profile.delivery_email
new_email = "hamlet-new@zulip.com"
obj = EmailChangeStatus.objects.create(
new_email=new_email,
old_email=old_email,
user_profile=user_profile,
realm=user_profile.realm,
)
url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
response = self.client_get(url)
self.assertEqual(response.status_code, 200)
self.assert_in_success_response(
["This confirms that the email address for your Zulip"], response
)
user_profile = get_user_profile_by_id(user_profile.id)
self.assertEqual(user_profile.delivery_email, new_email)
self.assertEqual(user_profile.email, f"user{user_profile.id}@zulip.testserver")
obj.refresh_from_db()
self.assertEqual(obj.status, 1)
with self.assertRaises(UserProfile.DoesNotExist):
get_user(old_email, user_profile.realm)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_by_delivery_email(old_email, user_profile.realm)
self.assertEqual(get_user_by_delivery_email(new_email, user_profile.realm), user_profile)
|
zulip/zulip
|
zerver/tests/test_email_change.py
|
Python
|
apache-2.0
| 10,810
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PackagesInfo handlers."""
import hashlib
import httplib
import logging
import urllib
from simian.mac.common import datastore_locks
from simian.auth import gaeserver
from simian.mac import models
from simian.mac.common import auth
from simian.mac.munki import handlers
from simian.mac.munki import plist
from simian.mac.munki.handlers import pkgs
class PackageDoesNotExistError(plist.PlistError):
"""The package referenced in the pkginfo plist does not exist."""
class MunkiPackageInfoPlistStrict(plist.MunkiPackageInfoPlist):
"""Class for Munki plist with added strict validation."""
def __init__(self, *args, **kwargs):
super(MunkiPackageInfoPlistStrict, self).__init__(*args, **kwargs)
self.AddValidationHook(self.ValidatePackageExists)
def ValidatePackageExists(self):
"""Verifies if a particular package exists or not."""
if not pkgs.PackageExists(self._plist['installer_item_location']):
raise PackageDoesNotExistError(
'Package %s does not exist' % self._plist['installer_item_location'])
class PackagesInfo(handlers.AuthenticationHandler):
"""Handler for /pkgsinfo/"""
def get(self, filename=None):
"""GET
Args:
filename: string like Firefox-1.0.dmg
"""
auth_return = auth.DoAnyAuth()
if hasattr(auth_return, 'email'):
email = auth_return.email()
if not any((auth.IsAdminUser(email),
auth.IsSupportUser(email),
)):
raise auth.IsAdminMismatch
if filename:
filename = urllib.unquote(filename)
hash_str = self.request.get('hash')
if hash_str:
lock = models.GetLockForPackage(filename)
try:
lock.Acquire(timeout=30, max_acquire_attempts=5)
except datastore_locks.AcquireLockError:
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Could not lock pkgsinfo')
return
pkginfo = models.PackageInfo.get_by_key_name(filename)
if pkginfo:
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
if hash_str:
self.response.headers['X-Pkgsinfo-Hash'] = self._Hash(pkginfo.plist)
self.response.out.write(pkginfo.plist)
else:
if hash_str:
lock.Release()
self.response.set_status(httplib.NOT_FOUND)
return
if hash_str:
lock.Release()
else:
query = models.PackageInfo.all()
filename = self.request.get('filename')
if filename:
query.filter('filename', filename)
install_types = self.request.get_all('install_types')
for install_type in install_types:
query.filter('install_types =', install_type)
catalogs = self.request.get_all('catalogs')
for catalog in catalogs:
query.filter('catalogs =', catalog)
pkgs = []
for p in query:
pkg = {}
for k in p.properties():
if k != '_plist':
pkg[k] = getattr(p, k)
pkgs.append(pkg)
self.response.out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
self.response.out.write(plist.GetXmlStr(pkgs))
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
def _Hash(self, s):
"""Return a sha256 hash for a string.
Args:
s: str
Returns:
str, sha256 digest
"""
h = hashlib.sha256(str(s))
return h.hexdigest()
def put(self, filename):
"""PUT
Args:
filename: string like Firefox-1.0.dmg
"""
session = gaeserver.DoMunkiAuth(require_level=gaeserver.LEVEL_UPLOADPKG)
filename = urllib.unquote(filename)
hash_str = self.request.get('hash')
catalogs = self.request.get('catalogs', None)
manifests = self.request.get('manifests', None)
install_types = self.request.get('install_types')
if catalogs == '':
catalogs = []
elif catalogs:
catalogs = catalogs.split(',')
if manifests == '':
manifests = []
elif manifests:
manifests = manifests.split(',')
if install_types:
install_types = install_types.split(',')
mpl = MunkiPackageInfoPlistStrict(self.request.body)
try:
mpl.Parse()
except plist.PlistError, e:
logging.exception('Invalid pkginfo plist PUT: \n%s\n', self.request.body)
self.response.set_status(httplib.BAD_REQUEST)
self.response.out.write(str(e))
return
lock_name = 'pkgsinfo_%s' % filename
lock = datastore_locks.DatastoreLock(lock_name)
try:
lock.Acquire(timeout=30, max_acquire_attempts=5)
except datastore_locks.AcquireLockError:
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Could not lock pkgsinfo')
return
# To avoid pkginfo uploads without corresponding packages, only allow
# updates to existing PackageInfo entities, not creations of new ones.
pkginfo = models.PackageInfo.get_by_key_name(filename)
if pkginfo is None:
logging.warning(
'pkginfo "%s" does not exist; PUT only allows updates.', filename)
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Only updates supported')
lock.Release()
return
# If the pkginfo is not modifiable, ensure only manifests have changed.
if not pkginfo.IsSafeToModify():
if not mpl.EqualIgnoringManifestsAndCatalogs(pkginfo.plist):
logging.warning(
'pkginfo "%s" is in stable or testing; change prohibited.',
filename)
self.response.set_status(httplib.FORBIDDEN)
self.response.out.write('Changes to pkginfo not allowed')
lock.Release()
return
    # If the client asked for a careful update by supplying a hash of the
    # last known pkginfo, compare the hashes to help the client make a
    # non-destructive update.
if hash_str:
if self._Hash(pkginfo.plist) != hash_str:
self.response.set_status(httplib.CONFLICT)
self.response.out.write('Update hash does not match')
lock.Release()
return
    # All verification has passed, so update the PackageInfo entity.
pkginfo.plist = mpl
pkginfo.name = mpl.GetPackageName()
if catalogs is not None:
pkginfo.catalogs = catalogs
if manifests is not None:
pkginfo.manifests = manifests
if install_types:
pkginfo.install_types = install_types
pkginfo.put()
lock.Release()
for track in pkginfo.catalogs:
models.Catalog.Generate(track, delay=1)
# Log admin pkginfo put to Datastore.
user = session.uuid
admin_log = models.AdminPackageLog(
user=user, action='pkginfo', filename=filename,
catalogs=pkginfo.catalogs, manifests=pkginfo.manifests,
install_types=pkginfo.install_types, plist=pkginfo.plist.GetXml())
admin_log.put()
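# ---------------------------------------------------------------------------
# Illustrative client-side sketch (not part of the handlers module): the
# "careful update" flow implemented by get()/put() above. Host and auth
# handling are omitted; only the hash round-trip is the point.
#
#   1. GET /pkgsinfo/<filename>?hash=1
#        -> body: current pkginfo plist
#        -> X-Pkgsinfo-Hash header: sha256 of that plist (see _Hash())
#   2. Edit the plist locally.
#   3. PUT /pkgsinfo/<filename>?hash=<value from step 1> with the new plist
#        -> 200 on success
#        -> 409 CONFLICT if the stored pkginfo changed in between, because
#           the supplied hash no longer matches.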
|
frlen/simian
|
src/simian/mac/munki/handlers/pkgsinfo.py
|
Python
|
apache-2.0
| 7,437
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import os
import sys
import unittest
import pytest
import pytz
from pyflink.table import DataTypes, expressions as expr
from pyflink.table.udf import ScalarFunction, udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \
PyFlinkBatchTableTestCase
class UserDefinedFunctionTests(object):
def test_scalar_function(self):
# test metric disabled.
self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'false')
# test lambda function
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
# test Python ScalarFunction
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
# test callable function
add_one_callable = udf(CallablePlus(), result_type=DataTypes.BIGINT())
def partial_func(col, param):
return col + param
# test partial function
import functools
add_one_partial = udf(functools.partial(partial_func, param=1),
result_type=DataTypes.BIGINT())
# check memory limit is set
@udf(result_type=DataTypes.BIGINT())
def check_memory_limit(exec_mode):
if exec_mode == "process":
assert os.environ['_PYTHON_WORKER_MEMORY_LIMIT'] is not None
return 1
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(),
DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
execution_mode = self.t_env.get_config().get_configuration().get_string(
"python.execution-mode", "process")
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.where(add_one(t.b) <= 3).select(
add_one(t.a), subtract_one(t.b), add(t.a, t.c), add_one_callable(t.a),
add_one_partial(t.a), check_memory_limit(execution_mode), t.a) \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, 1, 4, 2, 2, 1, 1]", "+I[4, 0, 12, 4, 4, 1, 3]"])
def test_chaining_scalar_function(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 1), (2, 5, 2), (3, 1, 3)], ['a', 'b', 'c'])
t.select(add(add_one(t.a), subtract_one(t.b)), t.c, expr.lit(1)) \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 1, 1]", "+I[7, 2, 1]", "+I[4, 3, 1]"])
def test_udf_in_join_condition(self):
t1 = self.t_env.from_elements([(2, "Hi")], ['a', 'b'])
t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
f = udf(lambda i: i, result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
t1.join(t2).where(f(t1.a) == t2.c).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, Hi, 2, Flink]"])
def test_udf_in_join_condition_2(self):
t1 = self.t_env.from_elements([(1, "Hi"), (2, "Hi")], ['a', 'b'])
t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
f = udf(lambda i: i, result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
t1.join(t2).where(f(t1.a) == f(t2.c)).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, Hi, 2, Flink]"])
def test_udf_with_constant_params(self):
def udf_with_constant_params(p, null_param, tinyint_param, smallint_param, int_param,
bigint_param, decimal_param, float_param, double_param,
boolean_param, str_param,
date_param, time_param, timestamp_param):
from decimal import Decimal
import datetime
assert null_param is None, 'null_param is wrong value %s' % null_param
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
p += tinyint_param
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
p += smallint_param
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
p += int_param
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
p += bigint_param
assert decimal_param == Decimal('1.05'), \
'decimal_param is wrong value %s ' % decimal_param
p += int(decimal_param)
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-06), \
'float_param is wrong value %s ' % float_param
p += int(float_param)
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-07), \
'double_param is wrong value %s ' % double_param
p += int(double_param)
assert boolean_param is True, 'boolean_param is wrong value %s' % boolean_param
assert str_param == 'flink', 'str_param is wrong value %s' % str_param
assert date_param == datetime.date(year=2014, month=9, day=13), \
'date_param is wrong value %s' % date_param
assert time_param == datetime.time(hour=12, minute=0, second=0), \
'time_param is wrong value %s' % time_param
assert timestamp_param == datetime.datetime(1999, 9, 10, 5, 20, 10), \
'timestamp_param is wrong value %s' % timestamp_param
return p
self.t_env.create_temporary_system_function("udf_with_constant_params",
udf(udf_with_constant_params,
result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"udf_with_all_constant_params", udf(lambda i, j: i + j,
result_type=DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
self.t_env.register_table("test_table", t)
self.t_env.sql_query("select udf_with_all_constant_params("
"cast (1 as BIGINT),"
"cast (2 as BIGINT)), "
"udf_with_constant_params(a, "
"cast (null as BIGINT),"
"cast (1 as TINYINT),"
"cast (1 as SMALLINT),"
"cast (1 as INT),"
"cast (1 as BIGINT),"
"cast (1.05 as DECIMAL),"
"cast (1.23 as FLOAT),"
"cast (1.98932 as DOUBLE),"
"true,"
"'flink',"
"cast ('2014-09-13' as DATE),"
"cast ('12:00:00' as TIME),"
"cast ('1999-9-10 05:20:10' as TIMESTAMP))"
" from test_table").insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 8]", "+I[3, 9]", "+I[3, 10]"])
def test_overwrite_builtin_function(self):
self.t_env.create_temporary_system_function(
"plus", udf(lambda i, j: i + j - 1,
result_type=DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(['a'], [DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.select("plus(a, b)").execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2]", "+I[6]", "+I[3]"])
def test_open(self):
self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'true')
execution_mode = self.t_env.get_config().get_configuration().get_string(
"python.execution-mode", None)
if execution_mode == "process":
subtract = udf(SubtractWithMetrics(), result_type=DataTypes.BIGINT())
else:
subtract = udf(Subtract(), result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 4)], ['a', 'b'])
t.select(t.a, subtract(t.b)).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 1]", "+I[2, 4]", "+I[3, 3]"])
def test_udf_without_arguments(self):
one = udf(lambda: 1, result_type=DataTypes.BIGINT(), deterministic=True)
two = udf(lambda: 2, result_type=DataTypes.BIGINT(), deterministic=False)
table_sink = source_sink_utils.TestAppendSink(['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(one(), two()).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2]", "+I[1, 2]", "+I[1, 2]"])
def test_all_data_types_expression(self):
@udf(result_type=DataTypes.BOOLEAN())
def boolean_func(bool_param):
assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \
% type(bool_param)
return bool_param
@udf(result_type=DataTypes.TINYINT())
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
return tinyint_param
@udf(result_type=DataTypes.SMALLINT())
def smallint_func(smallint_param):
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
@udf(result_type=DataTypes.INT())
def int_func(int_param):
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
assert int_param == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
@udf(result_type=DataTypes.BIGINT())
def bigint_func(bigint_param):
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
return bigint_param
@udf(result_type=DataTypes.BIGINT())
def bigint_func_none(bigint_param):
assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param
return bigint_param
@udf(result_type=DataTypes.FLOAT())
def float_func(float_param):
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \
'float_param is wrong value %s !' % float_param
return float_param
@udf(result_type=DataTypes.DOUBLE())
def double_func(double_param):
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \
'double_param is wrong value %s !' % double_param
return double_param
@udf(result_type=DataTypes.BYTES())
def bytes_func(bytes_param):
assert bytes_param == b'flink', \
'bytes_param is wrong value %s !' % bytes_param
return bytes_param
@udf(result_type=DataTypes.STRING())
def str_func(str_param):
assert str_param == 'pyflink', \
'str_param is wrong value %s !' % str_param
return str_param
@udf(result_type=DataTypes.DATE())
def date_func(date_param):
from datetime import date
assert date_param == date(year=2014, month=9, day=13), \
'date_param is wrong value %s !' % date_param
return date_param
@udf(result_type=DataTypes.TIME())
def time_func(time_param):
from datetime import time
assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \
'time_param is wrong value %s !' % time_param
return time_param
@udf(result_type=DataTypes.TIMESTAMP(3))
def timestamp_func(timestamp_param):
from datetime import datetime
assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \
'timestamp_param is wrong value %s !' % timestamp_param
return timestamp_param
@udf(result_type=DataTypes.ARRAY(DataTypes.BIGINT()))
def array_func(array_param):
assert array_param == [[1, 2, 3]], \
'array_param is wrong value %s !' % array_param
return array_param[0]
@udf(result_type=DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()))
def map_func(map_param):
assert map_param == {1: 'flink', 2: 'pyflink'}, \
'map_param is wrong value %s !' % map_param
return map_param
@udf(result_type=DataTypes.DECIMAL(38, 18))
def decimal_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
@udf(result_type=DataTypes.DECIMAL(38, 18))
def decimal_cut_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.TINYINT(),
DataTypes.BOOLEAN(), DataTypes.SMALLINT(), DataTypes.INT(),
DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.BYTES(),
DataTypes.STRING(), DataTypes.DATE(), DataTypes.TIME(),
DataTypes.TIMESTAMP(3), DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()),
DataTypes.DECIMAL(38, 18), DataTypes.DECIMAL(38, 18)])
self.t_env.register_table_sink("Results", table_sink)
import datetime
import decimal
t = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]],
{1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18))]))
t.select(
bigint_func(t.a),
bigint_func_none(t.b),
tinyint_func(t.c),
boolean_func(t.d),
smallint_func(t.e),
int_func(t.f),
float_func(t.g),
double_func(t.h),
bytes_func(t.i),
str_func(t.j),
date_func(t.k),
time_func(t.l),
timestamp_func(t.m),
array_func(t.n),
map_func(t.o),
decimal_func(t.p),
decimal_cut_func(t.q)) \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
# Currently the sink result precision of DataTypes.TIME(precision) only supports 0.
self.assert_equals(actual,
["+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, "
"[102, 108, 105, 110, 107], pyflink, 2014-09-13, "
"12:00:00, 2018-03-11 03:00:00.123, [1, 2, 3], "
"{1=flink, 2=pyflink}, 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999]"])
def test_all_data_types(self):
def boolean_func(bool_param):
assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \
% type(bool_param)
return bool_param
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
return tinyint_param
def smallint_func(smallint_param):
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
def int_func(int_param):
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
assert int_param == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
def bigint_func(bigint_param):
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
return bigint_param
def bigint_func_none(bigint_param):
assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param
return bigint_param
def float_func(float_param):
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \
'float_param is wrong value %s !' % float_param
return float_param
def double_func(double_param):
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \
'double_param is wrong value %s !' % double_param
return double_param
def bytes_func(bytes_param):
assert bytes_param == b'flink', \
'bytes_param is wrong value %s !' % bytes_param
return bytes_param
def str_func(str_param):
assert str_param == 'pyflink', \
'str_param is wrong value %s !' % str_param
return str_param
def date_func(date_param):
from datetime import date
assert date_param == date(year=2014, month=9, day=13), \
'date_param is wrong value %s !' % date_param
return date_param
def time_func(time_param):
from datetime import time
assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \
'time_param is wrong value %s !' % time_param
return time_param
def timestamp_func(timestamp_param):
from datetime import datetime
assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \
'timestamp_param is wrong value %s !' % timestamp_param
return timestamp_param
def array_func(array_param):
assert array_param == [[1, 2, 3]], \
'array_param is wrong value %s !' % array_param
return array_param[0]
def map_func(map_param):
assert map_param == {1: 'flink', 2: 'pyflink'}, \
'map_param is wrong value %s !' % map_param
return map_param
def decimal_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
def decimal_cut_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
self.t_env.create_temporary_system_function(
"boolean_func", udf(boolean_func, result_type=DataTypes.BOOLEAN()))
self.t_env.create_temporary_system_function(
"tinyint_func", udf(tinyint_func, result_type=DataTypes.TINYINT()))
self.t_env.create_temporary_system_function(
"smallint_func", udf(smallint_func, result_type=DataTypes.SMALLINT()))
self.t_env.create_temporary_system_function(
"int_func", udf(int_func, result_type=DataTypes.INT()))
self.t_env.create_temporary_system_function(
"bigint_func", udf(bigint_func, result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"bigint_func_none", udf(bigint_func_none, result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"float_func", udf(float_func, result_type=DataTypes.FLOAT()))
self.t_env.create_temporary_system_function(
"double_func", udf(double_func, result_type=DataTypes.DOUBLE()))
self.t_env.create_temporary_system_function(
"bytes_func", udf(bytes_func, result_type=DataTypes.BYTES()))
self.t_env.create_temporary_system_function(
"str_func", udf(str_func, result_type=DataTypes.STRING()))
self.t_env.create_temporary_system_function(
"date_func", udf(date_func, result_type=DataTypes.DATE()))
self.t_env.create_temporary_system_function(
"time_func", udf(time_func, result_type=DataTypes.TIME()))
self.t_env.create_temporary_system_function(
"timestamp_func", udf(timestamp_func, result_type=DataTypes.TIMESTAMP(3)))
self.t_env.create_temporary_system_function(
"array_func", udf(array_func, result_type=DataTypes.ARRAY(DataTypes.BIGINT())))
self.t_env.create_temporary_system_function(
"map_func", udf(map_func,
result_type=DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())))
self.t_env.register_function(
"decimal_func", udf(decimal_func, result_type=DataTypes.DECIMAL(38, 18)))
self.t_env.register_function(
"decimal_cut_func", udf(decimal_cut_func, result_type=DataTypes.DECIMAL(38, 18)))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.TINYINT(),
DataTypes.BOOLEAN(), DataTypes.SMALLINT(), DataTypes.INT(),
DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.BYTES(),
DataTypes.STRING(), DataTypes.DATE(), DataTypes.TIME(),
DataTypes.TIMESTAMP(3), DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()),
DataTypes.DECIMAL(38, 18), DataTypes.DECIMAL(38, 18)])
self.t_env.register_table_sink("Results", table_sink)
import datetime
import decimal
t = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]],
{1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18))]))
t.select("bigint_func(a), bigint_func_none(b),"
"tinyint_func(c), boolean_func(d),"
"smallint_func(e),int_func(f),"
"float_func(g),double_func(h),"
"bytes_func(i),str_func(j),"
"date_func(k),time_func(l),"
"timestamp_func(m),array_func(n),"
"map_func(o),decimal_func(p),"
"decimal_cut_func(q)") \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
# Currently the sink result precision of DataTypes.TIME(precision) only supports 0.
self.assert_equals(actual,
["+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, "
"[102, 108, 105, 110, 107], pyflink, 2014-09-13, "
"12:00:00, 2018-03-11 03:00:00.123, [1, 2, 3], "
"{1=flink, 2=pyflink}, 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999]"])
def test_create_and_drop_function(self):
t_env = self.t_env
t_env.create_temporary_system_function(
"add_one_func", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
t_env.create_temporary_function(
"subtract_one_func", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
self.assert_equals(t_env.list_user_defined_functions(),
['add_one_func', 'subtract_one_func'])
t_env.drop_temporary_system_function("add_one_func")
t_env.drop_temporary_function("subtract_one_func")
self.assert_equals(t_env.list_user_defined_functions(), [])
# decide whether two floats are equal
def float_equal(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
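# Illustrative note (not part of the original tests): float_equal mirrors the
# semantics of math.isclose, e.g. float_equal(a, b, 1e-6) behaves like
# math.isclose(a, b, rel_tol=1e-6, abs_tol=0.0).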
class PyFlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests,
PyFlinkStreamTableTestCase):
def test_deterministic(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.assertTrue(add_one._deterministic)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), deterministic=False)
self.assertFalse(add_one._deterministic)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
self.assertTrue(subtract_one._deterministic)
with self.assertRaises(ValueError, msg="Inconsistent deterministic: False and True"):
udf(SubtractOne(), result_type=DataTypes.BIGINT(), deterministic=False)
self.assertTrue(add._deterministic)
@udf(result_type=DataTypes.BIGINT(), deterministic=False)
def non_deterministic_udf(i):
return i
self.assertFalse(non_deterministic_udf._deterministic)
def test_name(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.assertEqual("<lambda>", add_one._name)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), name="add_one")
self.assertEqual("add_one", add_one._name)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
self.assertEqual("SubtractOne", subtract_one._name)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT(), name="subtract_one")
self.assertEqual("subtract_one", subtract_one._name)
self.assertEqual("add", add._name)
@udf(result_type=DataTypes.BIGINT(), name="named")
def named_udf(i):
return i
self.assertEqual("named", named_udf._name)
def test_abc(self):
class UdfWithoutEval(ScalarFunction):
def open(self, function_context):
pass
with self.assertRaises(
TypeError,
msg="Can't instantiate abstract class UdfWithoutEval with abstract methods eval"):
UdfWithoutEval()
def test_invalid_udf(self):
class Plus(object):
def eval(self, col):
return col + 1
with self.assertRaises(
TypeError,
msg="Invalid function: not a function or callable (__call__ is not defined)"):
# test non-callable function
self.t_env.create_temporary_system_function(
"non-callable-udf", udf(Plus(), DataTypes.BIGINT(), DataTypes.BIGINT()))
def test_data_types(self):
timezone = self.t_env.get_config().get_local_timezone()
local_datetime = pytz.timezone(timezone).localize(
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000))
@udf(result_type=DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))
def local_zoned_timestamp_func(local_zoned_timestamp_param):
assert local_zoned_timestamp_param == local_datetime, \
'local_zoned_timestamp_param is wrong value %s !' % local_zoned_timestamp_param
return local_zoned_timestamp_param
table_sink = source_sink_utils.TestAppendSink(
['a'], [DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3)])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements(
[(local_datetime,)],
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))]))
t.select(local_zoned_timestamp_func(local_zoned_timestamp_func(t.a))) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1970-01-01T00:00:00.123Z]"])
def test_execute_from_json_plan(self):
# create source file path
tmp_dir = self.tempdir
data = ['1,1', '3,3', '2,2']
source_path = tmp_dir + '/test_execute_from_json_plan_input.csv'
sink_path = tmp_dir + '/test_execute_from_json_plan_out'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table = """
CREATE TABLE source_table (
a BIGINT,
b BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % source_path
self.t_env.execute_sql(source_table)
self.t_env.execute_sql("""
CREATE TABLE sink_table (
id BIGINT,
data BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % sink_path)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.t_env.create_temporary_system_function("add_one", add_one)
json_plan = self.t_env._j_tenv.getJsonPlan("INSERT INTO sink_table SELECT "
"a, "
"add_one(b) "
"FROM source_table")
from py4j.java_gateway import get_method
get_method(self.t_env._j_tenv.executeJsonPlan(json_plan), "await")()
import glob
lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
lines.sort()
self.assertEqual(lines, ['1,2', '2,3', '3,4'])
class PyFlinkBatchUserDefinedFunctionTests(UserDefinedFunctionTests,
PyFlinkBatchTableTestCase):
pass
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python3.7")
class PyFlinkEmbeddedMultiThreadTests(UserDefinedFunctionTests, PyFlinkBatchTableTestCase):
def setUp(self):
super(PyFlinkEmbeddedMultiThreadTests, self).setUp()
self.t_env.get_config().get_configuration().set_string("python.execution-mode",
"multi-thread")
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires python3.7")
class PyFlinkEmbeddedSubInterpreterTests(UserDefinedFunctionTests, PyFlinkBatchTableTestCase):
def setUp(self):
super(PyFlinkEmbeddedSubInterpreterTests, self).setUp()
self.t_env.get_config().get_configuration().set_string("python.execution-mode",
"sub-interpreter")
# test specifying the input_types explicitly
@udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT())
def add(i, j):
return i + j
class SubtractOne(ScalarFunction):
def eval(self, i):
return i - 1
class SubtractWithMetrics(ScalarFunction, unittest.TestCase):
def open(self, function_context):
self.subtracted_value = 1
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def eval(self, i):
# counter
self.counter.inc(i)
self.counter_sum += i
return i - self.subtracted_value
class Subtract(ScalarFunction, unittest.TestCase):
def open(self, function_context):
self.subtracted_value = 1
self.counter_sum = 0
def eval(self, i):
# counter
self.counter_sum += i
return i - self.subtracted_value
class CallablePlus(object):
def __call__(self, col):
return col + 1
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
lincoln-lil/flink
|
flink-python/pyflink/table/tests/test_udf.py
|
Python
|
apache-2.0
| 38,764
|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Functions for solving NLP problems using the bruteforce method.
"""
from contracts import contract
from neat.contracts_extra import *
import nlp
from neat.common import frange
import logging
log = logging.getLogger(__name__)
@contract
def solve2(objective, constraint, step, limit):
""" Solve a maximization problem for 2 states.
:param objective: The objective function.
:type objective: function
:param constraint: A tuple representing the constraint.
:type constraint: tuple(function, function, number)
:param step: The step size.
:type step: number,>0
:param limit: The maximum value of the variables.
:type limit: number,>0
:return: The problem solution.
:rtype: list(number)
"""
res_best = 0
solution = []
for x in frange(0, limit, step):
for y in frange(0, limit, step):
try:
res = objective(x, y)
if res > res_best and \
constraint[1](constraint[0](x, y), constraint[2]):
res_best = res
solution = [x, y]
except ZeroDivisionError:
pass
return solution
@contract
def optimize(step, limit, otf, migration_time, ls, p, state_vector,
time_in_states, time_in_state_n):
""" Solve a MHOD optimization problem.
:param step: The step size for the bruteforce algorithm.
:type step: number,>0
:param limit: The maximum value of the variables.
:type limit: number,>0
:param otf: The OTF parameter.
:type otf: number,>=0,<=1
:param migration_time: The VM migration time in time steps.
:type migration_time: float,>=0
:param ls: L functions.
:type ls: list(function)
:param p: A matrix of transition probabilities.
:type p: list(list(number))
:param state_vector: A state vector.
:type state_vector: list(int)
:param time_in_states: The total time in all the states in time steps.
:type time_in_states: number,>=0
:param time_in_state_n: The total time in the state N in time steps.
:type time_in_state_n: number,>=0
:return: The solution of the problem.
:rtype: list(number)
"""
objective = nlp.build_objective(ls, state_vector, p)
constraint = nlp.build_constraint(otf, migration_time, ls, state_vector,
p, time_in_states, time_in_state_n)
return solve2(objective, constraint, step, limit)
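# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a toy call to
# solve2(). The objective and constraint below are made up purely to show the
# expected argument shapes; the constraint tuple is checked by solve2() as
# constraint[1](constraint[0](x, y), constraint[2]).
def _solve2_example():
    objective = lambda x, y: x + y                  # maximize x + y
    constraint = (lambda x, y: x + y,               # constrained quantity
                  lambda lhs, rhs: lhs <= rhs,      # comparison operator
                  1.0)                              # threshold
    # With step=0.1 and limit=1.0 the grid search returns a pair whose sum is
    # (approximately) 1.0, e.g. [0.0, 1.0].
    return solve2(objective, constraint, 0.1, 1.0)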
|
fr34k8/openstack-neat
|
neat/locals/overload/mhod/bruteforce.py
|
Python
|
apache-2.0
| 3,071
|
import unittest
import re
class RegularExpressionRegression(unittest.TestCase):
def test_basic(self):
frp = re.compile(r'old')
self.assertEqual('Something new', frp.sub('new', 'Something old'))
def test_search(self):
self.assertEqual(re.search("(abc){1}", ""), None)
self.assertEqual(str(re.search("(abc){1}", "abcxyz").span()), '(0, 3)')
def test_match(self):
self.assertEqual(str(re.match("(abc){1}", "abcxyz", flags=re.L).span()), '(0, 3)')
def test_split(self):
self.assertEqual(str(re.split("(abc){1}", "abcxyz")), "['', 'abc', 'xyz']")
def test_findall(self):
self.assertEqual(re.findall("(abc){1}", ""), [])
self.assertEqual(re.findall("(abc){1}", "abcxyz"), ['abc'])
self.assertEqual(re.findall("(abc){1}", "abcxyz", re.L), ['abc'])
self.assertEqual(re.findall("(abc){1}", "abcxyz", flags=re.L), ['abc'])
self.assertEqual(re.findall("(abc){1}", "xyzabcabc"), ['abc', 'abc'])
def test_sub(self):
self.assertEqual(re.sub("(abc){1}", "9", "abcd"), "9d")
self.assertEqual(re.sub("(abc){1}", "abcxyz",'abcd'), "abcxyzd")
self.assertEqual(re.sub("(abc){1}", "1", "abcd", 0), "1d")
self.assertEqual(re.sub("(abc){1}", "1", "abcd", count=0), "1d")
self.assertEqual(re.sub("(abc){1}", "1", "abcdabcd", 1), "1dabcd")
self.assertEqual(re.sub("(abc){1}", "1", "abcdabcd", 2), "1d1d")
def test_escape(self):
self.assertEqual(re.escape("abc"), "abc")
self.assertEqual(re.escape(""), "")
self.assertEqual(re.escape("_"), "\\_")
self.assertEqual(re.escape("a_c"), "a\\_c")
|
tempbottle/dlr
|
Src/Hosts/Silverlight/Tests/tests/regressions/test_re.py
|
Python
|
apache-2.0
| 1,710
|
# Copyright 2013 IBM Corp
#
# Author: Tong Li <litong01@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Base(object):
def __init__(self, conf):
self.conf = conf
@abc.abstractmethod
def define_routes(self, app):
"""Post metric data interface."""
|
shaunaks/python-monasca
|
monasca/dispatcher/__init__.py
|
Python
|
apache-2.0
| 849
|
"""
Current Fabric version constant plus version pretty-print method.
This functionality is contained in its own module to prevent circular import
problems with ``__init__.py`` (which is loaded by setup.py during installation,
which in turn needs access to this version information.)
"""
from subprocess import Popen, PIPE
from os.path import abspath, dirname
VERSION = (1, 8, 3, 'final', 0)
def git_sha():
loc = abspath(dirname(__file__))
try:
p = Popen(
"cd \"%s\" && git log -1 --format=format:%%h" % loc,
shell=True,
stdout=PIPE,
stderr=PIPE
)
return p.communicate()[0]
# OSError occurs on Unix-derived platforms lacking Popen's configured shell
# default, /bin/sh. E.g. Android.
except OSError:
return None
def get_version(form='short'):
"""
Return a version string for this package, based on `VERSION`.
Takes a single argument, ``form``, which should be one of the following
strings:
* ``branch``: just the major + minor, e.g. "0.9", "1.0".
* ``short`` (default): compact, e.g. "0.9rc1", "0.9.0". For package
filenames or SCM tag identifiers.
* ``normal``: human readable, e.g. "0.9", "0.9.1", "0.9 beta 1". For e.g.
documentation site headers.
* ``verbose``: like ``normal`` but fully explicit, e.g. "0.9 final". For
tag commit messages, or anywhere that it's important to remove ambiguity
between a branch and the first final release within that branch.
* ``all``: Returns all of the above, as a dict.
"""
# Setup
versions = {}
branch = "%s.%s" % (VERSION[0], VERSION[1])
tertiary = VERSION[2]
type_ = VERSION[3]
final = (type_ == "final")
type_num = VERSION[4]
firsts = "".join([x[0] for x in type_.split()])
sha = git_sha()
sha1 = (" (%s)" % sha) if sha else ""
# Branch
versions['branch'] = branch
# Short
v = branch
if (tertiary or final):
v += "." + str(tertiary)
if not final:
v += firsts
if type_num:
v += str(type_num)
else:
v += sha1
versions['short'] = v
# Normal
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
versions['normal'] = v
# Verbose
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
else:
v += " final"
versions['verbose'] = v
try:
return versions[form]
except KeyError:
if form == 'all':
return versions
raise TypeError('"%s" is not a valid form specifier.' % form)
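# Worked example for the VERSION tuple defined above, (1, 8, 3, 'final', 0):
# get_version('branch') -> '1.8', get_version('short') and
# get_version('normal') -> '1.8.3', and get_version('verbose') -> '1.8.3 final'.
# For a non-final VERSION the short form instead appends the first letters of
# the release type (e.g. 'rc') plus its number, or the git SHA when no number
# is set.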
__version__ = get_version('short')
if __name__ == "__main__":
print(get_version('all'))
|
alex/fabric
|
fabric/version.py
|
Python
|
bsd-2-clause
| 2,985
|
import unittest
from openrunlog import models, util
fbid = '1264253926'
email = 'dtwwtd@gmail.com'
class BCryptTests(unittest.TestCase):
def test_bcrypt(self):
password = 'password'
self.assertTrue(util.check_pwd(password, util.hash_pwd(password)))
class ImageHTMLTests(unittest.TestCase):
expected_fb_url = 'https://graph.facebook.com/1264253926/picture?type=small'
expected_robo_url = 'https://robohash.org/dtwwtd@gmail.com.jpg?gravatar=yes&size=50x50'
expected_bigrobo_url = 'https://robohash.org/dtwwtd@gmail.com.jpg?gravatar=yes&size=180x180'
def test_fb_image_url(self):
url = util.fb_image_url(fbid)
self.assertEqual(url, self.expected_fb_url)
def test_robohash_img_url(self):
url = util.robohash_image_url(email, 50)
self.assertEqual(url, self.expected_robo_url)
def test_image_html(self):
expected_base_html = '<img src="{}" />'
expected_fb_html = expected_base_html.format(self.expected_fb_url)
expected_robo_html = expected_base_html.format(self.expected_robo_url)
expected_bigrobo_html = expected_base_html.format(self.expected_bigrobo_url)
fbuser = models.User(display_name='david', email=email)
fbuser.facebook['id'] = fbid
robouser = models.User(display_name='david', email=email)
fb_html = util.image_html(fbuser, 'small')
robo_html = util.image_html(robouser, 'small')
bigrobo_html = util.image_html(robouser, 'big')
self.assertEqual(expected_fb_html, fb_html)
self.assertEqual(expected_robo_html, robo_html)
self.assertEqual(expected_bigrobo_html, bigrobo_html)
|
JsonChiu/openrunlog
|
openrunlog/tests/test_util.py
|
Python
|
bsd-2-clause
| 1,673
|
from django.conf.urls.defaults import *
from django.contrib import admin
urlpatterns = patterns('',
(r"^housekeeping/statistics/", "housekeeping.statistics.stats"),
# /housekeeping/repair_mptt/contacts_and_people.Entity/
(r"^housekeeping/repair_mptt/(?P<slug>[-\w\\.]+)/$", "housekeeping.repair_mptt.fix"),
# then, try to match /housekeeping/<task>/<execute>
(r"^housekeeping/(?P<task>[^/]+)/(?P<action>[^/]+)/$", "housekeeping.tasks.tasks"),
# # no match?
(r"^housekeeping/", "housekeeping.tasks.tasks"),
# (r"^housekeeping/clean_plugins/", "housekeeping.clean_plugins.clean"),
#
#
# (r"^statistics/user/(?P<slug>[-\w]+)$", "housekeeping.statistics.userstats"),
)
|
evildmp/Arkestra
|
housekeeping/urls.py
|
Python
|
bsd-2-clause
| 731
|
"""
Commands provided by the smt tool.
Each command corresponds to a function in this module.
:copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt
:license: BSD 2-clause, see LICENSE for details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import os.path
import sys
from argparse import ArgumentParser
from textwrap import dedent
import warnings
import logging
import sumatra
from sumatra.programs import get_executable
from sumatra.datastore import get_data_store
from sumatra.projects import Project, load_project
from sumatra.launch import get_launch_mode
from sumatra.parameters import build_parameters
from sumatra.recordstore import get_record_store
from sumatra.versioncontrol import get_working_copy, get_repository, UncommittedModificationsError
from sumatra.formatting import get_diff_formatter
from sumatra.records import MissingInformationError
from sumatra.core import TIMESTAMP_FORMAT
logger = logging.getLogger("Sumatra")
logger.setLevel(logging.CRITICAL)
h = logging.StreamHandler()
h.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(h)
logger.debug("STARTING")
modes = ("init", "configure", "info", "run", "list", "delete", "comment", "tag",
"repeat", "diff", "help", "export", "upgrade", "sync", "migrate", "version")
store_arg_help = "The argument can take the following forms: (1) `/path/to/sqlitedb` - DjangoRecordStore is used with the specified Sqlite database, (2) `http[s]://location` - remote HTTPRecordStore is used with a remote Sumatra server, (3) `postgres://username:password@hostname/databasename` - DjangoRecordStore is used with specified Postgres database."
## recommended method for modifying warning formatting
## see https://docs.python.org/2/library/warnings.html#warnings.showwarning
def _warning(
message,
category = UserWarning,
filename = '',
lineno = -1):
print("Warning: ")
print(message)
warnings.showwarning = _warning
def parse_executable_str(exec_str):
"""
Split the string describing the executable into a path part and an
options part.
"""
first_space = exec_str.find(" ")
if first_space == -1:
first_space = len(exec_str)
return exec_str[:first_space], exec_str[first_space:]
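# For example, parse_executable_str("python -u") returns ("python", " -u"),
# and parse_executable_str("python") returns ("python", "") because the whole
# string is taken as the path when it contains no space.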
def parse_arguments(args, input_datastore, stdin=None, stdout=None,
allow_command_line_parameters=True):
cmdline_parameters = []
script_args = []
parameter_sets = []
input_data = []
for arg in args:
have_parameters = False
if os.path.isfile(arg): # could be a parameter file or a data file
parameters = build_parameters(arg)
if parameters is not None:
parameter_sets.append(parameters)
script_args.append("<parameters>")
have_parameters = True
if not have_parameters:
if arg[0] == "/":
path = arg
else:
path = os.path.relpath(arg, input_datastore.root)
if input_datastore.contains_path(path):
data_key = input_datastore.generate_keys(path)
input_data.extend(data_key)
script_args.append(arg)
elif allow_command_line_parameters and "=" in arg: # cmdline parameter
cmdline_parameters.append(arg)
else: # a flag or something, passed on unchanged
script_args.append(arg)
if stdin:
script_args.append("< %s" % stdin)
if input_datastore.contains_path(stdin):
data_key = input_datastore.generate_keys(stdin)
input_data.extend(data_key)
else:
raise IOError("File does not exist: %s" % stdin)
if stdout:
script_args.append("> %s" % stdout)
assert len(parameter_sets) < 2, "No more than one parameter file may be supplied." # temporary restriction
if cmdline_parameters:
if parameter_sets:
ps = parameter_sets[0]
for cl in cmdline_parameters:
try:
ps.update(ps.parse_command_line_parameter(cl))
except ValueError as v:
message, name, value = v.args
warnings.warn(message)
warnings.warn("'{0}={1}' not defined in the parameter file".format(name, value))
ps.update({name: value}) ## for now, add the command line param anyway
else:
raise Exception("Command-line parameters supplied but without a parameter file to put them into.")
# ought really to have a more specific Exception and to catch it so as to give a helpful error message to user
return parameter_sets, input_data, " ".join(script_args)
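# Illustrative sketch of the return value (file and parameter names are
# hypothetical): for args == ["defaults.param", "input.dat", "tau_m=0.2",
# "--verbose"], where defaults.param is recognised by build_parameters() and
# input.dat lives in the input datastore, the function would return the single
# parameter set (with tau_m overridden to 0.2 when command-line parameters are
# allowed), the data keys generated for input.dat, and the script-argument
# string "<parameters> input.dat --verbose".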
def init(argv):
"""Create a new project in the current directory."""
usage = "%(prog)s init [options] NAME"
description = "Create a new project called NAME in the current directory."
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('project_name', metavar='NAME', help="a short name for the project; should not contain spaces.")
parser.add_argument('-d', '--datapath', metavar='PATH', default='./Data', help="set the path to the directory in which smt will search for output datafiles generated by the simulation/analysis. Defaults to %(default)s.")
parser.add_argument('-i', '--input', metavar='PATH', default='/', help="set the path to the directory relative to which input datafile paths will be given. Defaults to the filesystem root.")
parser.add_argument('-l', '--addlabel', choices=['cmdline', 'parameters', None], metavar='OPTION',
default=None, help="If this option is set, smt will append the record label either to the command line (option 'cmdline') or to the parameter file (option 'parameters'), and will add the label to the datapath when searching for datafiles. It is up to the user to make use of this label inside their program to ensure files are created in the appropriate location.")
parser.add_argument('-e', '--executable', metavar='PATH', help="set the path to the executable. If this is not set, smt will try to infer the executable from the value of the --main option, if supplied, and will try to find the executable from the PATH environment variable, then by searching various likely locations on the filesystem.")
parser.add_argument('-r', '--repository', help="the URL of a Subversion or Mercurial repository containing the code. This will be checked out/cloned into the current directory.")
parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulation or analysis normally, e.g. init.hoc.")
parser.add_argument('-c', '--on-changed', default='error', help="the action to take if the code in the repository or any of the dependencies has changed. Defaults to %(default)s") # need to add list of allowed values
parser.add_argument('-s', '--store', help="Specify the path, URL or URI to the record store (must be specified). This can either be an existing record store or one to be created. {0} Not using the `--store` argument defaults to a DjangoRecordStore with Sqlite in `.smt/records`".format(store_arg_help))
parser.add_argument('-g', '--labelgenerator', choices=['timestamp', 'uuid'], default='timestamp', metavar='OPTION', help="specify which method Sumatra should use to generate labels (options: timestamp, uuid)")
parser.add_argument('-t', '--timestamp_format', help="the timestamp format given to strftime", default=TIMESTAMP_FORMAT)
parser.add_argument('-L', '--launch_mode', choices=['serial', 'distributed', 'slurm-mpi'], default='serial', help="how computations should be launched. Defaults to %(default)s")
parser.add_argument('-o', '--launch_mode_options', help="extra options for the given launch mode")
datastore = parser.add_mutually_exclusive_group()
datastore.add_argument('-W', '--webdav', metavar='URL', help="specify a webdav URL (with username@password: if needed) as the archiving location for data")
datastore.add_argument('-A', '--archive', metavar='PATH', help="specify a directory in which to archive output datafiles. If not specified, or if 'false', datafiles are not archived.")
datastore.add_argument('-M', '--mirror', metavar='URL', help="specify a URL at which your datafiles will be mirrored.")
args = parser.parse_args(argv)
try:
project = load_project()
parser.error("A project already exists in directory '{0}'.".format(project.path))
except Exception:
pass
if not os.path.exists(".smt"):
os.mkdir(".smt")
if args.repository:
repository = get_repository(args.repository)
repository.checkout()
else:
repository = get_working_copy().repository # if no repository is specified, we assume there is a working copy in the current directory.
if args.executable:
executable_path, executable_options = parse_executable_str(args.executable)
executable = get_executable(path=executable_path)
executable.args = executable_options
elif args.main:
try:
executable = get_executable(script_file=args.main)
except Exception: # assume unrecognized extension - really need more specific exception type
# should warn that extension unrecognized
executable = None
else:
executable = None
if args.store:
record_store = get_record_store(args.store)
else:
record_store = 'default'
if args.webdav:
# should we care about archive migration??
output_datastore = get_data_store("DavFsDataStore", {"root": args.datapath, "dav_url": args.webdav})
args.archive = '.smt/archive'
elif args.archive and args.archive.lower() != 'false':
if args.archive.lower() == "true":
args.archive = ".smt/archive"
args.archive = os.path.abspath(args.archive)
output_datastore = get_data_store("ArchivingFileSystemDataStore", {"root": args.datapath, "archive": args.archive})
elif args.mirror:
output_datastore = get_data_store("MirroredFileSystemDataStore", {"root": args.datapath, "mirror_base_url": args.mirror})
else:
output_datastore = get_data_store("FileSystemDataStore", {"root": args.datapath})
input_datastore = get_data_store("FileSystemDataStore", {"root": args.input})
if args.launch_mode_options:
args.launch_mode_options = args.launch_mode_options.strip()
launch_mode = get_launch_mode(args.launch_mode)(options=args.launch_mode_options)
project = Project(name=args.project_name,
default_executable=executable,
default_repository=repository,
default_main_file=args.main, # what if incompatible with executable?
default_launch_mode=launch_mode,
data_store=output_datastore,
record_store=record_store,
on_changed=args.on_changed,
data_label=args.addlabel,
input_datastore=input_datastore,
label_generator=args.labelgenerator,
timestamp_format=args.timestamp_format)
if os.path.exists('.smt') and project.record_store.has_project(project.name):
with open('.smt/labels', 'w') as f:
f.write('\n'.join(project.get_labels()))
project.save()
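# Illustrative usage sketch (project name and paths are hypothetical):
#
#     smt init -d ./Data -i ./InputData -m main.py MyProject
#
# creates the .smt directory in the current working copy, records main.py as
# the default script, and, because --store is not given, uses the default
# DjangoRecordStore with Sqlite in .smt/records.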
def configure(argv):
"""Modify the settings for the current project."""
usage = "%(prog)s configure [options]"
description = "Modify the settings for the current project."
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('-d', '--datapath', metavar='PATH', help="set the path to the directory in which smt will search for datafiles generated by the simulation or analysis.")
parser.add_argument('-i', '--input', metavar='PATH', default=None, help="set the path to the directory in which smt will search for input datafiles.")
parser.add_argument('-l', '--addlabel', choices=['cmdline', 'parameters', 'none'], metavar='OPTION',
help="If this option is set, smt will append the record label either to the command line (option 'cmdline') or to the parameter file (option 'parameters'), and will add the label to the datapath when searching for datafiles. It is up to the user to make use of this label inside their program to ensure files are created in the appropriate location.")
parser.add_argument('-e', '--executable', metavar='PATH', help="set the path to the executable.")
parser.add_argument('-r', '--repository', help="the URL of a Subversion or Mercurial repository containing the code. This will be checked out/cloned into the current directory.")
parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulator normally, e.g. init.hoc.")
parser.add_argument('-c', '--on-changed', help="may be 'store-diff' or 'error': the action to take if the code in the repository or any of the dependencies has changed.", choices=['store-diff', 'error'])
parser.add_argument('-g', '--labelgenerator', choices=['timestamp', 'uuid'], metavar='OPTION', help="specify which method Sumatra should use to generate labels (options: timestamp, uuid)")
parser.add_argument('-t', '--timestamp_format', help="the timestamp format given to strftime")
parser.add_argument('-L', '--launch_mode', choices=['serial', 'distributed', 'slurm-mpi'], help="how computations should be launched.")
parser.add_argument('-o', '--launch_mode_options', help="extra options for the given launch mode, to be given in quotes with a leading space, e.g. ' --foo=3'")
parser.add_argument('-p', '--plain', dest='plain', action='store_true', help="pass arguments to the 'run' command straight through to the program. Otherwise arguments of the form name=value can be used to overwrite default parameter values.")
parser.add_argument('--no-plain', dest='plain', action='store_false', help="arguments to the 'run' command of the form name=value will overwrite default parameter values. This is the opposite of the --plain option.")
parser.add_argument('-s', '--store', help="Change the record store to the specified path, URL or URI (must be specified). {0}".format(store_arg_help))
datastore = parser.add_mutually_exclusive_group()
datastore.add_argument('-W', '--webdav', metavar='URL', help="specify a webdav URL (with username@password: if needed) as the archiving location for data")
datastore.add_argument('-A', '--archive', metavar='PATH', help="specify a directory in which to archive output datafiles. If not specified, or if 'false', datafiles are not archived.")
datastore.add_argument('-M', '--mirror', metavar='URL', help="specify a URL at which your datafiles will be mirrored.")
parser.add_argument('--add-plugin', help="name of a Python module containing one or more plug-ins.")
parser.add_argument('--remove-plugin', help="name of a plug-in module to remove from the project.")
args = parser.parse_args(argv)
project = load_project()
if args.store:
new_store = get_record_store(args.store)
project.change_record_store(new_store)
if args.datapath:
project.data_store.root = args.datapath
if args.archive:
if args.archive.lower() == "true":
args.archive = ".smt/archive"
if hasattr(project.data_store, 'archive_store'): # current data store is archiving
if args.archive.lower() == 'false':
project.data_store = get_data_store("FileSystemDataStore",
{"root": project.data_store.root})
else:
project.data_store.archive_store = args.archive
else: # current data store is not archiving
if args.archive.lower() != 'false':
project.data_store = get_data_store("ArchivingFileSystemDataStore",
{"root": project.data_store.root, "archive": args.archive})
elif args.mirror:
project.data_store = get_data_store("MirroredFileSystemDataStore",
{"root": project.data_store.root, "mirror_base_url": args.mirror})
elif args.webdav:
# should we care about archive migration??
project.data_store = get_data_store("DavFsDataStore",
{"root": project.data_store.root, "dav_url": args.webdav})
project.data_store.archive_store = '.smt/archive'
if args.input:
project.input_datastore.root = args.input
if args.repository:
repository = get_repository(args.repository)
repository.checkout()
project.default_repository = repository
if args.main:
project.default_main_file = args.main
if args.executable:
executable_path, executable_options = parse_executable_str(args.executable)
project.default_executable = get_executable(executable_path,
script_file=args.main or project.default_main_file)
project.default_executable.options = executable_options
if args.on_changed:
project.on_changed = args.on_changed
if args.addlabel:
if args.addlabel in ('none', 'None', 'NONE'):
project.data_label = None
else:
project.data_label = args.addlabel
if args.labelgenerator:
project.label_generator = args.labelgenerator
if args.timestamp_format:
project.timestamp_format = args.timestamp_format
if args.launch_mode:
project.default_launch_mode = get_launch_mode(args.launch_mode)()
if args.launch_mode_options:
project.default_launch_mode.options = args.launch_mode_options.strip()
if args.plain is not None:
project.allow_command_line_parameters = not args.plain
if args.add_plugin:
project.load_plugins(args.add_plugin)
if args.remove_plugin:
project.remove_plugins(args.remove_plugin)
project.save()
def info(argv):
"""Print information about the current project."""
usage = "%(prog)s info"
description = "Print information about the current project."
parser = ArgumentParser(usage=usage,
description=description)
args = parser.parse_args(argv)
try:
project = load_project()
except IOError as err:
print(err)
sys.exit(1)
print(project.info())
def run(argv):
"""Run a simulation or analysis."""
usage = "%(prog)s run [options] [arg1, ...] [param=value, ...]"
description = dedent("""\
The list of arguments will be passed on to the simulation/analysis script.
It should normally contain at least the name of a parameter file, but
can also contain input files, flags, etc.
If the parameter file should be in a format that Sumatra understands (see
documentation), then the parameters will be stored to allow future
searching, comparison, etc. of records.
For convenience, it is possible to specify a file with default parameters
and then specify those parameters that are different from the default values
on the command line with any number of param=value pairs (note no space
around the equals sign).""")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('-v', '--version', metavar='REV',
help="use version REV of the code (if this is not the same as the working copy, it will be checked out of the repository). If this option is not specified, the most recent version in the repository will be used. If there are changes in the working copy, the user will be prompted to commit them first")
parser.add_argument('-l', '--label', help="specify a label for the experiment. If no label is specified, one will be generated automatically.")
parser.add_argument('-r', '--reason', help="explain the reason for running this simulation/analysis.")
parser.add_argument('-e', '--executable', metavar='PATH', help="Use this executable for this run. If not specified, the project's default executable will be used.")
parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulation/analysis normally, e.g. init.hoc. If not specified, the project's default will be used.")
parser.add_argument('-n', '--num_processes', metavar='N', type=int,
help="run a distributed computation on N processes using MPI. If this option is not used, or if N=0, a normal, serial simulation/analysis is run.")
parser.add_argument('-t', '--tag', help="tag you want to add to the project")
parser.add_argument('-D', '--debug', action='store_true', help="print debugging information.")
parser.add_argument('-i', '--stdin', help="specify the name of a file that should be connected to standard input.")
parser.add_argument('-o', '--stdout', help="specify the name of a file that should be connected to standard output.")
args, user_args = parser.parse_known_args(argv)
user_args = [str(arg) for arg in user_args] # unifying types for Py2/Py3
if args.debug:
logger.setLevel(logging.DEBUG)
project = load_project()
parameters, input_data, script_args = parse_arguments(user_args,
project.input_datastore,
args.stdin,
args.stdout,
project.allow_command_line_parameters)
if len(parameters) == 0:
parameters = {}
elif len(parameters) == 1:
parameters = parameters[0]
else:
parser.error("Only a single parameter file allowed.") # for now
if args.executable:
executable_path, executable_options = parse_executable_str(args.executable)
executable = get_executable(path=executable_path)
executable.options = executable_options
elif args.main:
executable = get_executable(script_file=args.main) # should we take the options from project.default_executable, if they match?
else:
executable = 'default'
if args.num_processes:
if hasattr(project.default_launch_mode, 'n'):
project.default_launch_mode.n = args.num_processes
else:
parser.error("Your current launch mode does not support using multiple processes.")
reason = args.reason or ''
if reason:
reason = reason.strip('\'"')
label = args.label
try:
run_label = project.launch(parameters, input_data, script_args,
label=label, reason=reason,
executable=executable,
main_file=args.main or 'default',
version=args.version or 'current')
except (UncommittedModificationsError, MissingInformationError) as err:
print(err)
sys.exit(1)
if args.tag:
project.add_tag(run_label, args.tag)
if os.path.exists('.smt'):
with open('.smt/labels', 'w') as f:
f.write('\n'.join(project.get_labels()))
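# Illustrative usage sketch (file and parameter names are hypothetical):
#
#     smt run --reason "vary the time constant" defaults.param tau_m=0.2
#
# launches the project's default executable and main file with defaults.param,
# overriding tau_m on the command line (only honoured when command-line
# parameters are allowed; see the --plain/--no-plain options of configure).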
def list(argv): # add 'report' and 'log' as aliases
"""List records belonging to the current project."""
usage = "%(prog)s list [options] [TAGS]"
description = dedent("""\
If TAGS (optional) is specified, then only records tagged with all the tags in TAGS
will be listed.""")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('tags', metavar='TAGS', nargs='*')
parser.add_argument('-l', '--long', action="store_const", const="long",
dest="mode", default="short",
help="prints full information for each record"),
parser.add_argument('-T', '--table', action="store_const", const="table",
dest="mode", help="prints information in tab-separated columns")
parser.add_argument('-f', '--format', metavar='FMT', choices=['text', 'html', 'latex', 'shell', 'json'],
default='text',
help="FMT can be 'text' (default), 'html', 'json', 'latex' or 'shell'.")
parser.add_argument('-r', '--reverse', action="store_true", dest="reverse", default=False,
help="list records in reverse order (default: newest first)")
parser.add_argument('-m', '--main_file', help="filter list of records by main file")
parser.add_argument('-P', '--parameter_table', action="store_const", const="parameter_table",
dest="mode", help="list records with parameter values")
args = parser.parse_args(argv)
project = load_project()
if os.path.exists('.smt'):
with open('.smt/labels', 'w') as f:
f.write('\n'.join(project.get_labels()))
kwargs = {'tags':args.tags, 'mode':args.mode, 'format':args.format, 'reverse':args.reverse}
if args.main_file is not None: kwargs['main_file__startswith'] = args.main_file
print(project.format_records(**kwargs))
def delete(argv):
"""Delete records or records with a particular tag from a project."""
usage = "%(prog)s delete [options] LIST"
description = dedent("""\
LIST should be a space-separated list of labels for individual records or
of tags. If it contains tags, you must set the --tag/-t option (see below).
The special value "last" allows you to delete the most recent simulation/analysis.
If you want to delete all records, just delete the .smt directory and use
smt init to create a new, empty project.""")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('labels', metavar='LIST', nargs="+", help="a space-separated list of labels for individual records or of tags")
parser.add_argument('-t', '--tag', action='store_true',
help="interpret LIST as containing tags. Records with any of these tags will be deleted.")
parser.add_argument('-d', '--data', action='store_true',
help="also delete any data associated with the record(s).")
args = parser.parse_args(argv)
project = load_project()
if args.tag:
for tag in args.labels:
n = project.delete_by_tag(tag, delete_data=args.data)
print("%s records deleted." % n)
else:
for label in args.labels:
if label == 'last':
label = project.most_recent().label
try:
project.delete_record(label, delete_data=args.data)
except Exception: # could be KeyError or DoesNotExist: should create standard NoSuchRecord or RecordDoesNotExist exception
warnings.warn("Could not delete record '%s' because it does not exist" % label)
if os.path.exists('.smt'):
with open('.smt/labels', 'w') as f:
f.write('\n'.join(project.get_labels()))
def comment(argv):
"""Add a comment to an existing record."""
usage = "%(prog)s comment [options] [LABEL] COMMENT"
description = dedent("""\
This command is used to describe the outcome of the simulation/analysis.
If LABEL is omitted, the comment will be added to the most recent experiment.
If the '-f/--file' option is set, COMMENT should be the name of a file
containing the comment, otherwise it should be a string of text.
By default, comments will be appended to any existing comments.
To overwrite existing comments, use the '-r/--replace' flag.""")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('label', nargs='?', metavar='LABEL', help="the record to which the comment will be added")
parser.add_argument('comment', help="a string of text, or the name of a file containing the comment.")
parser.add_argument('-r', '--replace', action='store_true',
help="if this flag is set, any existing comment will be overwritten, otherwise, the new comment will be appended to the end, starting on a new line")
parser.add_argument('-f', '--file', action='store_true',
help="interpret COMMENT as the path to a file containing the comment")
args = parser.parse_args(argv)
if args.file:
f = open(args.comment, 'r')
comment = f.read()
f.close()
else:
comment = args.comment
project = load_project()
label = args.label or project.most_recent().label
project.add_comment(label, comment, replace=args.replace)
def tag(argv):
"""Tag, or remove a tag, from a record or records."""
usage = "%(prog)s tag [options] TAG [LIST]"
description = dedent("""\
If TAG contains spaces, it must be enclosed in quotes. LIST should be a
space-separated list of labels for individual records. If it is omitted,
only the most recent record will be tagged. If the '-r/--remove' option
is set, the tag will be removed from the records.""")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('tag', metavar='TAG', help="tag to add")
parser.add_argument('labels', metavar='LIST', nargs='*', help="a space-separated list of records to be tagged")
parser.add_argument('-r', '--remove', action='store_true',
help="remove the tag from the record(s), rather than adding it.")
args = parser.parse_args(argv)
project = load_project()
if args.remove:
op = project.remove_tag
else:
op = project.add_tag
labels = args.labels or [project.most_recent().label]
for label in labels:
op(label, args.tag)
def repeat(argv):
"""Re-run a previous simulation or analysis."""
usage = "%(prog)s repeat LABEL"
description = dedent("""\
Re-run a previous simulation/analysis under (in theory) identical
conditions, and check that the results are unchanged.""")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('original_label', metavar='LABEL', help='label of record to be repeated')
parser.add_argument('-l', '--label', metavar='NEW_LABEL', help="specify a label for the new experiment. If no label is specified, one will be generated automatically.")
args = parser.parse_args(argv)
original_label = args.original_label
project = load_project()
new_label, original_label = project.repeat(original_label, args.label)
diff = project.compare(original_label, new_label)
if diff:
formatter = get_diff_formatter()(diff)
msg = ["The new record does not match the original. It differs as follows.",
formatter.format('short'),
"run smt diff --long %s %s to see the differences in detail." % (original_label, new_label)]
msg = "\n".join(msg)
else:
msg = "The new record exactly matches the original."
print(msg)
project.add_comment(new_label, msg)
def diff(argv):
"""Show the differences, if any, between two records."""
usage = "%(prog)s diff [options] LABEL1 LABEL2"
description = dedent("Show the differences, if any, between two records.")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('label1')
parser.add_argument('label2')
parser.add_argument('-i', '--ignore', action="append",
help="a regular expression pattern for filenames to ignore when evaluating differences in output data. To supply multiple patterns, use the -i option multiple times.")
parser.add_argument('-l', '--long', action="store_const", const="long",
dest="mode", default="short",
help="prints full information for each record"),
args = parser.parse_args(argv)
if args.ignore is None:
args.ignore = []
project = load_project()
print(project.show_diff(args.label1, args.label2, mode=args.mode,
ignore_filenames=args.ignore))
def help(argv):
usage = "%(prog)s help CMD"
description = dedent("""Get help on an %(prog)s command.""")
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('cmd', nargs='?')
args = parser.parse_args(argv)
if args.cmd is None:
parser.error('Please specify a command on which you would like help.\n\nAvailable commands:\n ' + "\n ".join(modes))
else:
try:
func = globals()[args.cmd]
func(['--help'])
except KeyError:
parser.error('"%s" is not an smt command.' % args.cmd)
def upgrade(argv):
usage = "%(prog)s upgrade"
description = dedent("""\
Upgrade an existing Sumatra project. You must have previously run
"smt export" or the standalone 'export.py' script.""")
parser = ArgumentParser(usage=usage,
description=description)
args = parser.parse_args(argv)
project = load_project()
if (hasattr(project, 'sumatra_version')
and project.sumatra_version == sumatra.__version__
and "dev" not in sumatra.__version__):
print("No upgrade needed (project was created with an up-to-date version of Sumatra).")
sys.exit(1)
if not os.path.exists(".smt/project_export.json"):
print("Error: project must have been exported (with the original "
"version of Sumatra) before upgrading.")
sys.exit(1)
# backup and remove .smt
import shutil
backup_dir = project.backup(remove_original=True)
# upgrade the project data
os.mkdir(".smt")
shutil.copy("%s/project_export.json" % backup_dir, ".smt/project")
project.sumatra_version = sumatra.__version__
project.save()
# upgrade the record store
project.record_store.clear()
filename = "%s/records_export.json" % backup_dir
if os.path.exists(filename):
f = open(filename)
project.record_store.import_(project.name, f.read())
f.close()
else:
print("Record file not found")
sys.exit(1)
print("Project successfully upgraded to Sumatra version {0}.".format(project.sumatra_version))
def export(argv):
usage = "%(prog)s export"
description = dedent("""\
Export a Sumatra project and its records to JSON. This is needed before running upgrade.""")
parser = ArgumentParser(usage=usage,
description=description)
args = parser.parse_args(argv)
project = load_project()
project.export()
def sync(argv):
usage = "%(prog)s sync PATH1 [PATH2]"
description = dedent("""\
Synchronize two record stores. If both PATH1 and PATH2 are given, the
record stores at those locations will be synchronized. If only PATH1 is
given, and the command is run in a directory containing a Sumatra
project, only that project's records will be synchronized with the store at
PATH1. Note that PATH1 and PATH2 may be either filesystem paths or URLs.
""") # need to say what happens if the sync is incomplete due to label collisions
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('path1')
parser.add_argument('path2', nargs='?')
args = parser.parse_args(argv)
store1 = get_record_store(args.path1)
if args.path2:
store2 = get_record_store(args.path2)
collisions = store1.sync_all(store2)
else:
project = load_project()
store2 = project.record_store
collisions = store1.sync(store2, project.name)
if collisions:
print("Synchronization incomplete: there are two records with the same name for the following: %s" % ", ".join(collisions))
sys.exit(1)
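# Illustrative usage sketch (the URL is hypothetical):
#
#     smt sync http://sumatra.example.com/records
#
# synchronizes the current project's record store with the store at the given
# location; supplying two paths synchronizes those two stores with each other
# instead. Label collisions are reported and cause a non-zero exit status.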
def migrate(argv):
usage = "%(prog)s migrate [options]"
description = dedent("""\
If you have moved your data files to a new location, update the record
store to reflect the new paths.
""")
# might also want to update the repository upstream
# should we keep a history of such changes?
parser = ArgumentParser(usage=usage,
description=description)
parser.add_argument('-d', '--datapath', metavar='PATH', help="modify the path to the directory in which your results are stored.")
parser.add_argument('-i', '--input', metavar='PATH', help="modify the path to the directory in which your input data files are stored.")
parser.add_argument('-A', '--archive', metavar='PATH', help="modify the directory in which your results are archived.")
parser.add_argument('-M', '--mirror', metavar='URL', help="modify the URL at which your data files are mirrored.")
args = parser.parse_args(argv)
project = load_project()
field_map = {
"datapath": "datastore.root",
"input": "input_datastore.root",
"archive": "datastore.archive",
"mirror": "datastore.mirror_base_url"
}
if not any(vars(args).values()):
warnings.warn(
"Command 'smt migrate' had no effect. Please provide at least one "
"argument. (Run 'smt help migrate' for help.)")
else:
for option_name, field in field_map.items():
value = getattr(args, option_name)
if value:
project.record_store.update(project.name, field, value)
# should we also change the default values stored in the Project?
def version(argv):
usage = "%(prog)s version"
description = "Print the Sumatra version."
parser = ArgumentParser(usage=usage,
description=description)
args = parser.parse_args(argv)
print(sumatra.__version__)
|
dpad/sumatra
|
sumatra/commands.py
|
Python
|
bsd-2-clause
| 38,454
|
#Swami skel module for the Swami Control Panel
LightDMConf = "/etc/lightdm/lightdm.conf"
XsessionsDir = "/usr/share/xsessions"
import esudo.esudo as esudo
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL
from efl import elementary
from efl.elementary.button import Button
from efl.elementary.box import Box
from efl.elementary.icon import Icon
from efl.elementary.frame import Frame
from efl.elementary.entry import Entry
from elmextensions import StandardButton
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
EXPAND_HORIZ = EVAS_HINT_EXPAND, 0.0
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
FILL_HORIZ = EVAS_HINT_FILL, 0.5
ALIGN_CENTER = 0.5, 0.5
class SwamiModule(Box):
def __init__(self, rent):
Box.__init__(self, rent)
self.parent = rent
#This appears on the button in the main swami window
self.name = "Light DM"
#The section in the main window the button is added to
self.section = "System Settings"
#Search terms that this module should appear for
self.searchData = ["lightdm", "autologin", "login", "display"]
#Command line argument to open this module directly
self.launchArg = "--lightdm"
#Should be none by default. This value is used internally by swami
self.button = None
self.icon = Icon(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
#Use FDO icons -> http://standards.freedesktop.org/icon-naming-spec/latest/ar01s04.html
self.icon.standard_set('video-display')
self.icon.show()
self.mainBox = Box(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
self.mainBox.show()
self.config = {"sections":[]}
with open(LightDMConf) as f:
currentSection = None
for line in f:
#Sections start with [ - such as [SeatDefaults]
if line[0] == "[":
self.config["sections"].append(line)
currentSection = line.rstrip()
s = Frame(self, size_hint_weight=EXPAND_HORIZ, size_hint_align=FILL_HORIZ)
s.text = currentSection[1:-1]
s.show()
sectionBox = Box(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
sectionBox.show()
s.content = sectionBox
self.mainBox.pack_end(s)
elif line[0] not in [ "[", "\n"]:
setting, value = line.replace("\n", "").split("=")
e = Entry(self)
e.single_line_set(True)
e.text = value
e.show()
f = Frame(self, size_hint_weight=EXPAND_HORIZ, size_hint_align=FILL_HORIZ)
f.text = setting
f.content = e
f.show()
sectionBox.pack_end(f)
self.config[setting] = [f, currentSection]
buttonBox = Box(self, size_hint_weight = EXPAND_HORIZ, size_hint_align = FILL_BOTH)
buttonBox.horizontal = True
buttonSave = StandardButton(self, "Save Changes", "ok", self.savePressed)
buttonSave.show()
buttonReturn = StandardButton(self, "Back", "go-previous", self.returnPressed)
buttonReturn.show()
buttonBox.pack_end(buttonSave)
buttonBox.pack_end(buttonReturn)
buttonBox.show()
self.pack_end(self.mainBox)
self.pack_end(buttonBox)
def savePressed(self, btn):
dataList = []
for section in self.config["sections"]:
dataList.append("%s"%section)
for s in self.config:
if s != "sections":
if self.config[s][1].rstrip() == section.rstrip():
f = self.config[s][0]
dataList.append("%s=%s\n"%(f.text, f.content_get().text))
with open("/tmp/lightdm.conf", 'w') as f:
for item in dataList:
f.write(item)
self.runCommand('mv -f /tmp/lightdm.conf %s'%LightDMConf)
def returnPressed(self, btn):
self.parent.returnMain()
def runCommand(self, ourCommand):
cmd = esudo.eSudo(ourCommand, self.parent)
|
JeffHoogland/swami
|
swami_lightdm/swami_lightdm.py
|
Python
|
bsd-2-clause
| 4,525
|
import numpy
from scipy.sparse import issparse
import Orange.data
from Orange.statistics import distribution, basic_stats
from Orange.util import Reprable
from .transformation import Transformation, Lookup
__all__ = ["ReplaceUnknowns", "Average", "DoNotImpute", "DropInstances",
"Model", "AsValue", "Random", "Default"]
class ReplaceUnknowns(Transformation):
"""
A column transformation which replaces unknown values with a fixed `value`.
Parameters
----------
variable : Orange.data.Variable
The target variable for imputation.
value : int or float
The value with which to replace the unknown values
"""
def __init__(self, variable, value=0):
super().__init__(variable)
self.value = value
def transform(self, c):
if issparse(c): # sparse does not have unknown values
return c
else:
return numpy.where(numpy.isnan(c), self.value, c)
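# Illustrative sketch, not part of the original module: for a dense column the
# transformation substitutes the fixed value for every NaN, e.g. (with `var`
# standing for the Orange variable the column belongs to)
#
#     ReplaceUnknowns(var, value=0).transform(numpy.array([1.0, numpy.nan, 3.0]))
#     # -> array([1., 0., 3.])
#
# Sparse columns are returned unchanged, since sparse matrices cannot hold
# unknown values.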
class BaseImputeMethod(Reprable):
name = ""
short_name = ""
description = ""
format = "{var.name} -> {self.short_name}"
columns_only = False
def __call__(self, data, variable):
""" Imputes table along variable column.
Args:
data (Table): A table to impute.
variable (Variable): Variable for completing missing values.
Returns:
A new Variable instance with completed missing values or
an array mask of rows to drop out.
"""
raise NotImplementedError
def format_variable(self, var):
return self.format.format(var=var, self=self)
def __str__(self):
return self.name
def copy(self):
return self
@classmethod
def supports_variable(cls, variable):
return True
class DoNotImpute(BaseImputeMethod):
name = "Don't impute"
short_name = "leave"
description = ""
def __call__(self, data, variable):
return variable
class DropInstances(BaseImputeMethod):
name = "Remove instances with unknown values"
short_name = "drop"
description = ""
def __call__(self, data, variable):
index = data.domain.index(variable)
return numpy.isnan(data[:, index]).reshape(-1)
class Average(BaseImputeMethod):
name = "Average/Most frequent"
short_name = "average"
description = "Replace with average/mode of the column"
def __call__(self, data, variable, value=None):
variable = data.domain[variable]
if value is None:
if variable.is_continuous:
stats = basic_stats.BasicStats(data, variable)
value = stats.mean
elif variable.is_discrete:
dist = distribution.get_distribution(data, variable)
value = dist.modus()
else:
raise TypeError("Variable must be continuous or discrete")
a = variable.copy(compute_value=ReplaceUnknowns(variable, value))
a.to_sql = ImputeSql(variable, value)
return a
class ImputeSql(Reprable):
def __init__(self, var, default):
self.var = var
self.default = default
def __call__(self):
return 'coalesce(%s, %s)' % (self.var.to_sql(), str(self.default))
class Default(BaseImputeMethod):
name = "Value"
short_name = "value"
description = ""
columns_only = True
format = '{var} -> {self.default}'
def __init__(self, default=0):
self.default = default
def __call__(self, data, variable, *, default=None):
variable = data.domain[variable]
default = default if default is not None else self.default
return variable.copy(compute_value=ReplaceUnknowns(variable, default))
def copy(self):
return Default(self.default)
class ReplaceUnknownsModel(Reprable):
"""
Replace unknown values with predicted values using a `Orange.base.Model`
Parameters
----------
variable : Orange.data.Variable
The target variable for the imputation.
model : Orange.base.Model
A fitted model predicting `variable`.
"""
def __init__(self, variable, model):
assert model.domain.class_var == variable
self.variable = variable
self.model = model
def __call__(self, data):
if isinstance(data, Orange.data.Instance):
column = numpy.array([float(data[self.variable])])
else:
column = numpy.array(data.get_column_view(self.variable)[0],
copy=True)
mask = numpy.isnan(column)
if not numpy.any(mask):
return column
if isinstance(data, Orange.data.Instance):
predicted = self.model(data)
else:
predicted = self.model(data[mask])
column[mask] = predicted
return column
class Model(BaseImputeMethod):
_name = "Model-based imputer"
short_name = "model"
description = ""
format = BaseImputeMethod.format + " ({self.learner.name})"
@property
def name(self):
return "{} ({})".format(self._name, getattr(self.learner, 'name', ''))
def __init__(self, learner):
self.learner = learner
def __call__(self, data, variable):
variable = data.domain[variable]
domain = domain_with_class_var(data.domain, variable)
if self.learner.check_learner_adequacy(domain):
data = data.from_table(domain, data)
model = self.learner(data)
assert model.domain.class_var == variable
return variable.copy(
compute_value=ReplaceUnknownsModel(variable, model))
else:
raise ValueError("`{}` doesn't support domain type"
.format(self.learner.name))
def copy(self):
return Model(self.learner)
def supports_variable(self, variable):
domain = Orange.data.Domain([], class_vars=variable)
return self.learner.check_learner_adequacy(domain)
def domain_with_class_var(domain, class_var):
"""
Return a domain with class_var as output domain.class_var.
If class_var is in the input domain's attributes it is removed from the
output's domain.attributes.
"""
if domain.class_var is class_var:
return domain
elif class_var in domain.attributes:
attrs = [var for var in domain.attributes
if var is not class_var]
else:
attrs = domain.attributes
return Orange.data.Domain(attrs, class_var)
class IsDefined(Transformation):
def transform(self, c):
return ~numpy.isnan(c)
class Lookup(Lookup):
def __init__(self, variable, lookup_table, unknown=None):
super().__init__(variable, lookup_table)
self.unknown = unknown
def transform(self, column):
if self.unknown is None:
unknown = numpy.nan
else:
unknown = self.unknown
mask = numpy.isnan(column)
column_valid = numpy.where(mask, 0, column)
values = self.lookup_table[numpy.array(column_valid, dtype=int)]
return numpy.where(mask, unknown, values)
class AsValue(BaseImputeMethod):
name = "As a distinct value"
short_name = "new value"
description = ""
def __call__(self, data, variable):
variable = data.domain[variable]
if variable.is_discrete:
fmt = "{var.name}"
value = "N/A"
var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=variable.values + [value],
base_value=variable.base_value,
compute_value=Lookup(
variable,
numpy.arange(len(variable.values), dtype=int),
unknown=len(variable.values))
)
return var
elif variable.is_continuous:
fmt = "{var.name}_def"
indicator_var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=("undef", "def"),
compute_value=IsDefined(variable))
stats = basic_stats.BasicStats(data, variable)
return (variable.copy(compute_value=ReplaceUnknowns(variable,
stats.mean)),
indicator_var)
else:
raise TypeError(type(variable))
class ReplaceUnknownsRandom(Transformation):
"""
A column transformation replacing unknowns with values drawn randomly from
an empirical distribution.
Parameters
----------
variable : Orange.data.Variable
The target variable for imputation.
distribution : Orange.statistics.distribution.Distribution
The corresponding sampling distribution
"""
def __init__(self, variable, distribution):
assert distribution.size > 0
assert distribution.variable == variable
super().__init__(variable)
self.distribution = distribution
if variable.is_discrete:
counts = numpy.array(distribution)
elif variable.is_continuous:
counts = numpy.array(distribution)[1, :]
else:
raise TypeError("Only discrete and continuous "
"variables are supported")
csum = numpy.sum(counts)
if csum > 0:
self.sample_prob = counts / csum
else:
self.sample_prob = numpy.ones_like(counts) / len(counts)
def transform(self, c):
c = numpy.array(c, copy=True)
nanindices = numpy.flatnonzero(numpy.isnan(c))
if self.variable.is_discrete:
sample = numpy.random.choice(
len(self.variable.values), size=len(nanindices),
replace=True, p=self.sample_prob)
else:
sample = numpy.random.choice(
numpy.asarray(self.distribution)[0, :], size=len(nanindices),
replace=True, p=self.sample_prob)
c[nanindices] = sample
return c
class Random(BaseImputeMethod):
name = "Random values"
short_name = "random"
description = "Replace with a random value"
def __call__(self, data, variable):
variable = data.domain[variable]
dist = distribution.get_distribution(data, variable)
# A distribution is invalid if a continuous variable's column does not
# contain any known values or if a discrete variable's .values == []
isinvalid = dist.size == 0
if isinvalid and variable.is_discrete:
assert len(variable.values) == 0
raise ValueError("'{}' has no values".format(variable))
elif isinvalid and variable.is_continuous:
raise ValueError("'{}' has an unknown distribution"
.format(variable))
if variable.is_discrete and numpy.sum(dist) == 0:
dist += 1 / len(dist)
elif variable.is_continuous and numpy.sum(dist[1, :]) == 0:
dist[1, :] += 1 / dist.shape[1]
return variable.copy(
compute_value=ReplaceUnknownsRandom(variable, dist))
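# Illustrative usage sketch, not part of the original module (the dataset name
# is hypothetical): impute methods are typically applied per variable to build
# a domain of imputed attributes, e.g.
#
#     data = Orange.data.Table("heart_disease")
#     method = Average()
#     imputed_attrs = [method(data, var) for var in data.domain.attributes]
#     imputed = data.transform(
#         Orange.data.Domain(imputed_attrs, data.domain.class_vars))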
|
cheral/orange3
|
Orange/preprocess/impute.py
|
Python
|
bsd-2-clause
| 11,137
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import sys
import mmap
import warnings
import numpy as np
from .base import DELAYED, _ValidHDU, ExtensionHDU, BITPIX2DTYPE, DTYPE2BITPIX
from astropy.io.fits.header import Header
from astropy.io.fits.util import _is_pseudo_unsigned, _unsigned_zero, _is_int
from astropy.io.fits.verify import VerifyWarning
from astropy.utils import isiterable, lazyproperty
try:
from dask.array import Array as DaskArray
except ImportError:
class DaskArray:
pass
__all__ = ["Section", "PrimaryHDU", "ImageHDU"]
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class.
Attributes
----------
header
image header
data
image data
"""
standard_keyword_comments = {
'SIMPLE': 'conforms to FITS standard',
'XTENSION': 'Image extension',
'BITPIX': 'array data type',
'NAXIS': 'number of array dimensions',
'GROUPS': 'has groups',
'PCOUNT': 'number of parameters',
'GCOUNT': 'number of groups'
}
def __init__(self, data=None, header=None, do_not_scale_image_data=False,
uint=True, scale_back=False, ignore_blank=False, **kwargs):
from .groups import GroupsHDU
super().__init__(data=data, header=header)
if data is DELAYED:
# Presumably if data is DELAYED then this HDU is coming from an
# open file, and was not created in memory
if header is None:
# this should never happen
raise ValueError('No header to setup HDU.')
else:
# TODO: Some of this card manipulation should go into the
# PrimaryHDU and GroupsHDU subclasses
# construct a list of cards of minimal header
if isinstance(self, ExtensionHDU):
c0 = ('XTENSION', 'IMAGE',
self.standard_keyword_comments['XTENSION'])
else:
c0 = ('SIMPLE', True, self.standard_keyword_comments['SIMPLE'])
cards = [
c0,
('BITPIX', 8, self.standard_keyword_comments['BITPIX']),
('NAXIS', 0, self.standard_keyword_comments['NAXIS'])]
if isinstance(self, GroupsHDU):
cards.append(('GROUPS', True,
self.standard_keyword_comments['GROUPS']))
if isinstance(self, (ExtensionHDU, GroupsHDU)):
cards.append(('PCOUNT', 0,
self.standard_keyword_comments['PCOUNT']))
cards.append(('GCOUNT', 1,
self.standard_keyword_comments['GCOUNT']))
if header is not None:
orig = header.copy()
header = Header(cards)
header.extend(orig, strip=True, update=True, end=True)
else:
header = Header(cards)
self._header = header
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
# Keep track of whether BZERO/BSCALE were set from the header so that
# values for self._orig_bzero and self._orig_bscale can be set
# properly, if necessary, once the data has been set.
bzero_in_header = 'BZERO' in self._header
bscale_in_header = 'BSCALE' in self._header
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
# Save off other important values from the header needed to interpret
# the image data
self._axes = [self._header.get('NAXIS' + str(axis + 1), 0)
for axis in range(self._header.get('NAXIS', 0))]
# Not supplying a default for BITPIX makes sense because BITPIX
# is either in the header or should be determined from the dtype of
# the data (which occurs when the data is set).
self._bitpix = self._header.get('BITPIX')
self._gcount = self._header.get('GCOUNT', 1)
self._pcount = self._header.get('PCOUNT', 0)
self._blank = None if ignore_blank else self._header.get('BLANK')
self._verify_blank()
self._orig_bitpix = self._bitpix
self._orig_blank = self._header.get('BLANK')
# These get set again below, but need to be set to sensible defaults
# here.
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# Set the name attribute if it was provided (if this is an ImageHDU
# this will result in setting the EXTNAME keyword of the header as
# well)
if 'name' in kwargs and kwargs['name']:
self.name = kwargs['name']
if 'ver' in kwargs and kwargs['ver']:
self.ver = kwargs['ver']
# Set to True if the data or header is replaced, indicating that
# update_header should be called
self._modified = False
if data is DELAYED:
if (not do_not_scale_image_data and
(self._bscale != 1 or self._bzero != 0)):
# This indicates that when the data is accessed or written out
# to a new file it will need to be rescaled
self._data_needs_rescale = True
return
else:
# Setting data will update the header and set _bitpix, _bzero,
# and _bscale to the appropriate BITPIX for the data, and always
# sets _bzero=0 and _bscale=1.
self.data = data
# Check again for BITPIX/BSCALE/BZERO in case they changed when the
# data was assigned. This can happen, for example, if the input
# data is an unsigned int numpy array.
self._bitpix = self._header.get('BITPIX')
# Do not provide default values for BZERO and BSCALE here because
# the keywords will have been deleted in the header if appropriate
# after scaling. We do not want to put them back in if they
# should not be there.
self._bzero = self._header.get('BZERO')
self._bscale = self._header.get('BSCALE')
# Handle case where there was no BZERO/BSCALE in the initial header
# but there should be a BSCALE/BZERO now that the data has been set.
if not bzero_in_header:
self._orig_bzero = self._bzero
if not bscale_in_header:
self._orig_bscale = self._bscale
@classmethod
def match_header(cls, header):
"""
_ImageBaseHDU is sort of an abstract class for HDUs containing image
data (as opposed to table data) and should never be used directly.
"""
raise NotImplementedError
@property
def is_image(self):
return True
@property
def section(self):
"""
Access a section of the image array without loading the entire array
into memory. The :class:`Section` object returned by this attribute is
        not meant to be used directly by itself. Rather, slices of the section
        return the appropriate slice of the data, and load *only* that section
        into memory.
Sections are mostly obsoleted by memmap support, but should still be
used to deal with very large scaled images. See the
:ref:`astropy:data-sections` section of the Astropy documentation for
more details.
"""
return Section(self)
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
self._modified = True
self.update_header()
@lazyproperty
def data(self):
"""
Image/array data as a `~numpy.ndarray`.
        Please remember that the order of axes on a Numpy array is the opposite
        of the order specified in the FITS file. For example, for a 2D image
the "rows" or y-axis are the first dimension, and the "columns" or
x-axis are the second dimension.
If the data is scaled using the BZERO and BSCALE parameters, this
attribute returns the data scaled to its physical values unless the
file was opened with ``do_not_scale_image_data=True``.
"""
if len(self._axes) < 1:
return
data = self._get_scaled_image_data(self._data_offset, self.shape)
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__ and self.__dict__['data'] is not None:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
was_unsigned = _is_pseudo_unsigned(self.__dict__['data'].dtype)
else:
self._data_replaced = True
was_unsigned = False
if data is not None and not isinstance(data, (np.ndarray, DaskArray)):
# Try to coerce the data into a numpy array--this will work, on
# some level, for most objects
try:
data = np.array(data)
except Exception:
raise TypeError('data object {!r} could not be coerced into an '
'ndarray'.format(data))
if data.shape == ():
raise TypeError('data object {!r} should have at least one '
'dimension'.format(data))
self.__dict__['data'] = data
self._modified = True
if self.data is None:
self._axes = []
else:
# Set new values of bitpix, bzero, and bscale now, but wait to
# revise original values until header is updated.
self._bitpix = DTYPE2BITPIX[data.dtype.name]
self._bscale = 1
self._bzero = 0
self._blank = None
self._axes = list(data.shape)
self._axes.reverse()
# Update the header, including adding BZERO/BSCALE if new data is
# unsigned. Does not change the values of self._bitpix,
# self._orig_bitpix, etc.
self.update_header()
if (data is not None and was_unsigned):
self._update_header_scale_info(data.dtype)
# Keep _orig_bitpix as it was until header update is done, then
# set it, to allow easier handling of the case of unsigned
# integer data being converted to something else. Setting these here
# is needed only for the case do_not_scale_image_data=True when
# setting the data to unsigned int.
# If necessary during initialization, i.e. if BSCALE and BZERO were
# not in the header but the data was unsigned, the attributes below
        # will be updated in __init__.
self._orig_bitpix = self._bitpix
self._orig_bscale = self._bscale
self._orig_bzero = self._bzero
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
def update_header(self):
"""
Update the header keywords to agree with the data.
"""
if not (self._modified or self._header._modified or
(self._has_data and self.shape != self.data.shape)):
# Not likely that anything needs updating
return
old_naxis = self._header.get('NAXIS', 0)
if 'BITPIX' not in self._header:
bitpix_comment = self.standard_keyword_comments['BITPIX']
else:
bitpix_comment = self._header.comments['BITPIX']
# Update the BITPIX keyword and ensure it's in the correct
# location in the header
self._header.set('BITPIX', self._bitpix, bitpix_comment, after=0)
# If the data's shape has changed (this may have happened without our
# noticing either via a direct update to the data.shape attribute) we
# need to update the internal self._axes
if self._has_data and self.shape != self.data.shape:
self._axes = list(self.data.shape)
self._axes.reverse()
# Update the NAXIS keyword and ensure it's in the correct location in
# the header
if 'NAXIS' in self._header:
naxis_comment = self._header.comments['NAXIS']
else:
naxis_comment = self.standard_keyword_comments['NAXIS']
self._header.set('NAXIS', len(self._axes), naxis_comment,
after='BITPIX')
# TODO: This routine is repeated in several different classes--it
# should probably be made available as a method on all standard HDU
# types
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
naxisn = 'NAXIS' + str(idx + 1)
if naxisn in self._header:
self._header[naxisn] = axis
else:
if (idx == 0):
after = 'NAXIS'
else:
after = 'NAXIS' + str(idx)
self._header.set(naxisn, axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header['NAXIS' + str(idx)]
except KeyError:
pass
if 'BLANK' in self._header:
self._blank = self._header['BLANK']
# Add BSCALE/BZERO to header if data is unsigned int.
self._update_uint_scale_keywords()
self._modified = False
def _update_header_scale_info(self, dtype=None):
"""
Delete BSCALE/BZERO from header if necessary.
"""
# Note that _dtype_for_bitpix determines the dtype based on the
# "original" values of bitpix, bscale, and bzero, stored in
# self._orig_bitpix, etc. It contains the logic for determining which
# special cases of BZERO/BSCALE, if any, are auto-detected as following
# the FITS unsigned int convention.
# Added original_was_unsigned with the intent of facilitating the
# special case of do_not_scale_image_data=True and uint=True
# eventually.
# FIXME: unused, maybe it should be useful?
# if self._dtype_for_bitpix() is not None:
# original_was_unsigned = self._dtype_for_bitpix().kind == 'u'
# else:
# original_was_unsigned = False
if (self._do_not_scale_image_data or
(self._orig_bzero == 0 and self._orig_bscale == 1)):
return
if dtype is None:
dtype = self._dtype_for_bitpix()
if (dtype is not None and dtype.kind == 'u' and
(self._scale_back or self._scale_back is None)):
# Data is pseudo-unsigned integers, and the scale_back option
# was not explicitly set to False, so preserve all the scale
# factors
return
for keyword in ['BSCALE', 'BZERO']:
try:
del self._header[keyword]
# Since _update_header_scale_info can, currently, be called
# *after* _prewriteto(), replace these with blank cards so
# the header size doesn't change
self._header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self._header['BITPIX'] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self._header['BITPIX']
self._blank = self._header.pop('BLANK', None)
def scale(self, type=None, option='old', bscale=None, bzero=None):
"""
Scale image data by using ``BSCALE``/``BZERO``.
Call to this method will scale `data` and update the keywords of
``BSCALE`` and ``BZERO`` in the HDU's header. This method should only
be used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy
dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'``
etc.). If is `None`, use the current data type.
option : str, optional
How to scale the data: ``"old"`` uses the original ``BSCALE`` and
``BZERO`` values from when the data was read/created (defaulting to
1 and 0 if they don't exist). For integer data only, ``"minmax"``
uses the minimum and maximum of the data to scale. User-specified
``bscale``/``bzero`` values always take precedence.
bscale, bzero : int, optional
User-specified ``BSCALE`` and ``BZERO`` values
"""
# Disable blank support for now
self._scale_internal(type=type, option=option, bscale=bscale,
bzero=bzero, blank=None)
def _scale_internal(self, type=None, option='old', bscale=None, bzero=None,
blank=0):
"""
This is an internal implementation of the `scale` method, which
also supports handling BLANK properly.
TODO: This is only needed for fixing #3865 without introducing any
public API changes. We should support BLANK better when rescaling
data, and when that is added the need for this internal interface
should go away.
Note: the default of ``blank=0`` merely reflects the current behavior,
and is not necessarily a deliberate choice (better would be to disallow
conversion of floats to ints without specifying a BLANK if there are
NaN/inf values).
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale is not None and bzero is not None:
_scale = bscale
_zero = bzero
elif bscale is not None:
_scale = bscale
_zero = 0
elif bzero is not None:
_scale = 1
_zero = bzero
elif (option == 'old' and self._orig_bscale is not None and
self._orig_bzero is not None):
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == 'minmax' and not issubclass(_type, np.floating):
if isinstance(self.data, DaskArray):
min = self.data.min().compute()
max = self.data.max().compute()
else:
min = np.minimum.reduce(self.data.flat)
max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = min
_scale = (max - min) / (2.0 ** 8 - 1)
else:
_zero = (max + min) / 2.0
# throw away -2^N
nbytes = 8 * _type().itemsize
_scale = (max - min) / (2.0 ** nbytes - 2)
else:
_scale = 1
_zero = 0
# Do the scaling
if _zero != 0:
if isinstance(self.data, DaskArray):
self.data = self.data - _zero
else:
# 0.9.6.3 to avoid out of range error for BZERO = +32768
# We have to explicitly cast _zero to prevent numpy from raising an
# error when doing self.data -= zero, and we do this instead of
# self.data = self.data - zero to avoid doubling memory usage.
np.add(self.data, -_zero, out=self.data, casting='unsafe')
self._header['BZERO'] = _zero
else:
try:
del self._header['BZERO']
except KeyError:
pass
if _scale and _scale != 1:
self.data = self.data / _scale
self._header['BSCALE'] = _scale
else:
try:
del self._header['BSCALE']
except KeyError:
pass
# Set blanks
if blank is not None and issubclass(_type, np.integer):
# TODO: Perhaps check that the requested BLANK value fits in the
# integer type being scaled to?
self.data[np.isnan(self.data)] = blank
self._header['BLANK'] = blank
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type)
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
self._blank = blank
self._header['BITPIX'] = self._bitpix
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_blank = self._blank
def _verify(self, option='warn'):
# update_header can fix some things that would otherwise cause
# verification to fail, so do that now...
self.update_header()
self._verify_blank()
return super()._verify(option)
def _verify_blank(self):
# Probably not the best place for this (it should probably happen
# in _verify as well) but I want to be able to raise this warning
# both when the HDU is created and when written
if self._blank is None:
return
messages = []
        # TODO: Once the FITSSchema framework is merged these warnings
# should be handled by the schema
if not _is_int(self._blank):
messages.append(
"Invalid value for 'BLANK' keyword in header: {!r} "
"The 'BLANK' keyword must be an integer. It will be "
"ignored in the meantime.".format(self._blank))
self._blank = None
if not self._bitpix > 0:
messages.append(
"Invalid 'BLANK' keyword in header. The 'BLANK' keyword "
"is only applicable to integer data, and will be ignored "
"in this HDU.")
self._blank = None
for msg in messages:
warnings.warn(msg, VerifyWarning)
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self._scale_internal(BITPIX2DTYPE[self._orig_bitpix],
blank=self._orig_blank)
self.update_header()
if not inplace and self._data_needs_rescale:
# Go ahead and load the scaled image data and update the header
# with the correct post-rescaling headers
_ = self.data
return super()._prewriteto(checksum, inplace)
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
elif isinstance(self.data, DaskArray):
return self._writeinternal_dask(fileobj)
else:
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_unsigned(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _unsigned_zero(self.data.dtype),
dtype=f'>i{self.data.dtype.itemsize}')
should_swap = False
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = (byteorder in swap_types)
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _writeinternal_dask(self, fileobj):
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_unsigned(self.data.dtype):
raise NotImplementedError("This dtype isn't currently supported with dask.")
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = (byteorder in swap_types)
if should_swap:
from dask.utils import M
# NOTE: the inplace flag to byteswap needs to be False otherwise the array is
# byteswapped in place every time it is computed and this affects
# the input dask array.
output = output.map_blocks(M.byteswap, False).map_blocks(M.newbyteorder, "S")
initial_position = fileobj.tell()
n_bytes = output.nbytes
# Extend the file n_bytes into the future
fileobj.seek(initial_position + n_bytes - 1)
fileobj.write(b'\0')
fileobj.flush()
if fileobj.fileobj_mode not in ('rb+', 'wb+', 'ab+'):
# Use another file handle if the current one is not in
# read/write mode
fp = open(fileobj.name, mode='rb+')
should_close = True
else:
fp = fileobj._file
should_close = False
try:
outmmap = mmap.mmap(fp.fileno(),
length=initial_position + n_bytes,
access=mmap.ACCESS_WRITE)
outarr = np.ndarray(shape=output.shape,
dtype=output.dtype,
offset=initial_position,
buffer=outmmap)
output.store(outarr, lock=True, compute=True)
finally:
if should_close:
fp.close()
outmmap.close()
# On Windows closing the memmap causes the file pointer to return to 0, so
# we need to go back to the end of the data (since padding may be written
# after)
fileobj.seek(initial_position + n_bytes)
return n_bytes
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in ((16, np.dtype('uint16')),
(32, np.dtype('uint32')),
(64, np.dtype('uint64'))):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype('float64')
elif bitpix > 0: # scale integers to Float32
return np.dtype('float32')
def _convert_pseudo_unsigned(self, data):
"""
Handle "pseudo-unsigned" integers, if the user requested it. Returns
the converted data array if so; otherwise returns None.
        In this case, we don't need to handle BLANK to convert it to NaN,
        since we can't do NaNs with integers anyway, i.e. the user is
        responsible for managing blanks.
"""
dtype = self._dtype_for_bitpix()
# bool(dtype) is always False--have to explicitly compare to None; this
# caused a fair amount of hair loss
if dtype is not None and dtype.kind == 'u':
# Convert the input raw data into an unsigned integer array and
# then scale the data adjusting for the value of BZERO. Note that
# we subtract the value of BZERO instead of adding because of the
# way numpy converts the raw signed array into an unsigned array.
bits = dtype.itemsize * 8
data = np.array(data, dtype=dtype)
data -= np.uint64(1 << (bits - 1))
return data
def _get_scaled_image_data(self, offset, shape):
"""
        Internal function for reading image data from a file and applying scale
        factors to it. Normally this is used for the entire image, but it
supports alternate offset/shape for Section support.
"""
code = BITPIX2DTYPE[self._orig_bitpix]
raw_data = self._get_raw_data(shape, code, offset)
raw_data.dtype = raw_data.dtype.newbyteorder('>')
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1 and
self._blank is None):
# No further conversion of the data is necessary
return raw_data
try:
if self._file.strict_memmap:
raise ValueError("Cannot load a memory-mapped image: "
"BZERO/BSCALE/BLANK header keywords present. "
"Set memmap=False.")
except AttributeError: # strict_memmap not set
pass
data = None
if not (self._orig_bzero == 0 and self._orig_bscale == 1):
data = self._convert_pseudo_unsigned(raw_data)
if data is None:
# In these cases, we end up with floating-point arrays and have to
# apply bscale and bzero. We may have to handle BLANK and convert
# to NaN in the resulting floating-point arrays.
# The BLANK keyword should only be applied for integer data (this
# is checked in __init__ but it can't hurt to double check here)
blanks = None
if self._blank is not None and self._bitpix > 0:
blanks = raw_data.flat == self._blank
# The size of blanks in bytes is the number of elements in
# raw_data.flat. However, if we use np.where instead we will
# only use 8 bytes for each index where the condition is true.
# So if the number of blank items is fewer than
# len(raw_data.flat) / 8, using np.where will use less memory
if blanks.sum() < len(blanks) / 8:
blanks = np.where(blanks)
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
data = np.array(raw_data, dtype=new_dtype)
else: # floating point cases
if self._file is not None and self._file.memmap:
data = raw_data.copy()
elif not raw_data.flags.writeable:
# create a writeable copy if needed
data = raw_data.copy()
# if not memmap, use the space already in memory
else:
data = raw_data
del raw_data
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
data += self._orig_bzero
if self._blank:
data.flat[blanks] = np.nan
return data
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
format = ''
else:
format = self.data.dtype.name
format = format[format.rfind('.')+1:]
else:
if self.shape and all(self.shape):
# Only show the format if all the dimensions are non-zero
# if data is not touched yet, use header info.
format = BITPIX2DTYPE[self._bitpix]
else:
format = ''
if (format and not self._do_not_scale_image_data and
(self._orig_bscale != 1 or self._orig_bzero != 0)):
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
format += f' (rescales to {new_dtype.name})'
# Display shape in FITS-order
shape = tuple(reversed(self.shape))
return (self.name, self.ver, class_name, len(self._header), shape, format, '')
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
d = self.data
# First handle the special case where the data is unsigned integer
# 16, 32 or 64
if _is_pseudo_unsigned(self.data.dtype):
d = np.array(self.data - _unsigned_zero(self.data.dtype),
dtype=f'i{self.data.dtype.itemsize}')
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
if d.dtype.str[0] != '>':
if d.flags.writeable:
byteswapped = True
d = d.byteswap(True)
d.dtype = d.dtype.newbyteorder('>')
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = d.byteswap(False)
d.dtype = d.dtype.newbyteorder('>')
byteswapped = False
else:
byteswapped = False
cs = self._compute_checksum(d.flatten().view(np.uint8))
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped and not _is_pseudo_unsigned(self.data.dtype):
d.byteswap(True)
d.dtype = d.dtype.newbyteorder('<')
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
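# Illustrative sketch (not part of the original module): the 'minmax' branch of
# _scale_internal() above derives BSCALE/BZERO so that the data range maps onto
# the destination integer type.  The helper below (its name is hypothetical)
# reproduces that arithmetic for a plain ndarray.
def _example_minmax_scale_params(data, dtype=np.int16):
    """Return (bscale, bzero) mapping ``data``'s range onto integer ``dtype``."""
    dmin = np.minimum.reduce(data.flat)
    dmax = np.maximum.reduce(data.flat)
    if dtype == np.uint8:
        # uint8 keeps the minimum as the zero point
        bzero = dmin
        bscale = (dmax - dmin) / (2.0 ** 8 - 1)
    else:
        # otherwise centre the range and throw away one code value (-2**N)
        bzero = (dmax + dmin) / 2.0
        nbits = 8 * np.dtype(dtype).itemsize
        bscale = (dmax - dmin) / (2.0 ** nbits - 2)
    return bscale, bzero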
class Section:
"""
Image section.
Slices of this object load the corresponding section of an image array from
    the underlying FITS file on disk, and apply any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
naxis = len(self.hdu.shape)
return_scalar = (all(isinstance(k, (int, np.integer)) for k in key)
and len(key) == naxis)
if not any(k is Ellipsis for k in key):
# We can always add a ... at the end, after making note of whether
# to return a scalar.
key += Ellipsis,
ellipsis_count = len([k for k in key if k is Ellipsis])
if len(key) - ellipsis_count > naxis or ellipsis_count > 1:
raise IndexError('too many indices for array')
# Insert extra dimensions as needed.
idx = next(i for i, k in enumerate(key + (Ellipsis,)) if k is Ellipsis)
key = key[:idx] + (slice(None),) * (naxis - len(key) + 1) + key[idx+1:]
return_0dim = (all(isinstance(k, (int, np.integer)) for k in key)
and len(key) == naxis)
dims = []
offset = 0
# Find all leading axes for which a single point is used.
for idx in range(naxis):
axis = self.hdu.shape[idx]
indx = _IndexInfo(key[idx], axis)
offset = offset * axis + indx.offset
if not _is_int(key[idx]):
dims.append(indx.npts)
break
is_contiguous = indx.contiguous
for jdx in range(idx + 1, naxis):
axis = self.hdu.shape[jdx]
indx = _IndexInfo(key[jdx], axis)
dims.append(indx.npts)
if indx.npts == axis and indx.contiguous:
# The offset needs to multiply the length of all remaining axes
offset *= axis
else:
is_contiguous = False
if is_contiguous:
dims = tuple(dims) or (1,)
bitpix = self.hdu._orig_bitpix
offset = self.hdu._data_offset + offset * abs(bitpix) // 8
data = self.hdu._get_scaled_image_data(offset, dims)
else:
data = self._getdata(key)
if return_scalar:
data = data.item()
elif return_0dim:
data = data.squeeze()
return data
def _getdata(self, keys):
for idx, (key, axis) in enumerate(zip(keys, self.hdu.shape)):
if isinstance(key, slice):
ks = range(*key.indices(axis))
break
elif isiterable(key):
# Handle both integer and boolean arrays.
ks = np.arange(axis, dtype=int)[key]
break
# This should always break at some point if _getdata is called.
data = [self[keys[:idx] + (k,) + keys[idx + 1:]] for k in ks]
if any(isinstance(key, slice) or isiterable(key)
for key in keys[idx + 1:]):
# data contains multidimensional arrays; combine them.
return np.array(data)
else:
# Only singleton dimensions remain; concatenate in a 1D array.
return np.concatenate([np.atleast_1d(array) for array in data])
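# Illustrative sketch (not part of the original module): typical use of the
# Section class via the ``section`` property defined above; the file name is
# hypothetical, and only the requested slice is read from disk and scaled.
#
#     with fits.open('large_image.fits') as hdul:
#         cutout = hdul[0].section[100:200, 250:300]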
class PrimaryHDU(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = 'PRIMARY'
def __init__(self, data=None, header=None, do_not_scale_image_data=False,
ignore_blank=False,
uint=True, scale_back=None):
"""
Construct a primary HDU.
Parameters
----------
data : array or ``astropy.io.fits.hdu.base.DELAYED``, optional
The data in the HDU.
header : `~astropy.io.fits.Header`, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
ignore_blank : bool, optional
If `True`, the BLANK header keyword will be ignored if present.
Otherwise, pixels equal to this value will be replaced with
NaNs. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
"""
super().__init__(
data=data, header=header,
do_not_scale_image_data=do_not_scale_image_data, uint=uint,
ignore_blank=ignore_blank,
scale_back=scale_back)
# insert the keywords EXTEND
if header is None:
dim = self._header['NAXIS']
if dim == 0:
dim = ''
self._header.set('EXTEND', True, after='NAXIS' + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
# Due to problems discussed in #5808, we cannot assume the 'GROUPS'
# keyword to be True/False, have to check the value
return (card.keyword == 'SIMPLE' and
('GROUPS' not in header or header['GROUPS'] != True) and # noqa
card.value)
def update_header(self):
super().update_header()
# Update the position of the EXTEND keyword if it already exists
if 'EXTEND' in self._header:
if len(self._axes):
after = 'NAXIS' + str(len(self._axes))
else:
after = 'NAXIS'
self._header.set('EXTEND', after=after)
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword check that its position is correct
if 'EXTEND' in self._header:
naxis = self._header.get('NAXIS', 0)
self.req_cards('EXTEND', naxis + 3, lambda v: isinstance(v, bool),
True, option, errs)
return errs
class ImageHDU(_ImageBaseHDU, ExtensionHDU):
"""
FITS image extension HDU class.
"""
_extension = 'IMAGE'
def __init__(self, data=None, header=None, name=None,
do_not_scale_image_data=False, uint=True, scale_back=None,
ver=None):
"""
Construct an image HDU.
Parameters
----------
data : array
The data in the HDU.
header : `~astropy.io.fits.Header`
The header to be used (as a template). If ``header`` is
`None`, a minimal header will be provided.
name : str, optional
The name of the HDU, will be the value of the keyword
``EXTNAME``.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
"""
# This __init__ currently does nothing differently from the base class,
# and is only explicitly defined for the docstring.
super().__init__(
data=data, header=header, name=name,
do_not_scale_image_data=do_not_scale_image_data, uint=uint,
scale_back=scale_back, ver=ver)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _verify(self, option='warn'):
"""
ImageHDU verify method.
"""
errs = super()._verify(option=option)
naxis = self._header.get('NAXIS', 0)
# PCOUNT must == 0, GCOUNT must == 1; the former is verified in
# ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT
# to be >= 0, so we need to check it here
self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v == 0),
0, option, errs)
return errs
class _IndexInfo:
def __init__(self, indx, naxis):
if _is_int(indx):
if 0 <= indx < naxis:
self.npts = 1
self.offset = indx
self.contiguous = True
else:
raise IndexError(f'Index {indx} out of range.')
elif isinstance(indx, slice):
start, stop, step = indx.indices(naxis)
self.npts = (stop - start) // step
self.offset = start
self.contiguous = step == 1
elif isiterable(indx):
self.npts = len(indx)
self.offset = 0
self.contiguous = False
else:
raise IndexError(f'Illegal index {indx}')
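# Illustrative sketch (not part of the original module): minimal, file-free use
# of the classes defined above.  The assertions only restate what update_header()
# and match_header() guarantee for freshly constructed HDUs.
if __name__ == '__main__':  # pragma: no cover
    example = np.arange(12, dtype=np.float32).reshape(3, 4)
    primary = PrimaryHDU(data=example)
    extension = ImageHDU(data=example, name='SCI')
    # update_header() keeps NAXIS/NAXISn in sync with the data shape
    assert primary.header['NAXIS'] == 2
    assert primary.shape == (3, 4)
    # an ImageHDU header starts with an XTENSION = 'IMAGE' card
    assert ImageHDU.match_header(extension.header)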
|
aleksandr-bakanov/astropy
|
astropy/io/fits/hdu/image.py
|
Python
|
bsd-3-clause
| 46,543
|
from __future__ import unicode_literals
from importlib import import_module
from django.conf import settings
from ahem.loader import notification_registry
from ahem.settings import AHEM_BACKENDS
def get_notification(notification_name):
return notification_registry[notification_name]()
def get_backend(backend_name):
if hasattr(settings, 'AHEM_BACKENDS'):
backend_paths = settings.AHEM_BACKENDS
else:
backend_paths = AHEM_BACKENDS
for path in backend_paths:
module, backend_class = path.rsplit(".", 1)
module = import_module(module)
backend = getattr(module, backend_class)
if backend.name == backend_name:
return backend()
raise Exception("The specifyed backend is not registered. Add it to AHEM_BACKENDS.")
def celery_is_available():
try:
import celery
except ImportError:
return False
else:
return True
def register_user(backend_name, user, **settings):
backend = get_backend(backend_name)
backend.register_user(user, **settings)
def schedule_notification(notification_name, **params):
notification = get_notification(notification_name)
notification.schedule(**params)
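# Illustrative sketch (not part of the original module): get_backend() above
# resolves each AHEM_BACKENDS entry as a dotted path.  The helper name and the
# example path in the comment are hypothetical.
def _example_resolve_backend_path(path):
    """Split 'package.module.ClassName' and return the class it points to."""
    module_path, class_name = path.rsplit(".", 1)
    return getattr(import_module(module_path), class_name)
# e.g. _example_resolve_backend_path('ahem.backends.EmailBackend')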
|
vintasoftware/Ahem
|
ahem/utils.py
|
Python
|
bsd-3-clause
| 1,219
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib.admin.views.decorators import staff_member_required
from django_lean.experiments.views import experiment_details, list_experiments
urlpatterns = patterns('django_lean.experiments.views',
url(r'^(?P<experiment_name>.+)/$', staff_member_required(experiment_details), name="experiments_experiment_details"),
url(r'^$', staff_member_required(list_experiments), name="experiments_list_experiments")
)
|
uhuramedia/django-lean
|
django_lean/experiments/admin_urls.py
|
Python
|
bsd-3-clause
| 503
|
from django.test import TestCase
from django.urls import reverse
from django.utils.text import slugify
from .wagtail_tests import WagtailTestUtils
class WagtailPageTests(WagtailTestUtils, TestCase):
"""
A set of asserts to help write tests for your own Wagtail site.
"""
def setUp(self):
super().setUp()
self.login()
def _testCanCreateAt(self, parent_model, child_model):
return child_model in parent_model.allowed_subpage_models()
def assertCanCreateAt(self, parent_model, child_model, msg=None):
"""
Assert a particular child Page type can be created under a parent
Page type. ``parent_model`` and ``child_model`` should be the Page
classes being tested.
"""
if not self._testCanCreateAt(parent_model, child_model):
msg = self._formatMessage(
msg,
"Can not create a %s.%s under a %s.%s"
% (
child_model._meta.app_label,
child_model._meta.model_name,
parent_model._meta.app_label,
parent_model._meta.model_name,
),
)
raise self.failureException(msg)
def assertCanNotCreateAt(self, parent_model, child_model, msg=None):
"""
Assert a particular child Page type can not be created under a parent
Page type. ``parent_model`` and ``child_model`` should be the Page
classes being tested.
"""
if self._testCanCreateAt(parent_model, child_model):
msg = self._formatMessage(
msg,
"Can create a %s.%s under a %s.%s"
% (
child_model._meta.app_label,
child_model._meta.model_name,
parent_model._meta.app_label,
parent_model._meta.model_name,
),
)
raise self.failureException(msg)
def assertCanCreate(self, parent, child_model, data, msg=None):
"""
Assert that a child of the given Page type can be created under the
parent, using the supplied POST data.
``parent`` should be a Page instance, and ``child_model`` should be a
Page subclass. ``data`` should be a dict that will be POSTed at the
Wagtail admin Page creation method.
"""
self.assertCanCreateAt(parent.specific_class, child_model)
if "slug" not in data and "title" in data:
data["slug"] = slugify(data["title"])
data["action-publish"] = "action-publish"
add_url = reverse(
"wagtailadmin_pages:add",
args=[child_model._meta.app_label, child_model._meta.model_name, parent.pk],
)
response = self.client.post(add_url, data, follow=True)
if response.status_code != 200:
msg = self._formatMessage(
msg,
"Creating a %s.%s returned a %d"
% (
child_model._meta.app_label,
child_model._meta.model_name,
response.status_code,
),
)
raise self.failureException(msg)
if response.redirect_chain == []:
if "form" not in response.context:
msg = self._formatMessage(msg, "Creating a page failed unusually")
raise self.failureException(msg)
form = response.context["form"]
if not form.errors:
msg = self._formatMessage(
msg, "Creating a page failed for an unknown reason"
)
raise self.failureException(msg)
errors = "\n".join(
" %s:\n %s" % (field, "\n ".join(errors))
for field, errors in sorted(form.errors.items())
)
msg = self._formatMessage(
msg,
"Validation errors found when creating a %s.%s:\n%s"
% (child_model._meta.app_label, child_model._meta.model_name, errors),
)
raise self.failureException(msg)
explore_url = reverse("wagtailadmin_explore", args=[parent.pk])
if response.redirect_chain != [(explore_url, 302)]:
msg = self._formatMessage(
msg,
"Creating a page %s.%s didn't redirect the user to the explorer, but to %s"
% (
child_model._meta.app_label,
child_model._meta.model_name,
response.redirect_chain,
),
)
raise self.failureException(msg)
def assertAllowedSubpageTypes(self, parent_model, child_models, msg=None):
"""
Test that the only page types that can be created under
``parent_model`` are ``child_models``.
The list of allowed child models may differ from those set in
``Page.subpage_types``, if the child models have set
``Page.parent_page_types``.
"""
self.assertEqual(
set(parent_model.allowed_subpage_models()), set(child_models), msg=msg
)
def assertAllowedParentPageTypes(self, child_model, parent_models, msg=None):
"""
Test that the only page types that ``child_model`` can be created under
are ``parent_models``.
The list of allowed parent models may differ from those set in
``Page.parent_page_types``, if the parent models have set
``Page.subpage_types``.
"""
self.assertEqual(
set(child_model.allowed_parent_page_models()), set(parent_models), msg=msg
)
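# Illustrative sketch (not part of the original module): how a project's test
# suite might use the asserts above.  ``HomePage`` and ``BlogPage`` are
# hypothetical models from an imaginary ``myproject.models`` module, so the
# example is left commented out.
#
#     from myproject.models import BlogPage, HomePage
#
#     class MySitePageTests(WagtailPageTests):
#         def test_page_hierarchy(self):
#             self.assertCanCreateAt(HomePage, BlogPage)
#             self.assertAllowedParentPageTypes(BlogPage, {HomePage})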
|
wagtail/wagtail
|
wagtail/tests/utils/page_tests.py
|
Python
|
bsd-3-clause
| 5,707
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
''' Nose test generators
Need function load / save / roundtrip tests
'''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
from glob import glob
from io import BytesIO
from tempfile import mkdtemp
from scipy.lib.six import u, text_type, string_types
import warnings
import shutil
import gzip
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_raises, run_module_suite,
assert_)
import numpy as np
from numpy import array
import scipy.sparse as SP
import scipy.io.matlab.byteordercodes as boc
from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError
from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat)
from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader,
MatlabFunction, varmats_from_mat)
from scipy.io.matlab import mio5_params as mio5p
test_data_path = pjoin(dirname(__file__), 'data')
def mlarr(*args, **kwargs):
"""Convenience function to return matlab-compatible 2D array."""
arr = np.array(*args, **kwargs)
arr.shape = matdims(arr)
return arr
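# Illustrative helper (not part of the original module): mlarr() relies on
# matdims() to promote scalars and 1-D input to MATLAB-compatible 2-D shapes.
# The leading underscore keeps nose from collecting it as a test.
def _example_mlarr_shapes():
    assert mlarr(1).shape == (1, 1)    # scalars become 1x1 matrices
    assert mlarr([1, 2, 3]).ndim == 2  # 1-D input gains a second dimension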
# Define cases to test
theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)
case_table4 = [
{'name': 'double',
'classes': {'testdouble': 'double'},
'expected': {'testdouble': theta}
}]
case_table4.append(
{'name': 'string',
'classes': {'teststring': 'char'},
'expected': {'teststring':
array([u('"Do nine men interpret?" "Nine men," I nod.')])}
})
case_table4.append(
{'name': 'complex',
'classes': {'testcomplex': 'double'},
'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}
})
A = np.zeros((3,5))
A[0] = list(range(1,6))
A[:,0] = list(range(1,4))
case_table4.append(
{'name': 'matrix',
'classes': {'testmatrix': 'double'},
'expected': {'testmatrix': A},
})
case_table4.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
B = A.astype(complex)
B[0,0] += 1j
case_table4.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table4.append(
{'name': 'multi',
'classes': {'theta': 'double', 'a': 'double'},
'expected': {'theta': theta, 'a': A},
})
case_table4.append(
{'name': 'minus',
'classes': {'testminus': 'double'},
'expected': {'testminus': mlarr(-1)},
})
case_table4.append(
{'name': 'onechar',
'classes': {'testonechar': 'char'},
'expected': {'testonechar': array([u('r')])},
})
# Cell arrays stored as object arrays
CA = mlarr(( # tuple for object array creation
[],
mlarr([1]),
mlarr([[1,2]]),
mlarr([[1,2,3]])), dtype=object).reshape(1,-1)
CA[0,0] = array(
[u('This cell contains this string and 3 arrays of increasing length')])
case_table5 = [
{'name': 'cell',
'classes': {'testcell': 'cell'},
'expected': {'testcell': CA}}]
CAE = mlarr(( # tuple for object array creation
mlarr(1),
mlarr(2),
mlarr([]),
mlarr([]),
mlarr(3)), dtype=object).reshape(1,-1)
objarr = np.empty((1,1),dtype=object)
objarr[0,0] = mlarr(1)
case_table5.append(
{'name': 'scalarcell',
'classes': {'testscalarcell': 'cell'},
'expected': {'testscalarcell': objarr}
})
case_table5.append(
{'name': 'emptycell',
'classes': {'testemptycell': 'cell'},
'expected': {'testemptycell': CAE}})
case_table5.append(
{'name': 'stringarray',
'classes': {'teststringarray': 'char'},
'expected': {'teststringarray': array(
[u('one '), u('two '), u('three')])},
})
case_table5.append(
{'name': '3dmatrix',
'classes': {'test3dmatrix': 'double'},
'expected': {
'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}
})
st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)
dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]
st1 = np.zeros((1,1), dtype)
st1['stringfield'][0,0] = array([u('Rats live on no evil star.')])
st1['doublefield'][0,0] = st_sub_arr
st1['complexfield'][0,0] = st_sub_arr * (1 + 1j)
case_table5.append(
{'name': 'struct',
'classes': {'teststruct': 'struct'},
'expected': {'teststruct': st1}
})
CN = np.zeros((1,2), dtype=object)
CN[0,0] = mlarr(1)
CN[0,1] = np.zeros((1,3), dtype=object)
CN[0,1][0,0] = mlarr(2, dtype=np.uint8)
CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)
CN[0,1][0,2] = np.zeros((1,2), dtype=object)
CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)
CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)
case_table5.append(
{'name': 'cellnest',
'classes': {'testcellnest': 'cell'},
'expected': {'testcellnest': CN},
})
st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])
st2[0,0]['one'] = mlarr(1)
st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])
st2[0,0]['two'][0,0]['three'] = array([u('number 3')])
case_table5.append(
{'name': 'structnest',
'classes': {'teststructnest': 'struct'},
'expected': {'teststructnest': st2}
})
a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])
a[0,0]['one'] = mlarr(1)
a[0,0]['two'] = mlarr(2)
a[0,1]['one'] = array([u('number 1')])
a[0,1]['two'] = array([u('number 2')])
case_table5.append(
{'name': 'structarr',
'classes': {'teststructarr': 'struct'},
'expected': {'teststructarr': a}
})
ODT = np.dtype([(n, object) for n in
['expr', 'inputExpr', 'args',
'isEmpty', 'numArgs', 'version']])
MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')
m0 = MO[0,0]
m0['expr'] = array([u('x')])
m0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')])
m0['args'] = array([u('x')])
m0['isEmpty'] = mlarr(0)
m0['numArgs'] = mlarr(1)
m0['version'] = mlarr(1)
case_table5.append(
{'name': 'object',
'classes': {'testobject': 'object'},
'expected': {'testobject': MO}
})
fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')
u_str = fp_u_str.read().decode('utf-8')
fp_u_str.close()
case_table5.append(
{'name': 'unicode',
'classes': {'testunicode': 'char'},
'expected': {'testunicode': array([u_str])}
})
case_table5.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
case_table5.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table5.append(
{'name': 'bool',
'classes': {'testbools': 'logical'},
'expected': {'testbools':
array([[True], [False]])},
})
case_table5_rt = case_table5[:]
# Inline functions can't be concatenated in matlab, so RT only
case_table5_rt.append(
{'name': 'objectarray',
'classes': {'testobjectarray': 'object'},
'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})
def types_compatible(var1, var2):
"""Check if types are same or compatible.
0-D numpy scalars are compatible with bare python scalars.
"""
type1 = type(var1)
type2 = type(var2)
if type1 is type2:
return True
if type1 is np.ndarray and var1.shape == ():
return type(var1.item()) is type2
if type2 is np.ndarray and var2.shape == ():
return type(var2.item()) is type1
return False
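# Illustrative helper (not part of the original module): types_compatible() above
# treats 0-D numpy scalars and bare Python scalars as interchangeable, while
# genuinely different scalar types still fail the check.
def _example_types_compatible():
    assert types_compatible(np.array(1), 1)
    assert not types_compatible(1.0, 1)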
def _check_level(label, expected, actual):
""" Check one level of a potentially nested array """
if SP.issparse(expected): # allow different types of sparse matrices
assert_(SP.issparse(actual))
assert_array_almost_equal(actual.todense(),
expected.todense(),
err_msg=label,
decimal=5)
return
# Check types are as expected
assert_(types_compatible(expected, actual),
"Expected type %s, got %s at %s" %
(type(expected), type(actual), label))
# A field in a record array may not be an ndarray
# A scalar from a record array will be type np.void
if not isinstance(expected,
(np.void, np.ndarray, MatlabObject)):
assert_equal(expected, actual)
return
# This is an ndarray-like thing
assert_(expected.shape == actual.shape,
msg='Expected shape %s, got %s at %s' % (expected.shape,
actual.shape,
label))
ex_dtype = expected.dtype
if ex_dtype.hasobject: # array of objects
if isinstance(expected, MatlabObject):
assert_equal(expected.classname, actual.classname)
for i, ev in enumerate(expected):
level_label = "%s, [%d], " % (label, i)
_check_level(level_label, ev, actual[i])
return
if ex_dtype.fields: # probably recarray
for fn in ex_dtype.fields:
level_label = "%s, field %s, " % (label, fn)
_check_level(level_label,
expected[fn], actual[fn])
return
if ex_dtype.type in (text_type, # string or bool
np.unicode_,
np.bool_):
assert_equal(actual, expected, err_msg=label)
return
# Something numeric
assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)
def _load_check_case(name, files, case):
for file_name in files:
matdict = loadmat(file_name, struct_as_record=True)
label = "test %s; file %s" % (name, file_name)
for k, expected in case.items():
k_label = "%s, variable %s" % (label, k)
assert_(k in matdict, "Missing key at %s" % k_label)
_check_level(k_label, expected, matdict[k])
def _whos_check_case(name, files, case, classes):
for file_name in files:
label = "test %s; file %s" % (name, file_name)
whos = whosmat(file_name)
expected_whos = []
for k, expected in case.items():
expected_whos.append((k, expected.shape, classes[k]))
whos.sort()
expected_whos.sort()
assert_equal(whos, expected_whos,
"%s: %r != %r" % (label, whos, expected_whos)
)
# Round trip tests
def _rt_check_case(name, expected, format):
mat_stream = BytesIO()
savemat(mat_stream, expected, format=format)
mat_stream.seek(0)
_load_check_case(name, [mat_stream], expected)
# generator for load tests
def test_load():
for case in case_table4 + case_table5:
name = case['name']
expected = case['expected']
filt = pjoin(test_data_path, 'test%s_*.mat' % name)
files = glob(filt)
assert_(len(files) > 0,
"No files for test %s using filter %s" % (name, filt))
yield _load_check_case, name, files, expected
# generator for whos tests
def test_whos():
for case in case_table4 + case_table5:
name = case['name']
expected = case['expected']
classes = case['classes']
filt = pjoin(test_data_path, 'test%s_*.mat' % name)
files = glob(filt)
assert_(len(files) > 0,
"No files for test %s using filter %s" % (name, filt))
yield _whos_check_case, name, files, expected, classes
# generator for round trip tests
def test_round_trip():
    # Compute the format-4 names once, outside the loop, so the comprehension
    # does not shadow the ``case`` loop variable (a Python 2 pitfall).
    case_table4_names = [c['name'] for c in case_table4]
    for case in case_table4 + case_table5_rt:
        name = case['name'] + '_round_trip'
        expected = case['expected']
        for format in (['4', '5'] if case['name'] in case_table4_names else ['5']):
            yield _rt_check_case, name, expected, format
def test_gzip_simple():
xdense = np.zeros((20,20))
xdense[2,3] = 2.3
xdense[4,5] = 4.5
x = SP.csc_matrix(xdense)
name = 'gzip_test'
expected = {'x':x}
format = '4'
tmpdir = mkdtemp()
try:
fname = pjoin(tmpdir,name)
mat_stream = gzip.open(fname,mode='wb')
savemat(mat_stream, expected, format=format)
mat_stream.close()
mat_stream = gzip.open(fname,mode='rb')
actual = loadmat(mat_stream, struct_as_record=True)
mat_stream.close()
finally:
shutil.rmtree(tmpdir)
assert_array_almost_equal(actual['x'].todense(),
expected['x'].todense(),
err_msg=repr(actual))
def test_multiple_open():
# Ticket #1039, on Windows: check that files are not left open
tmpdir = mkdtemp()
try:
x = dict(x=np.zeros((2, 2)))
fname = pjoin(tmpdir, "a.mat")
# Check that file is not left open
savemat(fname, x)
os.unlink(fname)
savemat(fname, x)
loadmat(fname)
os.unlink(fname)
# Check that stream is left open
f = open(fname, 'wb')
savemat(f, x)
f.seek(0)
f.close()
f = open(fname, 'rb')
loadmat(f)
f.seek(0)
f.close()
finally:
shutil.rmtree(tmpdir)
def test_mat73():
# Check any hdf5 files raise an error
filenames = glob(
pjoin(test_data_path, 'testhdf5*.mat'))
assert_(len(filenames) > 0)
for filename in filenames:
fp = open(filename, 'rb')
assert_raises(NotImplementedError,
loadmat,
fp,
struct_as_record=True)
fp.close()
def test_warnings():
# This test is an echo of the previous behavior, which was to raise a
# warning if the user triggered a search for mat files on the Python system
# path. We can remove the test in the next version after upcoming (0.13)
fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')
with warnings.catch_warnings():
warnings.simplefilter('error')
# This should not generate a warning
mres = loadmat(fname, struct_as_record=True)
# This neither
mres = loadmat(fname, struct_as_record=False)
def test_regression_653():
assert_raises(TypeError, savemat, BytesIO(), {'d':{1:2}}, format='5')
def test_structname_len():
# Test limit for length of field names in structs
lim = 31
fldname = 'a' * lim
st1 = np.zeros((1,1), dtype=[(fldname, object)])
savemat(BytesIO(), {'longstruct': st1}, format='5')
fldname = 'a' * (lim+1)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': st1}, format='5')
def test_4_and_long_field_names_incompatible():
# Long field names option not supported in 4
my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])
assert_raises(ValueError, savemat, BytesIO(),
{'my_struct':my_struct}, format='4', long_field_names=True)
def test_long_field_names():
# Test limit for length of field names in structs
lim = 63
fldname = 'a' * lim
st1 = np.zeros((1,1), dtype=[(fldname, object)])
savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)
fldname = 'a' * (lim+1)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': st1}, format='5',long_field_names=True)
def test_long_field_names_in_struct():
# Regression test - long_field_names was erased if you passed a struct
# within a struct
lim = 63
fldname = 'a' * lim
cell = np.ndarray((1,2),dtype=object)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
cell[0,0] = st1
cell[0,1] = st1
savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)
#
# Check to make sure it fails with long field names off
#
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': cell}, format='5', long_field_names=False)
def test_cell_with_one_thing_in_it():
# Regression test - make a cell array that's 1 x 2 and put two
# strings in it. It works. Make a cell array that's 1 x 1 and put
# a string in it. It should work but, in the old days, it didn't.
cells = np.ndarray((1,2),dtype=object)
cells[0,0] = 'Hello'
cells[0,1] = 'World'
savemat(BytesIO(), {'x': cells}, format='5')
cells = np.ndarray((1,1),dtype=object)
cells[0,0] = 'Hello, world'
savemat(BytesIO(), {'x': cells}, format='5')
def test_writer_properties():
# Tests getting, setting of properties of matrix writer
mfw = MatFile5Writer(BytesIO())
yield assert_equal, mfw.global_vars, []
mfw.global_vars = ['avar']
yield assert_equal, mfw.global_vars, ['avar']
yield assert_equal, mfw.unicode_strings, False
mfw.unicode_strings = True
yield assert_equal, mfw.unicode_strings, True
yield assert_equal, mfw.long_field_names, False
mfw.long_field_names = True
yield assert_equal, mfw.long_field_names, True
def test_use_small_element():
# Test whether we're using small data element or not
sio = BytesIO()
wtr = MatFile5Writer(sio)
# First check size for no sde for name
arr = np.zeros(10)
wtr.put_variables({'aaaaa': arr})
w_sz = len(sio.getvalue())
# Check small name results in largish difference in size
sio.truncate(0)
sio.seek(0)
wtr.put_variables({'aaaa': arr})
yield assert_, w_sz - len(sio.getvalue()) > 4
# Whereas increasing name size makes less difference
sio.truncate(0)
sio.seek(0)
wtr.put_variables({'aaaaaa': arr})
yield assert_, len(sio.getvalue()) - w_sz < 4
def test_save_dict():
# Test that dict can be saved (as recarray), loaded as matstruct
dict_types = ((dict, False),)
try:
from collections import OrderedDict
except ImportError:
pass
else:
dict_types += ((OrderedDict, True),)
ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
for dict_type, is_ordered in dict_types:
# Initialize with tuples to keep order for OrderedDict
d = dict_type([('a', 1), ('b', 2)])
stream = BytesIO()
savemat(stream, {'dict': d})
stream.seek(0)
vals = loadmat(stream)['dict']
assert_equal(set(vals.dtype.names), set(['a', 'b']))
if is_ordered: # Input was ordered, output in ab order
assert_array_equal(vals, ab_exp)
else: # Not ordered input, either order output
if vals.dtype.names[0] == 'a':
assert_array_equal(vals, ab_exp)
else:
assert_array_equal(vals, ba_exp)
def test_1d_shape():
# New 5 behavior is 1D -> row vector
arr = np.arange(5)
for format in ('4', '5'):
        # The default (no oned_as argument) produces a row vector
stream = BytesIO()
savemat(stream, {'oned': arr}, format=format)
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (1, 5))
# can be explicitly 'column' for oned_as
stream = BytesIO()
savemat(stream, {'oned':arr},
format=format,
oned_as='column')
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (5,1))
# but different from 'row'
stream = BytesIO()
savemat(stream, {'oned':arr},
format=format,
oned_as='row')
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (1,5))
def test_compression():
arr = np.zeros(100).reshape((5,20))
arr[2,10] = 1
stream = BytesIO()
savemat(stream, {'arr':arr})
raw_len = len(stream.getvalue())
vals = loadmat(stream)
yield assert_array_equal, vals['arr'], arr
stream = BytesIO()
savemat(stream, {'arr':arr}, do_compression=True)
compressed_len = len(stream.getvalue())
vals = loadmat(stream)
yield assert_array_equal, vals['arr'], arr
yield assert_, raw_len > compressed_len
# Concatenate, test later
arr2 = arr.copy()
arr2[0,0] = 1
stream = BytesIO()
savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False)
vals = loadmat(stream)
yield assert_array_equal, vals['arr2'], arr2
stream = BytesIO()
savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)
vals = loadmat(stream)
yield assert_array_equal, vals['arr2'], arr2
def test_single_object():
stream = BytesIO()
savemat(stream, {'A':np.array(1, dtype=object)})
def test_skip_variable():
# Test skipping over the first of two variables in a MAT file
# using mat_reader_factory and put_variables to read them in.
#
# This is a regression test of a problem that's caused by
# using the compressed file reader seek instead of the raw file
# I/O seek when skipping over a compressed chunk.
#
# The problem arises when the chunk is large: this file has
# a 256x256 array of random (uncompressible) doubles.
#
filename = pjoin(test_data_path,'test_skip_variable.mat')
#
# Prove that it loads with loadmat
#
d = loadmat(filename, struct_as_record=True)
yield assert_, 'first' in d
yield assert_, 'second' in d
#
# Make the factory
#
factory = mat_reader_factory(filename, struct_as_record=True)
#
# This is where the factory breaks with an error in MatMatrixGetter.to_next
#
d = factory.get_variables('second')
yield assert_, 'second' in d
factory.mat_stream.close()
def test_empty_struct():
# ticket 885
filename = pjoin(test_data_path,'test_empty_struct.mat')
# before ticket fix, this would crash with ValueError, empty data
# type
d = loadmat(filename, struct_as_record=True)
a = d['a']
assert_equal(a.shape, (1,1))
assert_equal(a.dtype, np.dtype(np.object))
assert_(a[0,0] is None)
stream = BytesIO()
arr = np.array((), dtype='U')
# before ticket fix, this used to give data type not understood
savemat(stream, {'arr':arr})
d = loadmat(stream)
a2 = d['arr']
assert_array_equal(a2, arr)
def test_recarray():
# check roundtrip of structured array
dt = [('f1', 'f8'),
('f2', 'S10')]
arr = np.zeros((2,), dtype=dt)
arr[0]['f1'] = 0.5
arr[0]['f2'] = 'python'
arr[1]['f1'] = 99
arr[1]['f2'] = 'not perl'
stream = BytesIO()
savemat(stream, {'arr': arr})
d = loadmat(stream, struct_as_record=False)
a20 = d['arr'][0,0]
yield assert_equal, a20.f1, 0.5
yield assert_equal, a20.f2, 'python'
d = loadmat(stream, struct_as_record=True)
a20 = d['arr'][0,0]
yield assert_equal, a20['f1'], 0.5
yield assert_equal, a20['f2'], 'python'
# structs always come back as object types
yield assert_equal, a20.dtype, np.dtype([('f1', 'O'),
('f2', 'O')])
a21 = d['arr'].flat[1]
yield assert_equal, a21['f1'], 99
yield assert_equal, a21['f2'], 'not perl'
def test_save_object():
class C(object):
pass
c = C()
c.field1 = 1
c.field2 = 'a string'
stream = BytesIO()
savemat(stream, {'c': c})
d = loadmat(stream, struct_as_record=False)
c2 = d['c'][0,0]
assert_equal(c2.field1, 1)
assert_equal(c2.field2, 'a string')
d = loadmat(stream, struct_as_record=True)
c2 = d['c'][0,0]
assert_equal(c2['field1'], 1)
assert_equal(c2['field2'], 'a string')
def test_read_opts():
# tests if read is seeing option sets, at initialization and after
# initialization
arr = np.arange(6).reshape(1,6)
stream = BytesIO()
savemat(stream, {'a': arr})
rdr = MatFile5Reader(stream)
back_dict = rdr.get_variables()
rarr = back_dict['a']
assert_array_equal(rarr, arr)
rdr = MatFile5Reader(stream, squeeze_me=True)
assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
rdr.squeeze_me = False
assert_array_equal(rarr, arr)
rdr = MatFile5Reader(stream, byte_order=boc.native_code)
assert_array_equal(rdr.get_variables()['a'], arr)
# inverted byte code leads to error on read because of swapped
# header etc
rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
assert_raises(Exception, rdr.get_variables)
rdr.byte_order = boc.native_code
assert_array_equal(rdr.get_variables()['a'], arr)
arr = np.array(['a string'])
stream.truncate(0)
stream.seek(0)
savemat(stream, {'a': arr})
rdr = MatFile5Reader(stream)
assert_array_equal(rdr.get_variables()['a'], arr)
rdr = MatFile5Reader(stream, chars_as_strings=False)
carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
assert_array_equal(rdr.get_variables()['a'], carr)
rdr.chars_as_strings = True
assert_array_equal(rdr.get_variables()['a'], arr)
def test_empty_string():
# make sure reading empty string does not raise error
estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
fp = open(estring_fname, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_array_equal(d['a'], np.array([], dtype='U1'))
    # empty string round trip. Matlab cannot distinguish
# between a string array that is empty, and a string array
# containing a single empty string, because it stores strings as
# arrays of char. There is no way of having an array of char that
# is not empty, but contains an empty string.
stream = BytesIO()
savemat(stream, {'a': np.array([''])})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['a'], np.array([], dtype='U1'))
stream.truncate(0)
stream.seek(0)
savemat(stream, {'a': np.array([], dtype='U1')})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['a'], np.array([], dtype='U1'))
stream.close()
def test_corrupted_data():
import zlib
for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),
(zlib.error, 'corrupted_zlib_checksum.mat')]:
with open(pjoin(test_data_path, fname), 'rb') as fp:
rdr = MatFile5Reader(fp)
assert_raises(exc, rdr.get_variables)
def test_corrupted_data_check_can_be_disabled():
with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:
rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)
rdr.get_variables()
def test_read_both_endian():
# make sure big- and little- endian data is read correctly
for fname in ('big_endian.mat', 'little_endian.mat'):
fp = open(pjoin(test_data_path, fname), 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_array_equal(d['strings'],
np.array([['hello'],
['world']], dtype=np.object))
assert_array_equal(d['floats'],
np.array([[2., 3.],
[3., 4.]], dtype=np.float32))
def test_write_opposite_endian():
# We don't support writing opposite endian .mat files, but we need to behave
# correctly if the user supplies an other-endian numpy array to write out
float_arr = np.array([[2., 3.],
[3., 4.]])
int_arr = np.arange(6).reshape((2, 3))
uni_arr = np.array(['hello', 'world'], dtype='U')
stream = BytesIO()
savemat(stream, {'floats': float_arr.byteswap().newbyteorder(),
'ints': int_arr.byteswap().newbyteorder(),
'uni_arr': uni_arr.byteswap().newbyteorder()})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['floats'], float_arr)
assert_array_equal(d['ints'], int_arr)
assert_array_equal(d['uni_arr'], uni_arr)
stream.close()
def test_logical_array():
# The roundtrip test doesn't verify that we load the data up with the
# correct (bool) dtype
with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:
rdr = MatFile5Reader(fobj, mat_dtype=True)
d = rdr.get_variables()
x = np.array([[True], [False]], dtype=np.bool_)
assert_array_equal(d['testbools'], x)
assert_equal(d['testbools'].dtype, x.dtype)
def test_logical_out_type():
# Confirm that bool type written as uint8, uint8 class
# See gh-4022
stream = BytesIO()
barr = np.array([False, True, False])
savemat(stream, {'barray': barr})
stream.seek(0)
reader = MatFile5Reader(stream)
reader.initialize_read()
reader.read_file_header()
hdr, _ = reader.read_var_header()
assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)
assert_equal(hdr.is_logical, True)
var = reader.read_var_array(hdr, False)
assert_equal(var.dtype.type, np.uint8)
def test_mat4_3d():
# test behavior when writing 3D arrays to matlab 4 files
stream = BytesIO()
arr = np.arange(24).reshape((2,3,4))
assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4')
def test_func_read():
func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')
fp = open(func_eg, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_(isinstance(d['testfunc'], MatlabFunction))
stream = BytesIO()
wtr = MatFile5Writer(stream)
assert_raises(MatWriteError, wtr.put_variables, d)
def test_mat_dtype():
double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')
fp = open(double_eg, 'rb')
rdr = MatFile5Reader(fp, mat_dtype=False)
d = rdr.get_variables()
fp.close()
yield assert_equal, d['testmatrix'].dtype.kind, 'u'
fp = open(double_eg, 'rb')
rdr = MatFile5Reader(fp, mat_dtype=True)
d = rdr.get_variables()
fp.close()
yield assert_equal, d['testmatrix'].dtype.kind, 'f'
def test_sparse_in_struct():
# reproduces bug found by DC where Cython code was insisting on
# ndarray return type, but getting sparse matrix
st = {'sparsefield': SP.coo_matrix(np.eye(4))}
stream = BytesIO()
savemat(stream, {'a':st})
d = loadmat(stream, struct_as_record=True)
yield assert_array_equal, d['a'][0,0]['sparsefield'].todense(), np.eye(4)
def test_mat_struct_squeeze():
stream = BytesIO()
in_d = {'st':{'one':1, 'two':2}}
savemat(stream, in_d)
# no error without squeeze
out_d = loadmat(stream, struct_as_record=False)
# previous error was with squeeze, with mat_struct
out_d = loadmat(stream,
struct_as_record=False,
squeeze_me=True,
)
def test_scalar_squeeze():
stream = BytesIO()
in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}
savemat(stream, in_d)
out_d = loadmat(stream, squeeze_me=True)
assert_(isinstance(out_d['scalar'], float))
assert_(isinstance(out_d['string'], string_types))
assert_(isinstance(out_d['st'], np.ndarray))
def test_str_round():
# from report by Angus McMorland on mailing list 3 May 2010
stream = BytesIO()
in_arr = np.array(['Hello', 'Foob'])
out_arr = np.array(['Hello', 'Foob '])
savemat(stream, dict(a=in_arr))
res = loadmat(stream)
# resulted in ['HloolFoa', 'elWrdobr']
assert_array_equal(res['a'], out_arr)
stream.truncate(0)
stream.seek(0)
# Make Fortran ordered version of string
in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
# unicode save did lead to buffer too small error
stream.truncate(0)
stream.seek(0)
in_arr_u = in_arr.astype('U')
out_arr_u = out_arr.astype('U')
savemat(stream, {'a': in_arr_u})
res = loadmat(stream)
assert_array_equal(res['a'], out_arr_u)
def test_fieldnames():
# Check that field names are as expected
stream = BytesIO()
savemat(stream, {'a': {'a':1, 'b':2}})
res = loadmat(stream)
field_names = res['a'].dtype.names
assert_equal(set(field_names), set(('a', 'b')))
def test_loadmat_varnames():
# Test that we can get just one variable from a mat file using loadmat
mat5_sys_names = ['__globals__',
'__header__',
'__version__']
for eg_file, sys_v_names in (
(pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin(
test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):
vars = loadmat(eg_file)
assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
vars = loadmat(eg_file, variable_names='a')
assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
vars = loadmat(eg_file, variable_names=['a'])
assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
vars = loadmat(eg_file, variable_names=['theta'])
assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
vars = loadmat(eg_file, variable_names=('theta',))
assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
vnames = ['theta']
vars = loadmat(eg_file, variable_names=vnames)
assert_equal(vnames, ['theta'])
def test_round_types():
# Check that saving, loading preserves dtype in most cases
arr = np.arange(10)
stream = BytesIO()
for dts in ('f8','f4','i8','i4','i2','i1',
'u8','u4','u2','u1','c16','c8'):
stream.truncate(0)
stream.seek(0) # needed for BytesIO in python 3
savemat(stream, {'arr': arr.astype(dts)})
vars = loadmat(stream)
assert_equal(np.dtype(dts), vars['arr'].dtype)
def test_varmats_from_mat():
# Make a mat file with several variables, write it, read it back
names_vars = (('arr', mlarr(np.arange(10))),
('mystr', mlarr('a string')),
('mynum', mlarr(10)))
# Dict like thing to give variables in defined order
class C(object):
def items(self):
return names_vars
stream = BytesIO()
savemat(stream, C())
varmats = varmats_from_mat(stream)
assert_equal(len(varmats), 3)
for i in range(3):
name, var_stream = varmats[i]
exp_name, exp_res = names_vars[i]
assert_equal(name, exp_name)
res = loadmat(var_stream)
assert_array_equal(res[name], exp_res)
def test_one_by_zero():
# Test 1x0 chars get read correctly
func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')
fp = open(func_eg, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_equal(d['var'].shape, (0,))
def test_load_mat4_le():
    # We were getting byte order wrong when reading little-endian float64 dense
    # matrices on big-endian platforms
mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')
vars = loadmat(mat4_fname)
assert_array_equal(vars['a'], [[0.1, 1.2]])
def test_unicode_mat4():
# Mat4 should save unicode as latin1
bio = BytesIO()
var = {'second_cat': u('Schrödinger')}
savemat(bio, var, format='4')
var_back = loadmat(bio)
assert_equal(var_back['second_cat'], var['second_cat'])
def test_empty_sparse():
# Can we read empty sparse matrices?
sio = BytesIO()
import scipy.sparse
empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])
savemat(sio, dict(x=empty_sparse))
sio.seek(0)
res = loadmat(sio)
assert_array_equal(res['x'].shape, empty_sparse.shape)
assert_array_equal(res['x'].todense(), 0)
# Do empty sparse matrices get written with max nnz 1?
# See https://github.com/scipy/scipy/issues/4208
sio.seek(0)
reader = MatFile5Reader(sio)
reader.initialize_read()
reader.read_file_header()
hdr, _ = reader.read_var_header()
assert_equal(hdr.nzmax, 1)
def test_empty_mat_error():
# Test we get a specific warning for an empty mat file
sio = BytesIO()
assert_raises(MatReadError, loadmat, sio)
if __name__ == "__main__":
run_module_suite()
|
jsilter/scipy
|
scipy/io/matlab/tests/test_mio.py
|
Python
|
bsd-3-clause
| 36,362
|
import os
import random
from datetime import datetime
from inspect import isclass
import warnings
import logging
from io import BytesIO
try:
from importlib import import_module
except ImportError:
# Compatibility with Python 2.6.
from django.utils.importlib import import_module
import django
from django.utils.timezone import now
from django.db import models
from django.db.models.signals import post_save
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.template.defaultfilters import slugify
from django.utils.encoding import force_text, smart_str, filepath_to_uri
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.core.validators import RegexValidator
from django.contrib.sites.models import Site
# Required PIL classes may or may not be available from the root namespace
# depending on the installation method used.
try:
import Image
import ImageFile
import ImageFilter
import ImageEnhance
except ImportError:
try:
from PIL import Image
from PIL import ImageFile
from PIL import ImageFilter
from PIL import ImageEnhance
except ImportError:
raise ImportError(
            'Photologue was unable to import the Python Imaging Library. Please confirm it\'s installed and available '
'on your current Python path.')
from sortedm2m.fields import SortedManyToManyField
from model_utils.managers import PassThroughManager
# attempt to load the django-tagging TagField from default location,
# otherwise we substitute a dummy TagField.
try:
from tagging.fields import TagField
tagfield_help_text = _('Separate tags with spaces, put quotes around multiple-word tags.')
except ImportError:
class TagField(models.CharField):
def __init__(self, **kwargs):
default_kwargs = {'max_length': 255, 'blank': True}
default_kwargs.update(kwargs)
super(TagField, self).__init__(**default_kwargs)
def get_internal_type(self):
return 'CharField'
tagfield_help_text = _('Django-tagging was not found, tags will be treated as plain text.')
# Tell South how to handle this custom field.
if django.VERSION[:2] < (1, 7):
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^photologue\.models\.TagField"])
from .utils import EXIF
from .utils.reflection import add_reflection
from .utils.watermark import apply_watermark
from .managers import GalleryQuerySet, PhotoQuerySet
logger = logging.getLogger('photologue.models')
# Default limit for gallery.latest
LATEST_LIMIT = getattr(settings, 'PHOTOLOGUE_GALLERY_LATEST_LIMIT', None)
# Number of random images from the gallery to display.
SAMPLE_SIZE = getattr(settings, 'PHOTOLOGUE_GALLERY_SAMPLE_SIZE', 5)
# max_length setting for the ImageModel ImageField
IMAGE_FIELD_MAX_LENGTH = getattr(settings, 'PHOTOLOGUE_IMAGE_FIELD_MAX_LENGTH', 100)
# Path to sample image
SAMPLE_IMAGE_PATH = getattr(settings, 'PHOTOLOGUE_SAMPLE_IMAGE_PATH', os.path.join(
os.path.dirname(__file__), 'res', 'sample.jpg'))
# Modify image file buffer size.
ImageFile.MAXBLOCK = getattr(settings, 'PHOTOLOGUE_MAXBLOCK', 256 * 2 ** 10)
# Photologue image path relative to media root
PHOTOLOGUE_DIR = getattr(settings, 'PHOTOLOGUE_DIR', 'photologue')
# Look for user function to define file paths
PHOTOLOGUE_PATH = getattr(settings, 'PHOTOLOGUE_PATH', None)
if PHOTOLOGUE_PATH is not None:
if callable(PHOTOLOGUE_PATH):
get_storage_path = PHOTOLOGUE_PATH
else:
parts = PHOTOLOGUE_PATH.split('.')
module_name = '.'.join(parts[:-1])
module = import_module(module_name)
get_storage_path = getattr(module, parts[-1])
else:
def get_storage_path(instance, filename):
return os.path.join(PHOTOLOGUE_DIR, 'photos', filename)
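# Note: PHOTOLOGUE_PATH may be either a callable or a dotted-path string that
# resolves to one, e.g. a hypothetical settings entry
#     PHOTOLOGUE_PATH = 'myapp.uploads.get_photo_path'
# where get_photo_path(instance, filename) returns the upload path used for
# ImageModel.image.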
# Support the CACHEDIR.TAG spec so that backup tools ignore the photo cache dir.
# See http://www.brynosaurus.com/cachedir/spec.html
PHOTOLOGUE_CACHEDIRTAG = os.path.join(PHOTOLOGUE_DIR, "photos", "cache", "CACHEDIR.TAG")
if not default_storage.exists(PHOTOLOGUE_CACHEDIRTAG):
default_storage.save(PHOTOLOGUE_CACHEDIRTAG, ContentFile(
"Signature: 8a477f597d28d172789f06886806bc55"))
# Map EXIF orientation values to PIL Image.transpose() constants
IMAGE_EXIF_ORIENTATION_MAP = {
1: 0,
8: 2,
3: 3,
6: 4,
}
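# create_size() applies this map roughly as
#     im = im.transpose(IMAGE_EXIF_ORIENTATION_MAP[orientation])
# where orientation is the value of the 'Image Orientation' EXIF tag.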
# Quality options for JPEG images
JPEG_QUALITY_CHOICES = (
(30, _('Very Low')),
(40, _('Low')),
(50, _('Medium-Low')),
(60, _('Medium')),
(70, _('Medium-High')),
(80, _('High')),
(90, _('Very High')),
)
# choices for new crop_anchor field in Photo
CROP_ANCHOR_CHOICES = (
('top', _('Top')),
('right', _('Right')),
('bottom', _('Bottom')),
('left', _('Left')),
('center', _('Center (Default)')),
)
IMAGE_TRANSPOSE_CHOICES = (
('FLIP_LEFT_RIGHT', _('Flip left to right')),
('FLIP_TOP_BOTTOM', _('Flip top to bottom')),
('ROTATE_90', _('Rotate 90 degrees counter-clockwise')),
('ROTATE_270', _('Rotate 90 degrees clockwise')),
('ROTATE_180', _('Rotate 180 degrees')),
)
WATERMARK_STYLE_CHOICES = (
('tile', _('Tile')),
('scale', _('Scale')),
)
# Prepare a list of image filters
filter_names = []
for n in dir(ImageFilter):
klass = getattr(ImageFilter, n)
if isclass(klass) and issubclass(klass, ImageFilter.BuiltinFilter) and \
hasattr(klass, 'name'):
filter_names.append(klass.__name__)
IMAGE_FILTERS_HELP_TEXT = _('Chain multiple filters using the following pattern "FILTER_ONE->FILTER_TWO->FILTER_THREE"'
'. Image filters will be applied in order. The following filters are available: %s.'
% (', '.join(filter_names)))
size_method_map = {}
@python_2_unicode_compatible
class Gallery(models.Model):
date_added = models.DateTimeField(_('date published'),
default=now)
title = models.CharField(_('title'),
max_length=50,
unique=True)
slug = models.SlugField(_('title slug'),
unique=True,
help_text=_('A "slug" is a unique URL-friendly title for an object.'))
description = models.TextField(_('description'),
blank=True)
is_public = models.BooleanField(_('is public'),
default=True,
help_text=_('Public galleries will be displayed '
'in the default views.'))
photos = SortedManyToManyField('Photo',
related_name='galleries',
verbose_name=_('photos'),
null=True,
blank=True)
tags = TagField(help_text=tagfield_help_text, verbose_name=_('tags'))
sites = models.ManyToManyField(Site, verbose_name=_(u'sites'),
blank=True, null=True)
objects = PassThroughManager.for_queryset_class(GalleryQuerySet)()
class Meta:
ordering = ['-date_added']
get_latest_by = 'date_added'
verbose_name = _('gallery')
verbose_name_plural = _('galleries')
def __str__(self):
return self.title
# My defs
def get_absolute_local_url(self):
return reverse('liz_details', args=[self.slug])
def get_absolute_url(self):
return reverse('photologue:pl-gallery', args=[self.slug])
def latest(self, limit=LATEST_LIMIT, public=True):
if not limit:
limit = self.photo_count()
if public:
return self.public()[:limit]
else:
return self.photos.filter(sites__id=settings.SITE_ID)[:limit]
def sample(self, count=None, public=True):
"""Return a sample of photos, ordered at random.
If the 'count' is not specified, it will return a number of photos
limited by the GALLERY_SAMPLE_SIZE setting.
"""
if not count:
count = SAMPLE_SIZE
if count > self.photo_count():
count = self.photo_count()
if public:
photo_set = self.public()
else:
photo_set = self.photos.filter(sites__id=settings.SITE_ID)
return random.sample(set(photo_set), count)
def photo_count(self, public=True):
"""Return a count of all the photos in this gallery."""
if public:
return self.public().count()
else:
return self.photos.filter(sites__id=settings.SITE_ID).count()
photo_count.short_description = _('count')
def public(self):
"""Return a queryset of all the public photos in this gallery."""
return self.photos.is_public().filter(sites__id=settings.SITE_ID)
def orphaned_photos(self):
"""
Return all photos that belong to this gallery but don't share the
gallery's site.
"""
return self.photos.filter(is_public=True)\
.exclude(sites__id__in=self.sites.all())
@property
def title_slug(self):
warnings.warn(
DeprecationWarning("`title_slug` field in Gallery is being renamed to `slug`. Update your code."))
return self.slug
class ImageModel(models.Model):
image = models.ImageField(_('image'),
max_length=IMAGE_FIELD_MAX_LENGTH,
upload_to=get_storage_path)
date_taken = models.DateTimeField(_('date taken'),
null=True,
blank=True,
editable=False)
view_count = models.PositiveIntegerField(_('view count'),
default=0,
editable=False)
crop_from = models.CharField(_('crop from'),
blank=True,
max_length=10,
default='center',
choices=CROP_ANCHOR_CHOICES)
effect = models.ForeignKey('PhotoEffect',
null=True,
blank=True,
related_name="%(class)s_related",
verbose_name=_('effect'))
class Meta:
abstract = True
@property
def EXIF(self):
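        # Try a full EXIF parse first; if that fails, retry with details=False
        # (a more tolerant, minimal parse) and finally fall back to an empty dict.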
try:
f = self.image.storage.open(self.image.name, 'rb')
tags = EXIF.process_file(f)
f.close()
return tags
except:
try:
f = self.image.storage.open(self.image.name, 'rb')
tags = EXIF.process_file(f, details=False)
f.close()
return tags
except:
return {}
def admin_thumbnail(self):
func = getattr(self, 'get_admin_thumbnail_url', None)
if func is None:
return _('An "admin_thumbnail" photo size has not been defined.')
else:
if hasattr(self, 'get_absolute_url'):
return u'<a href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), func())
else:
return u'<a href="%s"><img src="%s"></a>' % \
(self.image.url, func())
admin_thumbnail.short_description = _('Thumbnail')
admin_thumbnail.allow_tags = True
def cache_path(self):
return os.path.join(os.path.dirname(self.image.name), "cache")
def cache_url(self):
return '/'.join([os.path.dirname(self.image.url), "cache"])
def image_filename(self):
return os.path.basename(force_text(self.image.name))
def _get_filename_for_size(self, size):
size = getattr(size, 'name', size)
base, ext = os.path.splitext(self.image_filename())
return ''.join([base, '_', size, ext])
def _get_SIZE_photosize(self, size):
return PhotoSizeCache().sizes.get(size)
def _get_SIZE_size(self, size):
photosize = PhotoSizeCache().sizes.get(size)
if not self.size_exists(photosize):
self.create_size(photosize)
return Image.open(self.image.storage.open(
self._get_SIZE_filename(size))).size
def _get_SIZE_url(self, size):
photosize = PhotoSizeCache().sizes.get(size)
if not self.size_exists(photosize):
self.create_size(photosize)
if photosize.increment_count:
self.increment_count()
return '/'.join([
self.cache_url(),
filepath_to_uri(self._get_filename_for_size(photosize.name))])
def _get_SIZE_filename(self, size):
photosize = PhotoSizeCache().sizes.get(size)
return smart_str(os.path.join(self.cache_path(),
self._get_filename_for_size(photosize.name)))
def increment_count(self):
self.view_count += 1
models.Model.save(self)
def __getattr__(self, name):
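        # Resolve dynamically generated accessors such as get_<size>_url,
        # get_<size>_size, get_<size>_filename and get_<size>_photosize by
        # currying the generic _get_SIZE_* helpers with the requested size name
        # (the lookup table is built by init_size_method_map below).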
global size_method_map
if not size_method_map:
init_size_method_map()
di = size_method_map.get(name, None)
if di is not None:
result = curry(getattr(self, di['base_name']), di['size'])
setattr(self, name, result)
return result
else:
raise AttributeError
def size_exists(self, photosize):
func = getattr(self, "get_%s_filename" % photosize.name, None)
if func is not None:
if self.image.storage.exists(func()):
return True
return False
def resize_image(self, im, photosize):
cur_width, cur_height = im.size
new_width, new_height = photosize.size
if photosize.crop:
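            # Scale by the larger of the width/height ratios so the image fully
            # covers the target box, then crop the overflow on the other axis
            # according to the crop_from anchor.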
ratio = max(float(new_width) / cur_width, float(new_height) / cur_height)
x = (cur_width * ratio)
y = (cur_height * ratio)
xd = abs(new_width - x)
yd = abs(new_height - y)
x_diff = int(xd / 2)
y_diff = int(yd / 2)
if self.crop_from == 'top':
box = (int(x_diff), 0, int(x_diff + new_width), new_height)
elif self.crop_from == 'left':
box = (0, int(y_diff), new_width, int(y_diff + new_height))
elif self.crop_from == 'bottom':
# y - yd = new_height
box = (int(x_diff), int(yd), int(x_diff + new_width), int(y))
elif self.crop_from == 'right':
# x - xd = new_width
box = (int(xd), int(y_diff), int(x), int(y_diff + new_height))
else:
box = (int(x_diff), int(y_diff), int(x_diff + new_width), int(y_diff + new_height))
im = im.resize((int(x), int(y)), Image.ANTIALIAS).crop(box)
else:
if not new_width == 0 and not new_height == 0:
ratio = min(float(new_width) / cur_width,
float(new_height) / cur_height)
else:
if new_width == 0:
ratio = float(new_height) / cur_height
else:
ratio = float(new_width) / cur_width
new_dimensions = (int(round(cur_width * ratio)),
int(round(cur_height * ratio)))
if new_dimensions[0] > cur_width or \
new_dimensions[1] > cur_height:
if not photosize.upscale:
return im
im = im.resize(new_dimensions, Image.ANTIALIAS)
return im
def create_size(self, photosize):
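        # Rough pipeline: open original -> effect pre-process -> resize/crop ->
        # EXIF-based rotation -> watermark -> effect post-process -> encode
        # (JPEG uses the size's quality setting) -> save to storage.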
if self.size_exists(photosize):
return
try:
im = Image.open(self.image.storage.open(self.image.name))
except IOError:
return
# Save the original format
im_format = im.format
# Apply effect if found
if self.effect is not None:
im = self.effect.pre_process(im)
elif photosize.effect is not None:
im = photosize.effect.pre_process(im)
# Resize/crop image
if im.size != photosize.size and photosize.size != (0, 0):
im = self.resize_image(im, photosize)
# Rotate if found & necessary
if self.EXIF.get('Image Orientation', None) is not None:
im = im.transpose(IMAGE_EXIF_ORIENTATION_MAP[self.EXIF.get('Image Orientation', 1).values[0]])
# Apply watermark if found
if photosize.watermark is not None:
im = photosize.watermark.post_process(im)
# Apply effect if found
if self.effect is not None:
im = self.effect.post_process(im)
elif photosize.effect is not None:
im = photosize.effect.post_process(im)
# Save file
im_filename = getattr(self, "get_%s_filename" % photosize.name)()
try:
buffer = BytesIO()
if im_format != 'JPEG':
im.save(buffer, im_format)
else:
im.save(buffer, 'JPEG', quality=int(photosize.quality),
optimize=True)
buffer_contents = ContentFile(buffer.getvalue())
self.image.storage.save(im_filename, buffer_contents)
except IOError as e:
if self.image.storage.exists(im_filename):
self.image.storage.delete(im_filename)
raise e
def remove_size(self, photosize, remove_dirs=True):
if not self.size_exists(photosize):
return
filename = getattr(self, "get_%s_filename" % photosize.name)()
if self.image.storage.exists(filename):
self.image.storage.delete(filename)
def clear_cache(self):
cache = PhotoSizeCache()
for photosize in cache.sizes.values():
self.remove_size(photosize, False)
def pre_cache(self):
cache = PhotoSizeCache()
for photosize in cache.sizes.values():
if photosize.pre_cache:
self.create_size(photosize)
def save(self, *args, **kwargs):
if self.date_taken is None:
try:
exif_date = self.EXIF.get('EXIF DateTimeOriginal', None)
if exif_date is not None:
d, t = str.split(exif_date.values)
year, month, day = d.split(':')
hour, minute, second = t.split(':')
self.date_taken = datetime(int(year), int(month), int(day),
int(hour), int(minute), int(second))
except:
pass
if self.date_taken is None:
self.date_taken = now()
if self._get_pk_val():
self.clear_cache()
super(ImageModel, self).save(*args, **kwargs)
self.pre_cache()
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % \
(self._meta.object_name, self._meta.pk.attname)
self.clear_cache()
# Files associated to a FileField have to be manually deleted:
# https://docs.djangoproject.com/en/dev/releases/1.3/#deleting-a-model-doesn-t-delete-associated-files
# http://haineault.com/blog/147/
# The data loss scenarios mentioned in the docs hopefully do not apply
# to Photologue!
super(ImageModel, self).delete()
self.image.storage.delete(self.image.name)
@python_2_unicode_compatible
class Photo(ImageModel):
title = models.CharField(_('title'),
max_length=60,
unique=True)
slug = models.SlugField(_('slug'),
unique=True,
help_text=_('A "slug" is a unique URL-friendly title for an object.'))
caption = models.TextField(_('caption'),
blank=True)
date_added = models.DateTimeField(_('date added'),
default=now)
is_public = models.BooleanField(_('is public'),
default=True,
help_text=_('Public photographs will be displayed in the default views.'))
tags = TagField(help_text=tagfield_help_text, verbose_name=_('tags'))
sites = models.ManyToManyField(Site, verbose_name=_(u'sites'),
blank=True, null=True)
objects = PassThroughManager.for_queryset_class(PhotoQuerySet)()
class Meta:
ordering = ['-date_added']
get_latest_by = 'date_added'
verbose_name = _("photo")
verbose_name_plural = _("photos")
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if self.slug is None:
self.slug = slugify(self.title)
super(Photo, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('photologue:pl-photo', args=[self.slug])
def public_galleries(self):
"""Return the public galleries to which this photo belongs."""
return self.galleries.filter(is_public=True)
def get_previous_in_gallery(self, gallery):
"""Find the neighbour of this photo in the supplied gallery.
We assume that the gallery and all its photos are on the same site.
"""
if not self.is_public:
raise ValueError('Cannot determine neighbours of a non-public photo.')
photos = gallery.photos.is_public()
if self not in photos:
raise ValueError('Photo does not belong to gallery.')
previous = None
for photo in photos:
if photo == self:
return previous
previous = photo
def get_next_in_gallery(self, gallery):
"""Find the neighbour of this photo in the supplied gallery.
We assume that the gallery and all its photos are on the same site.
"""
if not self.is_public:
raise ValueError('Cannot determine neighbours of a non-public photo.')
photos = gallery.photos.is_public()
if self not in photos:
raise ValueError('Photo does not belong to gallery.')
matched = False
for photo in photos:
if matched:
return photo
if photo == self:
matched = True
return None
@property
def title_slug(self):
warnings.warn(
DeprecationWarning("`title_slug` field in Photo is being renamed to `slug`. Update your code."))
return self.slug
@python_2_unicode_compatible
class BaseEffect(models.Model):
name = models.CharField(_('name'),
max_length=30,
unique=True)
description = models.TextField(_('description'),
blank=True)
class Meta:
abstract = True
def sample_dir(self):
return os.path.join(PHOTOLOGUE_DIR, 'samples')
def sample_url(self):
return settings.MEDIA_URL + '/'.join([PHOTOLOGUE_DIR, 'samples', '%s %s.jpg' % (self.name.lower(), 'sample')])
def sample_filename(self):
return os.path.join(self.sample_dir(), '%s %s.jpg' % (self.name.lower(), 'sample'))
def create_sample(self):
try:
im = Image.open(SAMPLE_IMAGE_PATH)
except IOError:
raise IOError(
'Photologue was unable to open the sample image: %s.' % SAMPLE_IMAGE_PATH)
im = self.process(im)
buffer = BytesIO()
im.save(buffer, 'JPEG', quality=90, optimize=True)
buffer_contents = ContentFile(buffer.getvalue())
default_storage.save(self.sample_filename(), buffer_contents)
def admin_sample(self):
return u'<img src="%s">' % self.sample_url()
admin_sample.short_description = 'Sample'
admin_sample.allow_tags = True
def pre_process(self, im):
return im
def post_process(self, im):
return im
def process(self, im):
im = self.pre_process(im)
im = self.post_process(im)
return im
def __str__(self):
return self.name
def save(self, *args, **kwargs):
try:
default_storage.delete(self.sample_filename())
except:
pass
models.Model.save(self, *args, **kwargs)
self.create_sample()
for size in self.photo_sizes.all():
size.clear_cache()
# try to clear all related subclasses of ImageModel
for prop in [prop for prop in dir(self) if prop[-8:] == '_related']:
for obj in getattr(self, prop).all():
obj.clear_cache()
obj.pre_cache()
def delete(self):
try:
default_storage.delete(self.sample_filename())
except:
pass
models.Model.delete(self)
class PhotoEffect(BaseEffect):
""" A pre-defined effect to apply to photos """
transpose_method = models.CharField(_('rotate or flip'),
max_length=15,
blank=True,
choices=IMAGE_TRANSPOSE_CHOICES)
color = models.FloatField(_('color'),
default=1.0,
help_text=_('A factor of 0.0 gives a black and white image, a factor of 1.0 gives the '
'original image.'))
brightness = models.FloatField(_('brightness'),
default=1.0,
help_text=_('A factor of 0.0 gives a black image, a factor of 1.0 gives the '
'original image.'))
contrast = models.FloatField(_('contrast'),
default=1.0,
help_text=_('A factor of 0.0 gives a solid grey image, a factor of 1.0 gives the '
'original image.'))
sharpness = models.FloatField(_('sharpness'),
default=1.0,
help_text=_('A factor of 0.0 gives a blurred image, a factor of 1.0 gives the '
'original image.'))
filters = models.CharField(_('filters'),
max_length=200,
blank=True,
help_text=_(IMAGE_FILTERS_HELP_TEXT))
reflection_size = models.FloatField(_('size'),
default=0,
                                        help_text=_('The height of the reflection as a percentage of the original '
                                                    'image. A factor of 0.0 adds no reflection, a factor of 1.0 adds a'
                                                    ' reflection equal to the height of the original image.'))
reflection_strength = models.FloatField(_('strength'),
default=0.6,
help_text=_('The initial opacity of the reflection gradient.'))
background_color = models.CharField(_('color'),
max_length=7,
default="#FFFFFF",
help_text=_('The background color of the reflection gradient. Set this to '
'match the background color of your page.'))
class Meta:
verbose_name = _("photo effect")
verbose_name_plural = _("photo effects")
def pre_process(self, im):
if self.transpose_method != '':
method = getattr(Image, self.transpose_method)
im = im.transpose(method)
if im.mode != 'RGB' and im.mode != 'RGBA':
return im
for name in ['Color', 'Brightness', 'Contrast', 'Sharpness']:
factor = getattr(self, name.lower())
if factor != 1.0:
im = getattr(ImageEnhance, name)(im).enhance(factor)
for name in self.filters.split('->'):
image_filter = getattr(ImageFilter, name.upper(), None)
if image_filter is not None:
try:
im = im.filter(image_filter)
except ValueError:
pass
return im
def post_process(self, im):
if self.reflection_size != 0.0:
im = add_reflection(im, bgcolor=self.background_color,
amount=self.reflection_size, opacity=self.reflection_strength)
return im
class Watermark(BaseEffect):
image = models.ImageField(_('image'),
upload_to=PHOTOLOGUE_DIR + "/watermarks")
style = models.CharField(_('style'),
max_length=5,
choices=WATERMARK_STYLE_CHOICES,
default='scale')
opacity = models.FloatField(_('opacity'),
default=1,
help_text=_("The opacity of the overlay."))
class Meta:
verbose_name = _('watermark')
verbose_name_plural = _('watermarks')
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." \
% (self._meta.object_name, self._meta.pk.attname)
super(Watermark, self).delete()
self.image.storage.delete(self.image.name)
def post_process(self, im):
mark = Image.open(self.image.storage.open(self.image.name))
return apply_watermark(im, mark, self.style, self.opacity)
@python_2_unicode_compatible
class PhotoSize(models.Model):
"""About the Photosize name: it's used to create get_PHOTOSIZE_url() methods,
so the name has to follow the same restrictions as any Python method name,
e.g. no spaces or non-ascii characters."""
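    # For example, a size named "thumbnail" exposes photo.get_thumbnail_url(),
    # photo.get_thumbnail_size() and photo.get_thumbnail_filename() on
    # ImageModel subclasses (wired up via init_size_method_map and __getattr__).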
name = models.CharField(_('name'),
max_length=40,
unique=True,
help_text=_(
'Photo size name should contain only letters, numbers and underscores. Examples: '
'"thumbnail", "display", "small", "main_page_widget".'),
validators=[RegexValidator(regex='^[a-z0-9_]+$',
message='Use only plain lowercase letters (ASCII), numbers and '
'underscores.'
)]
)
width = models.PositiveIntegerField(_('width'),
default=0,
help_text=_(
'If width is set to "0" the image will be scaled to the supplied height.'))
height = models.PositiveIntegerField(_('height'),
default=0,
help_text=_(
'If height is set to "0" the image will be scaled to the supplied width'))
quality = models.PositiveIntegerField(_('quality'),
choices=JPEG_QUALITY_CHOICES,
default=70,
help_text=_('JPEG image quality.'))
upscale = models.BooleanField(_('upscale images?'),
default=False,
help_text=_('If selected the image will be scaled up if necessary to fit the '
'supplied dimensions. Cropped sizes will be upscaled regardless of this '
'setting.')
)
crop = models.BooleanField(_('crop to fit?'),
default=False,
help_text=_('If selected the image will be scaled and cropped to fit the supplied '
'dimensions.'))
pre_cache = models.BooleanField(_('pre-cache?'),
default=False,
help_text=_('If selected this photo size will be pre-cached as photos are added.'))
increment_count = models.BooleanField(_('increment view count?'),
default=False,
help_text=_('If selected the image\'s "view_count" will be incremented when '
'this photo size is displayed.'))
effect = models.ForeignKey('PhotoEffect',
null=True,
blank=True,
related_name='photo_sizes',
verbose_name=_('photo effect'))
watermark = models.ForeignKey('Watermark',
null=True,
blank=True,
related_name='photo_sizes',
verbose_name=_('watermark image'))
class Meta:
ordering = ['width', 'height']
verbose_name = _('photo size')
verbose_name_plural = _('photo sizes')
def __str__(self):
return self.name
def clear_cache(self):
for cls in ImageModel.__subclasses__():
for obj in cls.objects.all():
obj.remove_size(self)
if self.pre_cache:
obj.create_size(self)
PhotoSizeCache().reset()
def clean(self):
if self.crop is True:
if self.width == 0 or self.height == 0:
raise ValidationError(
_("Can only crop photos if both width and height dimensions are set."))
def save(self, *args, **kwargs):
super(PhotoSize, self).save(*args, **kwargs)
PhotoSizeCache().reset()
self.clear_cache()
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." \
% (self._meta.object_name, self._meta.pk.attname)
self.clear_cache()
super(PhotoSize, self).delete()
def _get_size(self):
return (self.width, self.height)
def _set_size(self, value):
self.width, self.height = value
size = property(_get_size, _set_size)
class PhotoSizeCache(object):
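    # Borg/monostate pattern: all instances share __state, so `sizes` acts as a
    # process-wide cache of PhotoSize objects keyed by name.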
__state = {"sizes": {}}
def __init__(self):
self.__dict__ = self.__state
if not len(self.sizes):
sizes = PhotoSize.objects.all()
for size in sizes:
self.sizes[size.name] = size
def reset(self):
global size_method_map
size_method_map = {}
self.sizes = {}
def init_size_method_map():
global size_method_map
for size in PhotoSizeCache().sizes.keys():
size_method_map['get_%s_size' % size] = \
{'base_name': '_get_SIZE_size', 'size': size}
size_method_map['get_%s_photosize' % size] = \
{'base_name': '_get_SIZE_photosize', 'size': size}
size_method_map['get_%s_url' % size] = \
{'base_name': '_get_SIZE_url', 'size': size}
size_method_map['get_%s_filename' % size] = \
{'base_name': '_get_SIZE_filename', 'size': size}
def add_default_site(instance, created, **kwargs):
"""
Called via Django's signals when an instance is created.
In case PHOTOLOGUE_MULTISITE is False, the current site (i.e.
``settings.SITE_ID``) will always be added to the site relations if none are
present.
"""
if not created:
return
if getattr(settings, 'PHOTOLOGUE_MULTISITE', False):
return
if instance.sites.exists():
return
instance.sites.add(Site.objects.get_current())
post_save.connect(add_default_site, sender=Gallery)
post_save.connect(add_default_site, sender=Photo)
|
anderdl/test3repo
|
photologue/models.py
|
Python
|
bsd-3-clause
| 36,475
|
"""
IT-specific Form helpers
"""
from django.newforms import ValidationError
from django.newforms.fields import Field, RegexField, Select, EMPTY_VALUES
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from django.contrib.localflavor.it.util import ssn_check_digit, vat_number_check_digit
import re
class ITZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a valid zip code.'),
}
def __init__(self, *args, **kwargs):
super(ITZipCodeField, self).__init__(r'^\d{5}$',
max_length=None, min_length=None, *args, **kwargs)
class ITRegionSelect(Select):
"""
A Select widget that uses a list of IT regions as its choices.
"""
def __init__(self, attrs=None):
from it_region import REGION_CHOICES
super(ITRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ITProvinceSelect(Select):
"""
A Select widget that uses a list of IT provinces as its choices.
"""
def __init__(self, attrs=None):
from it_province import PROVINCE_CHOICES
super(ITProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ITSocialSecurityNumberField(RegexField):
"""
A form field that validates Italian Social Security numbers (codice fiscale).
For reference see http://www.agenziaentrate.it/ and search for
'Informazioni sulla codificazione delle persone fisiche'.
"""
default_error_messages = {
'invalid': _(u'Enter a valid Social Security number.'),
}
def __init__(self, *args, **kwargs):
super(ITSocialSecurityNumberField, self).__init__(r'^\w{3}\s*\w{3}\s*\w{5}\s*\w{5}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
value = super(ITSocialSecurityNumberField, self).clean(value)
if value == u'':
return value
value = re.sub('\s', u'', value).upper()
try:
check_digit = ssn_check_digit(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'])
if not value[15] == check_digit:
raise ValidationError(self.error_messages['invalid'])
return value
class ITVatNumberField(Field):
"""
A form field that validates Italian VAT numbers (partita IVA).
"""
default_error_messages = {
'invalid': _(u'Enter a valid VAT number.'),
}
def clean(self, value):
value = super(ITVatNumberField, self).clean(value)
if value == u'':
return value
try:
vat_number = int(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'])
vat_number = str(vat_number).zfill(11)
check_digit = vat_number_check_digit(vat_number[0:10])
if not vat_number[10] == check_digit:
raise ValidationError(self.error_messages['invalid'])
return smart_unicode(vat_number)
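# A hypothetical sketch of how these fields might be combined in a form,
# assuming the old django.newforms API used throughout this module:
#
#     from django.newforms import Form
#
#     class CompanyForm(Form):
#         vat = ITVatNumberField()
#         ssn = ITSocialSecurityNumberField(required=False)
#         zip_code = ITZipCodeField()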
|
diofeher/django-nfa
|
django/contrib/localflavor/it/forms.py
|
Python
|
bsd-3-clause
| 2,985
|
from pressgang.actions.exceptions import ActionError
class BlogAdditionError(ActionError):
"""An error that occurred during the adding of sub-blogs to a blog."""
pass
|
oberlin/pressgang
|
pressgang/actions/install/addblogs/exceptions.py
|
Python
|
bsd-3-clause
| 171
|
#!/usr/bin/env python
# Copyright 2017 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script creates a qemu image manifest for Fuchsia that contains the
# Dart tree. In particular, it contains Dart's test suite and test harness.
import argparse
import json
import os
import sys
import utils
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
FUCHSIA_ROOT = os.path.realpath(os.path.join(DART_ROOT, '..', '..'))
FUCHSIA_TEST_MANIFEST_PREFIX = os.path.join('test', 'dart')
EXCLUDE_DIRS = ['.git', 'out', '.jiri']
BINARY_FILES = ['dart', 'run_vm_tests', 'process_test']
def parse_args(args):
args = args[1:]
parser = argparse.ArgumentParser(
description='A script that generates Dart/Fuchsia test commands.')
parser.add_argument(
'--arch',
'-a',
type=str,
help='Target architectures (comma-separated).',
metavar='[x64]',
default='x64')
parser.add_argument(
'--mode',
'-m',
type=str,
help='Build variant',
metavar='[debug,release]',
default='debug')
parser.add_argument(
'--output', '-o', type=str, help='Path to output file prefix.')
parser.add_argument(
"-v",
"--verbose",
help='Verbose output.',
default=False,
action="store_true")
return parser.parse_args(args)
def fuchsia_arch(arch):
    if arch == 'x64':
        return 'x86-64'
    return None
def main(argv):
args = parse_args(argv)
manifest_output = args.output + '.manifest'
with open(manifest_output, 'w') as manifest:
# Write the Dart tree.
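        # Each manifest line has the form '<path inside the Fuchsia image>=<host path>'.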
for root, dirs, files in os.walk(DART_ROOT):
dirs[:] = [d for d in dirs if d not in EXCLUDE_DIRS]
for file in files:
filepath = os.path.join(root, file)
relpath = filepath[len(DART_ROOT) + 1:]
fuchsiapath = os.path.join(FUCHSIA_TEST_MANIFEST_PREFIX,
relpath)
manifest.write(
'%s=%s\n' % (fuchsiapath, os.path.join(root, file)))
dart_conf = utils.GetBuildConf(args.mode, args.arch)
dart_out = os.path.join(FUCHSIA_TEST_MANIFEST_PREFIX, 'out', dart_conf)
fuchsia_conf = '%s-%s' % (args.mode, fuchsia_arch(args.arch))
fuchsia_out = os.path.join(FUCHSIA_ROOT, 'out', fuchsia_conf)
for file in BINARY_FILES:
manifest.write('%s=%s\n' % (os.path.join(dart_out, file),
os.path.join(fuchsia_out, file)))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
dartino/dart-sdk
|
tools/gen_fuchsia_test_manifest.py
|
Python
|
bsd-3-clause
| 2,795
|
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epoll', 'poll', 'poll-cv']
}
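# On Linux each C test is run once per polling engine, with GRPC_POLL_STRATEGY
# set accordingly; other platforms fall back to the single 'all' strategy
# (see CLanguage.test_specs below).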
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
def get_c_tests(travis, test_lang) :
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
  """Returns True if running as a --use_docker child."""
  return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
elif self.platform == 'windows':
self._use_cmake = False
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
try:
cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True)
else ['all'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
timeout_scaling = 1
if polling_strategy == 'poll-cv':
timeout_scaling *= 5
if polling_strategy in target.get('excluded_poll_engines', []):
continue
# Scale overall test timeout if running under various sanitizers.
config = self.args.config
if ('asan' in config
or config == 'msan'
or config == 'tsan'
or config == 'ubsan'
or config == 'helgrind'
or config == 'memcheck'):
timeout_scaling *= 20
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
if self._use_cmake:
binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
else:
binary = 'vsprojects/%s%s/%s.exe' % (
'x64/' if self.args.arch == 'x64' else '',
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
            # Parse the output of --gtest_list_tests to build a complete list of
            # the tests contained in the binary; for each test we then add a job
            # that runs the binary filtered to just that test.
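            # The listing output looks roughly like (hypothetical example):
            #   SuiteName.
            #     TestOne
            #     TestTwo  # optional trailing comment
            # Flush-left lines name the suite (kept as the prefix); indented
            # lines are the individual tests appended to that prefix.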
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output([binary, '--gtest_list_tests'],
stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
out.append(self.config.job_spec(cmdline,
shortname='%s %s' % (' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
out.append(self.config.job_spec(cmdline,
shortname=' '.join(
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target]
def make_options(self):
return self._make_options;
def pre_build_steps(self):
if self._use_cmake:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return ['CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix]
def _gcc_make_options(self, version_suffix):
return ['CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.4':
return ('wheezy', self._gcc_make_options(version_suffix='-4.4'))
elif compiler == 'gcc4.6':
return ('wheezy', self._gcc_make_options(version_suffix='-4.6'))
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
class NodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6',
'node7', 'electron1.3'])
if args.iomgr_platform == "uv":
self.use_uv = True
else:
self.use_uv = False
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '7'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
if self.platform == 'windows':
return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
run_script = 'run_node'
if self.runtime == 'electron':
run_script += '_electron'
return [self.config.job_spec(['tools/run_tests/helper_scripts/{}.sh'.format(run_script),
self.node_version],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
build_script = 'pre_build_node'
if self.runtime == 'electron':
build_script += '_electron'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
if self.config == 'dbg':
config_flag = '--debug'
else:
config_flag = '--release'
return [['tools\\run_tests\\helper_scripts\\build_node.bat',
'--grpc_uv={}'.format('true' if self.use_uv else 'false'),
config_flag]]
else:
build_script = 'build_node'
if self.runtime == 'electron':
build_script += '_electron'
# building for electron requires a patch version
self.node_version += '.0'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version,
'--grpc_uv={}'.format('true' if self.use_uv else 'false')]]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(collections.namedtuple('PythonConfig', [
'name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [self.config.job_spec(
config.run,
timeout_seconds=5*60,
environ=dict(list(environment.items()) +
[('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
for config in self.pythons]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
return 'pyenv' if self.args.compiler in ['python3.5', 'python3.6'] else 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
minor='7', bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(name='py34', major='3',
minor='4', bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(name='py35', major='3',
minor='5', bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(name='py36', major='3',
minor='6', bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(name='pypy', major='2',
config_vars=config_vars)
pypy32_config = _pypy_config_generator(name='pypy3', major='3',
config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python27_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['coreclr', 'default'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64' if self.args.compiler == 'coreclr' else 'Win32'
self._make_options = []
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
if self.platform == 'linux' and self.args.compiler == 'coreclr':
self._docker_distro = 'coreclr'
else:
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['CFLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
nunit_args += ['--noresult', '--workers=1']
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
assembly_subdir,
assembly,
assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file,
'-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*',
'-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
    return self._make_options
def build_steps(self):
if self.args.compiler == 'coreclr':
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp_coreclr.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp_coreclr.sh']]
else:
if self.platform == 'windows':
return [['vsprojects\\build_vs2015.bat',
'src/csharp/Grpc.sln',
'/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
timeout_seconds=30*60,
shortname='objc-examples-build',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ={'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
timeout_seconds=30*60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
class NodeExpressLanguage(object):
"""Dummy Node express test target to enable running express performance
benchmarks"""
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6'])
if self.args.compiler == 'default':
self.node_version = '4'
else:
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
return []
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node_express'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'node': NodeLanguage(),
'node_express': NodeExpressLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc' : ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.' % arch)
sys.exit(1)
else:
if args.arch != 'default':
print('Architecture %s not supported on current platform.' % args.arch)
sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxilary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
except:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
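# Illustrative parses (hypothetical invocations): runs_per_test_type('3') returns 3,
# runs_per_test_type('inf') returns 0 (meaning "run forever"), and
# runs_per_test_type('0') raises argparse.ArgumentTypeError.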
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
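# Used below to detect whether --sample_percent was left at (effectively) 100.0;
# with the default tolerances, isclose(100.0, 100.0) is True while
# isclose(99.9, 100.0) is False.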
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
choices=sorted(_CONFIGS.keys()),
default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument('-f', '--forever',
default=False,
action='store_const',
const=True)
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--newline_on_success',
default=False,
action='store_const',
const=True)
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument('-S', '--stop_on_failure',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2013', 'vs2015',
'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3',
'node0.12', 'node4', 'node5', 'node6', 'node7',
'electron1.3',
'coreclr',
'cmake'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
                  help="Perform all the build steps but don't run any tests.")
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. ' +
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument('--quiet_success',
default=False,
action='store_const',
const=True,
                  help="Don't print anything when a test passes. Passing tests also will not be reported in XML report. " +
'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                  help="Don't try to iterate over many polling strategies when they exist")
args = argp.parse_args()
if args.force_default_poller:
_POLLING_STRATEGIES = {}
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print('WARNING: may need to regenerate projects, but since we are not on')
print(' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options=[]
if any(language.make_options() for language in languages):
  if 'gcov' not in args.config and len(languages) != 1:
print('languages with custom make options cannot be built simultaneously with other languages')
sys.exit(1)
else:
language_make_options = next(iter(languages)).make_options()
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print ('Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print ('Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
  child_argv = [arg for arg in sys.argv if arg != '--use_docker']
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
if makefile.startswith('cmake/build/'):
return [jobset.JobSpec(['cmake', '--build', '.',
'--target', '%s' % target,
'--config', _MSBUILD_CONFIG[cfg]],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets]
extra_args = []
# better do parallel compilation
# empirically /m:2 gives the best performance/price and should prevent
# overloading the windows workers.
extra_args.extend(['/m:2'])
# disable PDB generation: it's broken, and we don't need it during CI
extra_args.extend(['/p:Jenkins=true'])
return [
jobset.JobSpec([_windows_build_bat(args.compiler),
'vsprojects\\%s.sln' % target,
'/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
extra_args +
language_make_options,
shell=True, timeout_seconds=None)
for target in targets]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-j', '%d' % args.jobs] +
targets,
cwd='cmake/build',
timeout_seconds=None)]
if targets:
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-f', makefile,
'-j', '%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
'CONFIG=%s' % cfg] +
language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) +
targets,
timeout_seconds=None)]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
build_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
post_tests_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen(
'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
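# Illustrative example (hypothetical JobResult values): three results with
# retries of 0, 2, 0 and num_failures of 0, 2, 1 give num_runs = 3 + 2 = 5
# and num_failures = 2 + 1 = 3.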
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
check_cancelled, newline_on_success, xml_report=None, build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
return []
# start antagonists
antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(
spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
    # When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent/100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if xml_report and resultset:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
previous_success = success
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            build_only=args.build_only)
    success = not errors
    if not previous_success and success:
jobset.message('SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
|
a11r/grpc
|
tools/run_tests/run_tests.py
|
Python
|
bsd-3-clause
| 55,359
|
from numpy import arange, pi, zeros, exp
from scipy import *
from spectralTransform import specTrans2d
import inversion


class specDiffusion(object):
    def __init__(self, numPointsX, numPointsY, alpha, nu,
                 order=8., length=2*pi, xType='Fourier', yType='Fourier'):
        # nu is the higher "order" dissipation coefficient
        # alpha is the linear drag
        self.xn = numPointsX
        self.yn = numPointsY
        self.xType = xType
        self.yType = yType
        self.alpha = alpha
        self.nu = nu
        self.order = order
        self.trans = specTrans2d(numPointsX, numPointsY, xType, yType)
        # Prepare the wavenumber arrays
        self.kxx = (2*pi/length)*concatenate((arange(0, numPointsX/2), arange(-numPointsX/2, 0)))
        self.kyy = (2*pi/length)*concatenate((arange(0, numPointsY/2), arange(-numPointsY/2, 0)))

    def diffusionFn(self, dt, field):
        [kx, ky] = meshgrid(self.kxx, self.kyy)
        self.trans.fwdTrans(field)
        temp = self.trans.intArr
        temp *= exp(-(self.nu*(kx**self.order + ky**self.order) + self.alpha) * dt)
        temp[sqrt(kx**2 + ky**2) > min(self.xn, self.yn)/3.] = 0
        self.trans.invTrans()
        return self.trans.outArr.real.copy()
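# Minimal usage sketch (grid size and coefficients are illustrative only, and it
# assumes a 2-D field array `q` on that grid plus a working spectralTransform module):
#   diff = specDiffusion(128, 128, alpha=1e-3, nu=1e-8)
#   q = diff.diffusionFn(dt=0.01, field=q)
# Each call damps the field by exp(-(nu*(kx**order + ky**order) + alpha)*dt) in
# spectral space and zeroes wavenumbers above min(xn, yn)/3 for dealiasing.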
|
JoyMonteiro/parSpectral
|
pspec/diffusion.py
|
Python
|
bsd-3-clause
| 1,310
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import re
import tablemanager.models
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ForeignServer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.SlugField(unique=True, max_length=255)),
('user', models.CharField(max_length=320)),
('password', models.CharField(max_length=320)),
('sql', tablemanager.models.SQLField(default="CREATE SERVER {{self.name}} FOREIGN DATA WRAPPER oracle_fdw OPTIONS (dbserver '//<hostname>/<sid>');")),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ForeignTable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.SlugField(unique=True, max_length=255)),
('sql', tablemanager.models.SQLField(default="CREATE FOREIGN TABLE {{self.name}} () SERVER {{self.server.name}} OPTIONS (schema '<schema>', table '<table>');")),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('server', models.ForeignKey(to='tablemanager.ForeignServer')),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Input',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('job_batch_id', models.CharField(max_length=64, null=True, editable=False)),
('job_id', models.IntegerField(null=True, editable=False, db_index=True)),
('job_state', models.CharField(max_length=64, null=True, editable=False)),
('job_status', models.NullBooleanField(editable=False)),
('job_message', models.TextField(null=True, editable=False)),
('job_run_time', models.DateTimeField(null=True, editable=False)),
('name', models.SlugField(help_text='Name of table in harvest DB', unique=True, max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('generate_rowid', models.BooleanField(default=False, help_text="If true, a _rowid column will be generated with row data's hash value")),
('source', tablemanager.models.XMLField(default='<OGRVRTDataSource>\n <OGRVRTLayer name="tablename">\n <SrcDataSource>PG:dbname=databasename host=\'addr\' port=\'5432\' user=\'x\' password=\'y\'</SrcDataSource>\n </OGRVRTLayer>\n</OGRVRTDataSource>', help_text='GDAL VRT definition in xml', unique=True)),
('info', models.TextField(editable=False)),
('spatial_type', models.IntegerField(default=1, editable=False)),
('create_table_sql', models.TextField(null=True, editable=False)),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('data_source', models.ForeignKey(to='tablemanager.DataSource')),
('foreign_table', models.ForeignKey(blank=True, to='tablemanager.ForeignTable', help_text='Foreign table to update VRT from', null=True)),
],
options={
'ordering': ['data_source', 'name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Normalise',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('job_batch_id', models.CharField(max_length=64, null=True, editable=False)),
('job_id', models.IntegerField(null=True, editable=False, db_index=True)),
('job_state', models.CharField(max_length=64, null=True, editable=False)),
('job_status', models.NullBooleanField(editable=False)),
('job_message', models.TextField(null=True, editable=False)),
('job_run_time', models.DateTimeField(null=True, editable=False)),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('name', models.CharField(unique=True, max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('sql', tablemanager.models.SQLField(default='CREATE FUNCTION {{trans_schema}}.{{self.func_name}}() RETURNS SETOF {{normal_schema}}.{{self.output_table.name}} as $$\nBEGIN\n RETURN QUERY SELECT * FROM {{input_schema}}.{{self.input_table.name}};\nEND;\n$$ LANGUAGE plpgsql;')),
('input_table', models.ForeignKey(to='tablemanager.Input')),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Normalise_NormalTable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NormalTable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('create_sql', tablemanager.models.SQLField(default='CREATE TABLE {{self.name}} (name varchar(32) unique);')),
('priority', models.PositiveIntegerField(default=1000)),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('normalise', models.OneToOneField(null=True, editable=False, to='tablemanager.Normalise')),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Publish',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('job_batch_id', models.CharField(max_length=64, null=True, editable=False)),
('job_id', models.IntegerField(null=True, editable=False, db_index=True)),
('job_state', models.CharField(max_length=64, null=True, editable=False)),
('job_status', models.NullBooleanField(editable=False)),
('job_message', models.TextField(null=True, editable=False)),
('job_run_time', models.DateTimeField(null=True, editable=False)),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('name', models.CharField(db_index=True, max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('interval', models.CharField(default=b'Weekly', max_length=64, choices=[(b'Manually', b'Manually'), (b'Hourly', b'Hourly'), (b'Daily', b'Daily'), (b'Weekly', b'Weekly'), (b'Monthly', b'Monthly')])),
('status', models.CharField(default=b'Enabled', max_length=32, choices=[(b'Enabled', b'Enabled'), (b'Disabled', b'Disabled')])),
('sql', tablemanager.models.SQLField(default='CREATE FUNCTION {{trans_schema}}.{{self.func_name}}() RETURNS SETOF {{input_table_schema}}.{{input_table_name}} as $$\nBEGIN\n RETURN QUERY SELECT * FROM {{input_table_schema}}.{{input_table_name}};\nEND;\n$$ LANGUAGE plpgsql;')),
('spatial_type', models.IntegerField(default=1, editable=False)),
('create_extra_index_sql', tablemanager.models.SQLField(null=True, blank=True)),
('priority', models.PositiveIntegerField(default=1000)),
('pgdump_file', models.FileField(null=True, editable=False)),
('style_file', models.FileField(null=True, editable=False)),
('create_table_sql', tablemanager.models.SQLField(null=True, editable=False)),
('running', models.PositiveIntegerField(default=0, editable=False)),
('completed', models.PositiveIntegerField(default=0, editable=False)),
('failed', models.PositiveIntegerField(default=0, editable=False)),
('waiting', models.PositiveIntegerField(default=0, editable=False)),
('job_create_time', models.DateTimeField(null=True, editable=False)),
('job_start_time', models.DateTimeField(null=True, editable=False)),
('job_end_time', models.DateTimeField(null=True, editable=False)),
('input_table', models.ForeignKey(blank=True, to='tablemanager.Input', null=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Publish_NormalTable',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('normal_table_1', models.ForeignKey(related_name='publish_normaltable_1', blank=True, to='tablemanager.NormalTable', null=True)),
('normal_table_2', models.ForeignKey(related_name='publish_normaltable_2', blank=True, to='tablemanager.NormalTable', null=True)),
('normal_table_3', models.ForeignKey(related_name='publish_normaltable_3', blank=True, to='tablemanager.NormalTable', null=True)),
('normal_table_4', models.ForeignKey(related_name='publish_normaltable_4', blank=True, to='tablemanager.NormalTable', null=True)),
('publish', models.ForeignKey(blank=True, to='tablemanager.Publish', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PublishChannel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.SlugField(help_text='Name of publish destination', unique=True, max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('sync_postgres_data', models.BooleanField(default=True)),
('sync_geoserver_data', models.BooleanField(default=True)),
('last_modify_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Replica',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('active', models.BooleanField(default=True)),
('namespace', models.BooleanField(default=True, help_text='Use schemas to namespace replicated tables, if not will use a prefix')),
('name', models.CharField(max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('link', models.TextField(default="CREATE SERVER {{self.name}} FOREIGN DATA WRAPPER postgres_fdw OPTIONS (dbserver '//<hostname>/<sid>');")),
('includes', models.ManyToManyField(help_text='Published tables to include, all if blank', to='tablemanager.Publish', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Workspace',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^[a-z0-9_]+$'), 'Slug can only contain lowercase letters, numbers and underscores', 'invalid')])),
('publish_channel', models.ForeignKey(to='tablemanager.PublishChannel')),
],
options={
'ordering': ['publish_channel', 'name'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='workspace',
unique_together=set([('publish_channel', 'name')]),
),
migrations.AddField(
model_name='publish',
name='relation_1',
field=models.OneToOneField(related_name='publish_1', null=True, blank=True, editable=False, to='tablemanager.Publish_NormalTable'),
preserve_default=True,
),
migrations.AddField(
model_name='publish',
name='relation_2',
field=models.OneToOneField(related_name='publish_2', null=True, blank=True, editable=False, to='tablemanager.Publish_NormalTable'),
preserve_default=True,
),
migrations.AddField(
model_name='publish',
name='relation_3',
field=models.OneToOneField(related_name='publish_3', null=True, blank=True, editable=False, to='tablemanager.Publish_NormalTable'),
preserve_default=True,
),
migrations.AddField(
model_name='publish',
name='workspace',
field=models.ForeignKey(to='tablemanager.Workspace'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='publish',
unique_together=set([('workspace', 'name')]),
),
migrations.AddField(
model_name='normalise_normaltable',
name='normal_table_1',
field=models.ForeignKey(related_name='normalise_normaltable_1', blank=True, to='tablemanager.NormalTable', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='normalise_normaltable',
name='normal_table_2',
field=models.ForeignKey(related_name='normalise_normaltable_2', blank=True, to='tablemanager.NormalTable', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='normalise_normaltable',
name='normal_table_3',
field=models.ForeignKey(related_name='normalise_normaltable_3', blank=True, to='tablemanager.NormalTable', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='normalise_normaltable',
name='normal_table_4',
field=models.ForeignKey(related_name='normalise_normaltable_4', blank=True, to='tablemanager.NormalTable', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='normalise_normaltable',
name='normalise',
field=models.ForeignKey(blank=True, to='tablemanager.Normalise', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='normalise',
name='relation_1',
field=models.OneToOneField(related_name='normalise_1', null=True, blank=True, editable=False, to='tablemanager.Normalise_NormalTable'),
preserve_default=True,
),
migrations.AddField(
model_name='normalise',
name='relation_2',
field=models.OneToOneField(related_name='normalise_2', null=True, blank=True, editable=False, to='tablemanager.Normalise_NormalTable'),
preserve_default=True,
),
migrations.AddField(
model_name='normalise',
name='relation_3',
field=models.OneToOneField(related_name='normalise_3', null=True, blank=True, editable=False, to='tablemanager.Normalise_NormalTable'),
preserve_default=True,
),
]
|
parksandwildlife/borgcollector
|
tablemanager/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 17,682
|
#!/usr/bin/env python
def igs_test(dry_run, target_dir, exp_name, group='', param_l=[]):
from scripts.conf.conf import machine_info
from scripts.pluto.pluto_utils import run_pluto_test
import itertools, os, pickle
from os.path import join as jpath
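    # NOTE: pluto_tuner() and tee() are called further down but are not imported
    # in this snippet; they are assumed to come from the project's helper modules.
    # Also note that param_l is used as a dict keyed by (kernel, N, group),
    # despite the list default in the function signature.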
target_dir = jpath(os.path.abspath("."),target_dir)
# machine_info['hostname']=='IVB_10core'
kernels_limits = {'3d25pt':1089, '3d7pt':1217, '3d25pt_var':577, '3d7pt_var':769}
increment = 64
if(machine_info['hostname']=='Haswell_18core'):
kernels_limits = {'3d25pt':1281, '3d7pt':1409, '3d25pt_var':769, '3d7pt_var':897}
increment = 128
points = dict()
points['3d7pt'] = [64] + list(range(128, 5000, increment))
points['3d7pt_var'] = points['3d7pt']
points['3d25pt'] = points['3d7pt']
points['3d25pt_var'] = points['3d7pt']
count=0
#for kernel in ['3d7pt', '3d7pt_var', '3d25pt']:#, '3d25pt_var']:
for kernel in [ '3d25pt', '3d25pt_var']:
for N in points[kernel]:
if (N < kernels_limits[kernel]):
outfile=('pluto_kernel_%s_N%d_%s_%s.txt' % (kernel, N, group, exp_name[-13:]))
outfile = jpath(target_dir, outfile)
if(dry_run==1):
nt=32; param=[-1,-1,-1]
# nt = max(int(k_time_scale[kernel]/(N**3/1e6)), 30)
if (kernel, N, group) in param_l.keys():
continue # results exist for this test case
if (kernel, N, 'MEM') in param_l.keys(): # use the tuned params of memory results
if(dry_run==0): fp = open(outfile, 'w')
param, nt = param_l[(kernel, N, 'MEM')]
nt = nt*2
else:
# continue
if(dry_run==0):
fp = open(outfile, 'w')
param, nt, tune_res = pluto_tuner(kernel=kernel, nx=N, ny=N, nz=N, fp=fp)
with open(outfile[:-3]+'p', 'w') as fpickle:
pickle.dump(tune_res, fpickle)
if(dry_run==0): tee(fp, outfile)
# print outfile, param
test_str, telapsed = run_pluto_test(dry_run=dry_run, kernel=kernel, nx=N, ny=N, nz=N, nt=nt, params=param, outfile=outfile)
if(dry_run==0):
tee(fp, test_str)
fp.close()
count = count+1
return count
def main():
from scripts.utils import create_project_tarball, get_stencil_num
from scripts.conf.conf import machine_conf, machine_info
import os, sys
from csv import DictReader
import time,datetime
dry_run = 1 if len(sys.argv)<2 else int(sys.argv[1])
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H_%M')
exp_name = "pluto_increasing_grid_size_at_%s_%s" % (machine_info['hostname'], time_stamp)
tarball_dir='results/'+exp_name
if(dry_run==0): create_project_tarball(tarball_dir, "test_"+exp_name)
target_dir='results/' + exp_name
    # parse the results to find out which of them already exist
data = []
data_file = os.path.join('results', 'summary.csv')
try:
with open(data_file, 'rb') as output_file:
raw_data = DictReader(output_file)
for k in raw_data:
kernel = get_stencil_num(k)
if(kernel==0):
k['stencil'] ='3d25pt'
elif(kernel==1):
k['stencil'] ='3d7pt'
elif(kernel==4):
k['stencil'] ='3d25pt_var'
elif(kernel==5):
k['stencil'] ='3d7pt_var'
else:
raise
data.append(k)
except:
pass
param_l = dict()
for k in data:
try:
param_l[(k['stencil'], int(k['Global NX']), k['LIKWID performance counter'] ) ] = ([int(k['PLUTO tile size of loop 1']), int(k['PLUTO tile size of loop 3']), int(k['PLUTO tile size of loop 4'])], int(k['Number of time steps']) )
except:
print k
raise
#update the pinning information to use all cores
th = machine_info['n_cores']
pin_str = "0-%d "%(th-1)
count = 0
for group in ['MEM']:
# for group in ['MEM', 'L2', 'L3', 'DATA', 'TLB_DATA', 'ENERGY']:
if(machine_info['hostname']=='Haswell_18core'):
machine_conf['pinning_args'] = " -m -g " + group + " -C S1:" + pin_str
elif(machine_info['hostname']=='IVB_10core'):
if group=='TLB_DATA': group='TLB'
machine_conf['pinning_args'] = " -g " + group + " -C S0:" + pin_str
# for k,v in param_l.iteritems(): print k,v
count = count + igs_test(dry_run, target_dir, exp_name, param_l=param_l, group=group)
print "experiments count =" + str(count)
if __name__ == "__main__":
main()
|
tareqmalas/girih
|
scripts/pluto/test_pluto_increasing_grid_size.py
|
Python
|
bsd-3-clause
| 4,390
|
from __future__ import print_function
import os
import subprocess
import unittest
class TestUnknownInitializerTypes(unittest.TestCase):
def test_unknown_initializer_types(self):
output = None
if os.environ['ROS_PYTHON_VERSION'] == '2':
output = subprocess.check_output(
["/usr/bin/env", "python", "-c", "'import pyexotica as exo ; initializers = exo.Setup.get_initializers()'"])
elif os.environ['ROS_PYTHON_VERSION'] == '3':
output = subprocess.check_output(
["/usr/bin/env", "python3", "-c", "'import pyexotica as exo ; initializers = exo.Setup.get_initializers()'"])
else:
raise AssertionError("Unknown ROS_PYTHON_VERSION")
if b"Skipping" in output:
raise AssertionError(output)
if __name__ == '__main__':
unittest.main()
|
openhumanoids/exotica
|
exotica_python/test/test_no_unknown_initializer_types.py
|
Python
|
bsd-3-clause
| 857
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-11-02 09:50
from __future__ import unicode_literals
from django.db import migrations, models
import contentstore.models
class Migration(migrations.Migration):
dependencies = [("contentstore", "0006_messageset_channel")]
operations = [
migrations.AlterField(
model_name="message",
name="text_content",
field=models.TextField(
blank=True,
null=True,
validators=[contentstore.models.validate_special_characters],
),
)
]
|
praekelt/seed-stage-based-messaging
|
contentstore/migrations/0007_auto_20171102_0950.py
|
Python
|
bsd-3-clause
| 608
|
"""
Set of tools to read SDF format.
First coded: 20111227 by Tiago Pereira (tiago.pereira@nasa.gov)
"""
import numpy as np
class SDFHeader:
def __init__(self, filename, verbose=False):
self.verbose = verbose
self.query(filename)
def query(self, filename, verbose=False):
''' Queries the file, returning datasets and shapes.'''
f = open(filename, 'r')
h = f.read(11)
hdstr = str(h[:-1])
if hdstr != 'SDF format':
raise IOError('SDF header not found in' +
' %s, probably wrong or corrupt file.' % filename)
self.hdrpos = np.fromfile(f, dtype='>l', count=1)[0]
self.datapos = np.fromfile(f, dtype='>l', count=1)[0]
self.norder = np.fromfile(f, dtype='>i', count=1)[0]
self.hdrsize = np.fromfile(f, dtype='>l', count=1)[0]
header = f.read(self.hdrpos - f.tell())
self.header = header
if self.verbose:
print(header)
f.close()
self.header_data(header)
return
def header_data(self, header):
        ''' Breaks header string into variable information. '''
self.variables = {}
offset = 19 + self.hdrsize
for line in header.split('\n')[:-1]:
l = line.split()
label = l.pop(1)
order = int(l[0])
dtype = '>' + l[1] + l[2] # force big endian
nbpw = int(l[2])
ndims = int(l[3])
shape = ()
for i in range(ndims):
shape += (int(l[4 + i]),)
nbytes = nbpw * np.prod(shape)
if dtype[1] == 'c':
nbytes *= 2
if dtype[1:] == 'c4': # these are the same internally to numpy
dtype = '>c8'
self.variables[label] = [order, dtype, nbpw, offset, shape]
offset += nbytes
return
def getvar(filename, variable, memmap=False):
''' Reads variable from SDF file.
IN:
filename - string with filename
variable - string with variable name
    memmap - [OPTIONAL] boolean. If true, will return a memmap object
             (i.e., data is only loaded into memory when needed)
OUT:
data - array with data
'''
ff = SDFHeader(filename, verbose=False)
if variable not in ff.variables:
raise KeyError(
'(EEE) getvar: variable %s not found in %s' %
(variable, filename))
order, dtype, nbpw, offset, shape = ff.variables[variable]
if memmap:
data = np.memmap(filename, dtype=dtype, mode='r', shape=shape,
offset=offset, order='F')
else:
f = open(filename, 'r')
f.seek(offset)
data = np.fromfile(f, dtype=dtype,
count=np.prod(shape)).reshape(shape[::-1]).T
f.close()
return data
def getall(filename, memmap=False):
''' Reads all the variables of an SDF file. Loads into a dictionary indexed
by variable name. '''
ff = SDFHeader(filename, verbose=False)
result = {}
for v in ff.variables:
result[v] = getvar(filename, v, memmap)
return result
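# Example usage (the file and variable names here are hypothetical):
#   rho = getvar('snapshot.sdf', 'rho', memmap=True)
#   everything = getall('snapshot.sdf')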
|
ITA-Solar/helita
|
helita/io/sdf.py
|
Python
|
bsd-3-clause
| 3,207
|