| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
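Each row below pairs one source file (the `code` cell) with its attribution metadata (`repo_name`, `path`, `language`, `license`, `size`). A minimal sketch of filtering rows with this schema, assuming the records are available as plain Python dictionaries; the sample record and the size threshold are illustrative, not drawn from the dataset:

```python
# Minimal sketch: filter rows of the schema above by license, language and size.
# The sample record is hypothetical; only the field names come from the table.
records = [
    {"code": "print('hello')", "repo_name": "example/repo", "path": "hello.py",
     "language": "Python", "license": "gpl-3.0", "size": 15},
]

small_gpl = [
    r for r in records
    if r["license"] == "gpl-3.0" and r["language"] == "Python" and r["size"] < 10000
]

for r in small_gpl:
    print(r["repo_name"], r["path"], r["size"])
```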
# -*- coding: utf-8 -*-
from odoo import api
from odoo.addons.mail.tests.common import TestMail
class TestTracking(TestMail):
def test_message_track(self):
""" Testing auto tracking of fields. Warning, it has not be cleaned and
should probably be. """
Subtype = self.env['mail.message.subtype']
Data = self.env['ir.model.data']
note_subtype = self.env.ref('mail.mt_note')
group_system = self.env.ref('base.group_system')
group_user = self.env.ref('base.group_user')
self.group_pigs.write({'channel_partner_ids': [(4, self.user_employee.partner_id.id)]})
# mt_private: public field (tracked as onchange) set to 'private' (selection)
mt_private = Subtype.create({
'name': 'private',
'description': 'Public field set to private'
})
Data.create({
'name': 'mt_private',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_private.id
})
# mt_name_supername: name field (tracked as always) set to 'supername' (char)
mt_name_supername = Subtype.create({
'name': 'name_supername',
'description': 'Name field set to supername'
})
Data.create({
'name': 'mt_name_supername',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_name_supername.id
})
# mt_group_public_set: group_public field (tracked as onchange) set to something (m2o)
mt_group_public_set = Subtype.create({
'name': 'group_public_set',
'description': 'Group_public field set'
})
Data.create({
'name': 'mt_group_public_set',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_group_public_set.id
})
# mt_group_public_unset: group_public field (tracked as onchange) set to nothing (m2o)
mt_group_public_unset = Subtype.create({
'name': 'group_public_unset',
'description': 'Group_public field unset'
})
Data.create({
'name': 'mt_group_public_unset',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_group_public_unset.id
})
@api.multi
def _track_subtype(self, init_values):
if 'public' in init_values and self.public == 'private':
return 'mail.mt_private'
elif 'name' in init_values and self.name == 'supername':
return 'mail.mt_name_supername'
elif 'group_public_id' in init_values and self.group_public_id:
return 'mail.mt_group_public_set'
elif 'group_public_id' in init_values and not self.group_public_id:
return 'mail.mt_group_public_unset'
return False
self.registry('mail.channel')._patch_method('_track_subtype', _track_subtype)
visibility = {
'public': 'onchange',
'name': 'always',
'group_public_id': 'onchange'
}
cls = type(self.env['mail.channel'])
for key in visibility:
self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
getattr(cls, key).track_visibility = visibility[key]
@self.addCleanup
def cleanup():
for key in visibility:
del getattr(cls, key).track_visibility
# Test: change name -> always tracked, not related to a subtype
self.group_pigs.sudo(self.user_employee).write({'name': 'my_name'})
self.assertEqual(len(self.group_pigs.message_ids), 1)
last_msg = self.group_pigs.message_ids[-1]
self.assertEqual(last_msg.subtype_id, note_subtype)
self.assertEqual(len(last_msg.tracking_value_ids), 1)
self.assertEqual(last_msg.tracking_value_ids.field, 'name')
self.assertEqual(last_msg.tracking_value_ids.field_desc, 'Name')
self.assertEqual(last_msg.tracking_value_ids.old_value_char, 'Pigs')
self.assertEqual(last_msg.tracking_value_ids.new_value_char, 'my_name')
# Test: change name as supername, public as private -> 1 subtype, private
self.group_pigs.sudo(self.user_employee).write({'name': 'supername', 'public': 'private'})
self.group_pigs.invalidate_cache()
self.assertEqual(len(self.group_pigs.message_ids.ids), 2)
last_msg = self.group_pigs.message_ids[0]
self.assertEqual(last_msg.subtype_id, mt_private)
self.assertEqual(len(last_msg.tracking_value_ids), 2)
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field')), set(['name', 'public']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field_desc')), set(['Name', 'Privacy']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_char')), set(['my_name', 'Selected group of users']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_char')), set(['supername', 'Invited people only']))
# Test: change public as public, group_public_id -> 1 subtype, group public set
self.group_pigs.sudo(self.user_employee).write({'public': 'public', 'group_public_id': group_system.id})
self.group_pigs.invalidate_cache()
self.assertEqual(len(self.group_pigs.message_ids), 3)
last_msg = self.group_pigs.message_ids[0]
self.assertEqual(last_msg.subtype_id, mt_group_public_set)
self.assertEqual(len(last_msg.tracking_value_ids), 2)
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field')), set(['group_public_id', 'public']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field_desc')), set(['Authorized Group', 'Privacy']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_char')), set([group_user.name_get()[0][1], 'Invited people only']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_char')), set([group_system.name_get()[0][1], 'Everyone']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_integer')), set([0, group_user.id]))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_integer')), set([0, group_system.id]))
| (code above) | ayepezv/GAD_ERP | addons/mail/tests/test_message_track.py | Python | gpl-3.0 | 6,334 |
import rules

from core.subscription.models import Organisation


@rules.predicate
def can_manage_an_organisation(user):
    return is_organisation_member(user)


@rules.predicate
def is_organisation_member(user, organisation=None):
    if organisation is None:
        return bool(Organisation.objects.filter(members=user).count())
    else:
        return bool(organisation.members.filter(id=user.id).count())
| (code above) | erudit/zenon | eruditorg/core/subscription/predicates.py | Python | gpl-3.0 | 412 |
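The two predicates above are meant to be wired into django-rules permissions. A hedged sketch of that wiring; the permission names are hypothetical, while `rules.add_perm` and the object-level check pattern are standard django-rules usage:

```python
import rules

from core.subscription.predicates import (can_manage_an_organisation,
                                           is_organisation_member)

# Hypothetical permission names; only the add_perm() registration pattern is
# taken from the django-rules API, not from this repository.
rules.add_perm('subscription.manage_organisation', can_manage_an_organisation)
rules.add_perm('subscription.is_organisation_member', is_organisation_member)

# With the rules authentication backend enabled, these can then be checked as:
#   user.has_perm('subscription.manage_organisation')
#   user.has_perm('subscription.is_organisation_member', some_organisation)
```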
#!/usr/bin/env python3
"""
Read a SAM file and split each line into columns.
"""
import argparse
import os
import re
import sys


def main():
    parser = argparse.ArgumentParser(description='Groups transcripts by mapped read pairs')
    parser.add_argument('-i', '--input_sam_file', type=str, required=False, help='SAM file of read alignments back to transcripts')
    args = parser.parse_args()

    print("INFO: parsing SAM file and creating transcript pairings")

    fh = sys.stdin
    if args.input_sam_file is not None:
        fh = open(args.input_sam_file)

    n_lines = 0

    for line in fh:
        n_lines += 1

        if line[0] == '@':
            continue

        cols = line.split("\t")
        ref_read = cols[0]
        ref_transcript = cols[2]
        other_transcript = cols[6]

        if (n_lines % 1000000) == 0:
            print("n_lines=" + str(n_lines))

    print("read " + str(n_lines) + " line(s) from file")


if __name__ == '__main__':
    main()
| (code above) | jonathancrabtree/biocode | sandbox/jcrabtree/group_rnaseq_transcripts_by_read_alignment/3-timing-tests/read_and_split_only.py | Python | gpl-3.0 | 985 |
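For reference, the columns the parser above pulls out map onto fixed fields of the SAM format: index 0 is QNAME (the read name), index 2 is RNAME (the reference the read aligned to), and index 6 is RNEXT (the mate's reference, `=` when it is the same). A small illustration with a made-up alignment line:

```python
# Hypothetical SAM alignment line (tab-separated); the values are illustrative.
line = "read_001\t99\ttranscript_A\t100\t60\t75M\t=\t180\t155\t*\t*\n"

cols = line.split("\t")
print(cols[0])  # QNAME -> 'read_001'     (ref_read in the script above)
print(cols[2])  # RNAME -> 'transcript_A' (ref_transcript)
print(cols[6])  # RNEXT -> '='            (other_transcript; '=' means same reference as RNAME)
```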
# framework/templates.py
#
# Copyright 2011 Spencer J. McIntyre <SMcIntyre [at] SecureState [dot] net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from framework.options import AdvancedOptions, Options


class TermineterModule(object):
    frmwk_required_options = ()

    def __init__(self, frmwk):
        self.frmwk = frmwk
        self.name = 'unknown'
        self.version = 0
        self.author = ['anonymous']
        self.description = 'This module is undocumented.'
        self.detailed_description = 'This module is undocumented.'
        self.options = Options(frmwk.directories)
        self.advanced_options = AdvancedOptions(frmwk.directories)

    def __repr__(self):
        return '<' + self.__class__.__name__ + ' ' + self.name + ' >'

    def get_missing_options(self):
        frmwk_missing_options = self.frmwk.options.get_missing_options()
        frmwk_missing_options.extend(self.frmwk.advanced_options.get_missing_options())
        missing_options = []
        for required_option in self.frmwk_required_options:
            if required_option in frmwk_missing_options:
                missing_options.append(required_option)
        missing_options.extend(self.options.get_missing_options())
        missing_options.extend(self.advanced_options.get_missing_options())
        return missing_options

    @property
    def logger(self):
        return self.frmwk.get_module_logger(self.name)


class TermineterModuleOptical(TermineterModule):
    frmwk_required_options = (
        'CONNECTION',
        'USERNAME',
        'USERID',
        'PASSWORD',
        'PASSWORDHEX',
        'BAUDRATE',
        'BYTESIZE',
        'CACHETBLS',
        'STOPBITS',
        'NBRPKTS',
        'PKTSIZE'
    )
    require_connection = True

    def __init__(self, *args, **kwargs):
        super(TermineterModuleOptical, self).__init__(*args, **kwargs)


class TermineterModuleRfcat(TermineterModule):
    pass
| (code above) | manueldelreal/termineter | framework/templates.py | Python | gpl-3.0 | 2,380 |
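For context, a hedged sketch of how a concrete module might build on the template classes above; the module name and metadata below are illustrative, and only the attributes assigned in `TermineterModule.__init__` plus the `TermineterModuleOptical` option list come from the file itself:

```python
# Illustrative only: a module skeleton built on the template classes above.
from framework.templates import TermineterModuleOptical


class Module(TermineterModuleOptical):
    def __init__(self, frmwk):
        TermineterModuleOptical.__init__(self, frmwk)
        self.name = 'example_module'          # hypothetical name
        self.version = 1
        self.author = ['example author']
        self.description = 'Illustrative module that does nothing.'
        self.detailed_description = 'Shows how the template base classes are subclassed.'

# get_missing_options() on an instance reports any of the optical options
# (CONNECTION, USERNAME, ...) the framework has not set yet, plus any unset
# module-level options from self.options / self.advanced_options.
```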
# coding=utf-8
# Author: miigotu <miigotu@gmail.com>
# URL: http://github.com/SiCKRAGETV/SickRage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from urllib import quote_plus
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.providers import generic
class BitCannonProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "BitCannon")
self.supportsBacklog = True
self.public = True
self.minseed = None
self.minleech = None
self.ratio = 0
self.cache = BitCannonCache(self)
self.url = 'http://127.0.0.1:1337/'
self.urls = {
'base_url': self.url,
'search': self.url + 'search/',
'trackers': self.url + 'stats',
}
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
trackers = (self.getURL(self.urls['trackers'], json=True) or {}).get(u'Trackers', [])
if not trackers:
logger.log(u'Could not get tracker list from BitCannon, aborting search')
return results
for mode in search_strings.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
searchURL = self.urls['search'] + search_string
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
data = self.getURL(searchURL, json=True)
for item in data or []:
if 'tv' not in (item.get('Category') or u'').lower():
continue
title = item.get(u'Title', u'')
info_hash = item.get(u'Btih', u'')
if not all([title, info_hash]):
continue
swarm = item.get(u'Swarm', {})
seeders = swarm.get(u'Seeders', 0)
leechers = swarm.get(u'Leechers', 0)
size = item.get(u'Size', -1)
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
# Only build the url if we selected it
download_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (info_hash, quote_plus(title.encode('utf-8')), u'&tr='.join([quote_plus(x.encode('utf-8')) for x in trackers]))
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
# For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
class BitCannonCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll BitCannon every 20 minutes max
self.minTime = 20
def _getRSSData(self):
return {'entries': []}
# search_strings = {'RSS': ['']}
# return {'entries': self.provider._doSearch(search_strings)}
provider = BitCannonProvider()
| (code above) | srluge/SickRage | sickbeard/providers/bitcannon.py | Python | gpl-3.0 | 4,265 |
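The core of `_doSearch` above is the magnet-link assembly. A standalone sketch of that construction, written for Python 3 (the provider itself is Python 2, where `quote_plus` lives in `urllib`); the hash, title, and tracker values are made up:

```python
from urllib.parse import quote_plus

info_hash = 'd0d14c926e6e99761a2fdcff27b403d96376eff6'   # hypothetical
title = 'Some.Show.S01E01.720p'                          # hypothetical
trackers = ['udp://tracker.example.org:80/announce']     # hypothetical

# Same format string as in _doSearch: one &tr= parameter per tracker.
download_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (
    info_hash,
    quote_plus(title),
    '&tr='.join(quote_plus(t) for t in trackers),
)
print(download_url)
```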
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: copy
version_added: "historical"
short_description: Copies files to remote locations.
description:
- The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box. If you need variable interpolation in copied files, use the M(template) module.
options:
src:
description:
- Local path to a file to copy to the remote server; can be absolute or relative.
If path is a directory, it is copied recursively. In this case, if path ends
with "/", only inside contents of that directory are copied to destination.
Otherwise, if it does not end with "/", the directory itself with all contents
is copied. This behavior is similar to Rsync.
required: false
default: null
aliases: []
content:
version_added: "1.1"
description:
- When used instead of 'src', sets the contents of a file directly to the specified value.
This is for simple values, for anything complex or with formatting please switch to the template module.
required: false
default: null
dest:
description:
- Remote absolute path where the file should be copied to. If src is a directory,
this must be a directory too.
required: true
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
version_added: "0.7"
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
version_added: "1.1"
required: false
choices: [ "yes", "no" ]
default: "yes"
aliases: [ "thirsty" ]
directory_mode:
description:
- When doing a recursive copy set the mode for the directories. If this is not set we will use the system
defaults. The mode is only set on directories which are newly created, and will not affect those that
already existed.
required: false
version_added: "1.5"
remote_src:
description:
- If False, it will search for src at originating/master machine, if True it will go to the remote/target machine for the src. Default is False.
- Currently remote_src does not support recursive copying.
choices: [ "True", "False" ]
required: false
default: "False"
version_added: "2.0"
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.8"
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
extends_documentation_fragment:
- files
- validate
author:
- "Ansible Core Team"
- "Michael DeHaan"
notes:
- The "copy" module recursively copy facility does not scale to lots (>hundreds) of files.
For alternative, see synchronize module, which is a wrapper around rsync.
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: 0644
# The same example as above, but using a symbolic mode equivalent to 0644
- copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: "u=rw,g=r,o=r"
# Another symbolic mode example, adding some permissions and removing others
- copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: "u+rw,g-wx,o-rwx"
# Copy a new "ntp.conf file into place, backing up the original if it differs from the copied version
- copy:
src: /mine/ntp.conf
dest: /etc/ntp.conf
owner: root
group: root
mode: 0644
backup: yes
# Copy a new "sudoers" file into place, after passing validation with visudo
- copy:
src: /mine/sudoers
dest: /etc/sudoers
validate: 'visudo -cf %s'
'''
RETURN = '''
dest:
description: destination file/path
returned: success
type: string
sample: "/path/to/file.txt"
src:
description: source file used for the copy on the target machine
returned: changed
type: string
sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source"
md5sum:
description: md5 checksum of the file after running copy
returned: when supported
type: string
sample: "2a5aeecc61dc98c4d780b14b330e3282"
checksum:
description: sha1 checksum of the file after running copy
returned: success
type: string
sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827"
backup_file:
description: name of backup file created
returned: changed and if backup=yes
type: string
sample: "/path/to/file.txt.2015-02-12@22:09~"
gid:
description: group id of the file, after execution
returned: success
type: int
sample: 100
group:
description: group of the file, after execution
returned: success
type: string
sample: "httpd"
owner:
description: owner of the file, after execution
returned: success
type: string
sample: "httpd"
uid:
description: owner id of the file, after execution
returned: success
type: int
sample: 100
mode:
description: permissions of the target, after execution
returned: success
type: string
sample: "0644"
size:
description: size of the target, after execution
returned: success
type: int
sample: 1220
state:
description: state of the target, after execution
returned: success
type: string
sample: "file"
'''
import os
import shutil
import tempfile
import traceback
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_bytes, to_native
def split_pre_existing_dir(dirname):
'''
Return the first pre-existing directory and a list of the new directories that will be created.
'''
head, tail = os.path.split(dirname)
b_head = to_bytes(head, errors='surrogate_or_strict')
if not os.path.exists(b_head):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
else:
return (head, [tail])
new_directory_list.append(tail)
return (pre_existing_dir, new_directory_list)
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
'''
Walk the new directories list and make sure that permissions are as we would expect
'''
if len(new_directory_list) > 0:
working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
directory_args['path'] = working_dir
changed = module.set_fs_attributes_if_different(directory_args, changed)
changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
return changed
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
src = dict(required=False, type='path'),
original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack
content = dict(required=False, no_log=True),
dest = dict(required=True, type='path'),
backup = dict(default=False, type='bool'),
force = dict(default=True, aliases=['thirsty'], type='bool'),
validate = dict(required=False, type='str'),
directory_mode = dict(required=False),
remote_src = dict(required=False, type='bool'),
),
add_file_common_args=True,
supports_check_mode=True,
)
src = module.params['src']
b_src = to_bytes(src, errors='surrogate_or_strict')
dest = module.params['dest']
b_dest = to_bytes(dest, errors='surrogate_or_strict')
backup = module.params['backup']
force = module.params['force']
original_basename = module.params.get('original_basename', None)
validate = module.params.get('validate', None)
follow = module.params['follow']
mode = module.params['mode']
remote_src = module.params['remote_src']
if not os.path.exists(b_src):
module.fail_json(msg="Source %s not found" % (src))
if not os.access(b_src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src))
if os.path.isdir(b_src):
module.fail_json(msg="Remote copy does not support recursive copy of directory: %s" % (src))
checksum_src = module.sha1(src)
checksum_dest = None
# Backwards compat only. This will be None in FIPS mode
try:
md5sum_src = module.md5(src)
except ValueError:
md5sum_src = None
changed = False
# Special handling for recursive copy - create intermediate dirs
if original_basename and dest.endswith(os.sep):
dest = os.path.join(dest, original_basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
dirname = os.path.dirname(dest)
b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
if not os.path.exists(b_dirname) and os.path.isabs(b_dirname):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
os.makedirs(b_dirname)
directory_args = module.load_file_common_arguments(module.params)
directory_mode = module.params["directory_mode"]
if directory_mode is not None:
directory_args['mode'] = directory_mode
else:
directory_args['mode'] = None
adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
if os.path.isdir(b_dest):
basename = os.path.basename(src)
if original_basename:
basename = original_basename
dest = os.path.join(dest, basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
if os.path.islink(b_dest) and follow:
b_dest = os.path.realpath(b_dest)
dest = to_native(b_dest, errors='surrogate_or_strict')
if not force:
module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
if os.access(b_dest, os.R_OK):
checksum_dest = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(b_dest)):
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
os.stat(os.path.dirname(b_dest))
except OSError:
e = get_exception()
if "permission denied" in to_native(e).lower():
module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
if not os.access(os.path.dirname(b_dest), os.W_OK):
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None
if checksum_src != checksum_dest or os.path.islink(b_dest):
if not module.check_mode:
try:
if backup:
if os.path.exists(b_dest):
backup_file = module.backup_local(dest)
# allow for conversion from symlink.
if os.path.islink(b_dest):
os.unlink(b_dest)
open(b_dest, 'w').close()
if validate:
# if we have a mode, make sure we set it on the temporary
# file source as some validations may require it
# FIXME: should we do the same for owner/group here too?
if mode is not None:
module.set_mode_if_different(src, mode, False)
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % src)
if rc != 0:
module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
b_mysrc = b_src
if remote_src:
_, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
shutil.copy2(b_src, b_mysrc)
module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
except IOError:
module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
changed = True
else:
changed = False
res_args = dict(
dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
)
if backup_file:
res_args['backup_file'] = backup_file
module.params['dest'] = dest
if not module.check_mode:
file_args = module.load_file_common_arguments(module.params)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args)
if __name__ == '__main__':
main()
| (code above) | naslanidis/ansible | lib/ansible/modules/files/copy.py | Python | gpl-3.0 | 14,506 |
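A worked illustration of the directory handling in `split_pre_existing_dir` above, assuming the function from copy.py is in scope and that only `/tmp` already exists on the filesystem (the concrete paths are hypothetical):

```python
# Assuming only /tmp exists on disk (hypothetical paths):
pre_existing, to_create = split_pre_existing_dir('/tmp/a/b/c')
# -> pre_existing == '/tmp', to_create == ['a', 'b', 'c']
#
# i.e. the deepest pre-existing directory plus the directories that still
# need to be created. copy.py then calls os.makedirs() for the missing part,
# and adjust_recursive_directory_permissions() revisits each newly created
# directory to apply directory_mode.
```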
# Blender rock creation tool
#
# Based on BlenderGuru's asteroid tutorial and personal experimentation.
# Tutorial: http://www.blenderguru.com/how-to-make-a-realistic-asteroid/
# Update with another tutorial shared by "rusted" of BlenderArtists:
# Tutorial: http://saschahenrichs.blogspot.com/2010/03/3dsmax-environment-modeling-1.html
#
# Uses the NumPy Gaussian random number generator to generate a rock
# within a given range and give some randomness to the displacement
# texture values. NumPy's gaussian generator was chosen because, based on
# profiling I performed, it runs in about half the time of the built-in
# Python gaussian equivalent. I would like to shift the script to use the
# NumPy beta distribution, as it ran in about half the time of the NumPy
# gaussian once the skew calculations were added.
#
# Set lower and upper bounds to the same for no randomness.
#
# Tasks:
# Generate meshes with random scaling between given values.
# - Allow for a skewed distribution
# *** Completed on 4/17/2011 ***
# - Create a set of meshes that can be used
# Give the user the ability to set the subsurf level (detail level)
# *** Completed on 4/29/2011 ***
# - Set subsurf modifiers to default at view:3, render:3.
# *** Completed on 4/17/2011 ***
# - Set crease values to allow for hard edges on first subsurf.
# *** Completed on 4/29/2011 ***
# Be able to generate and add a texture to the displacement modifiers.
# *** Completed 5/17/2011 ***
# - Generate three displacement modifiers.
# - The first only uses a Musgrave for initial indentations.
# *** Now generating four displacement modifiers ***
# *** Completed on 5/17/2011 ***
# - Set a randomness for the type and values of the displacement texture.
# *** Completed 5/9/2011 ***
# - Allow the user to set a value for the range of displacement.
# -> Modification: have user set "roughness" and "roughness range".
# *** Completed on 4/23/2011 ***
# Set material settings and assign material textures
# *** Completed 6/9/2011 ***
# - Mossiness of the rocks.
# *** Completed 6/9/2011 ***
# - Color of the rocks.
# *** Completed 5/16/2011 ***
# - Wetness/shinyness of the rock.
# *** Completed 5/6/2011 ***
# - For all the user provides a mean value for a skewed distribution.
# *** Removed to lessen usage complexity ***
# Add some presets (mesh) to make it easier to use
# - Examples: river rock, asteroid, quarried rock, etc
# *** Completed 7/12/2011 ***
#
# Code Optimization:
# Remove all "bpy.ops" operations with "bpy.data" base operations.
# Remove material/texture cataloging with building a list of
# returned values from bpy.data.*.new() operations.
# *** Completed on 9/6/2011 ***
# Search for places where list comprehensions can be used.
# Look for alternate methods
# - Possible alternate and more efficient data structures
# - Possible alternate algorithms may realize greater performance
# - Look again at multi-processing. Without bpy.ops it might
# be viable.
#
# Future tasks:
# Multi-thread the script
# *** Will not be implemented. Multi-processing is adding too much
# overhead to realize a performance increase ***
# - Learn basic multi-threading in Python (multiprocessing)
# - Break material generation into separate threads (processes)
# - Break mesh generation into separate threads (processes)
# - Move name generation, texture ID generation, etc to process first
# - Roll version to 2.0 on completion
#
# Paul "BrikBot" Marshall
# Created: April 17, 2011
# Last Modified: November 17, 2011
# Homepage (blog): http://post.darkarsenic.com/
# //blog.darkarsenic.com/
# Thanks to Meta-Androco, RickyBlender, Ace Dragon, and PKHG for ideas
# and testing.
#
# Coded in IDLE, tested in Blender 2.59. NumPy Recommended.
# Search for "@todo" to quickly find sections that need work.
#
# Remember -
# Functional code comes before fast code. Once it works, then worry about
# making it faster/more efficient.
#
# ##### BEGIN GPL LICENSE BLOCK #####
#
# The Blender Rock Creation tool is for rapid generation of mesh rocks.
# Copyright (C) 2011 Paul Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
import math
import time
from add_mesh_rocks import (settings,
utils)
from bpy_extras import object_utils
from mathutils import (Color,
Vector)
from bpy.props import (BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
EnumProperty)
# This try block allows the script to pseudo-intelligently select the
# appropriate random generator to use. If NumPy's random is present it will
# use that. If NumPy's random is not present, the import will raise a
# "module not found" error and the slower built-in random that Python
# provides is used instead.
try:
from numpy.random import random_integers as randint
from numpy.random import normal as gauss
from numpy.random import (beta,
uniform,
seed,
weibull)
print("Rock Generator: Numpy found.")
numpy = True
except ImportError:
from random import (randint,
gauss,
uniform,
seed)
from random import betavariate as beta
from random import weibullvariate as weibull
print("Rock Generator: Numpy not found. Using Python's random.")
numpy = False
# Global variables:
lastRock = 0
# Creates a new mesh:
#
# param: verts - Vector of vertices for the mesh.
# edges - Edges for the mesh. Can be "[]".
# faces - Face tuples corresponding to vertices.
# name - Name of the mesh.
def createMeshObject(context, verts, edges, faces, name):
# Create new mesh
mesh = bpy.data.meshes.new(name)
# Make a mesh from a list of verts/edges/faces.
mesh.from_pydata(verts, edges, faces)
# Set mesh to use auto smoothing:
mesh.use_auto_smooth = True
# Update mesh geometry after adding stuff.
mesh.update()
return object_utils.object_data_add(context, mesh, operator=None)
# Set the values for a texture from parameters.
#
# param: texture - bpy.data.texture to modify.
# level - designated tweaked settings to use
# -> Below 10 is a displacement texture
# -> Between 10 and 20 is a base material texture
def randomizeTexture(texture, level=1):
noises = ['BLENDER_ORIGINAL', 'ORIGINAL_PERLIN', 'IMPROVED_PERLIN',
'VORONOI_F1', 'VORONOI_F2', 'VORONOI_F3', 'VORONOI_F4',
'VORONOI_F2_F1', 'VORONOI_CRACKLE']
if texture.type == 'CLOUDS':
if randint(0, 1) == 0:
texture.noise_type = 'SOFT_NOISE'
else:
texture.noise_type = 'HARD_NOISE'
if level != 11:
tempInt = randint(0, 6)
else:
tempInt = randint(0, 8)
texture.noise_basis = noises[tempInt]
texture.noise_depth = 8
if level == 0:
texture.noise_scale = gauss(0.625, 1 / 24)
elif level == 2:
texture.noise_scale = 0.15
elif level == 11:
texture.noise_scale = gauss(0.5, 1 / 24)
if texture.noise_basis in ['BLENDER_ORIGINAL', 'ORIGINAL_PERLIN',
'IMPROVED_PERLIN', 'VORONOI_F1']:
texture.intensity = gauss(1, 1 / 6)
texture.contrast = gauss(4, 1 / 3)
elif texture.noise_basis in ['VORONOI_F2', 'VORONOI_F3', 'VORONOI_F4']:
texture.intensity = gauss(0.25, 1 / 12)
texture.contrast = gauss(2, 1 / 6)
elif texture.noise_basis == 'VORONOI_F2_F1':
texture.intensity = gauss(0.5, 1 / 6)
texture.contrast = gauss(2, 1 / 6)
elif texture.noise_basis == 'VORONOI_CRACKLE':
texture.intensity = gauss(0.5, 1 / 6)
texture.contrast = gauss(2, 1 / 6)
elif texture.type == 'MUSGRAVE':
musgraveType = ['MULTIFRACTAL', 'RIDGED_MULTIFRACTAL',
'HYBRID_MULTIFRACTAL', 'FBM', 'HETERO_TERRAIN']
texture.musgrave_type = 'MULTIFRACTAL'
texture.dimension_max = abs(gauss(0, 0.6)) + 0.2
texture.lacunarity = beta(3, 8) * 8.2 + 1.8
if level == 0:
texture.noise_scale = gauss(0.625, 1 / 24)
texture.noise_intensity = 0.2
texture.octaves = 1.0
elif level == 2:
texture.intensity = gauss(1, 1 / 6)
texture.contrast = 0.2
texture.noise_scale = 0.15
texture.octaves = 8.0
elif level == 10:
texture.intensity = gauss(0.25, 1 / 12)
texture.contrast = gauss(1.5, 1 / 6)
texture.noise_scale = 0.5
texture.octaves = 8.0
elif level == 12:
texture.octaves = uniform(1, 3)
elif level > 12:
texture.octaves = uniform(2, 8)
else:
texture.intensity = gauss(1, 1 / 6)
texture.contrast = 0.2
texture.octaves = 8.0
elif texture.type == 'DISTORTED_NOISE':
tempInt = randint(0, 8)
texture.noise_distortion = noises[tempInt]
tempInt = randint(0, 8)
texture.noise_basis = noises[tempInt]
texture.distortion = skewedGauss(2.0, 2.6666, (0.0, 10.0), False)
if level == 0:
texture.noise_scale = gauss(0.625, 1 / 24)
elif level == 2:
texture.noise_scale = 0.15
elif level >= 12:
texture.noise_scale = gauss(0.2, 1 / 48)
elif texture.type == 'STUCCI':
stucciTypes = ['PLASTIC', 'WALL_IN', 'WALL_OUT']
if randint(0, 1) == 0:
texture.noise_type = 'SOFT_NOISE'
else:
texture.noise_type = 'HARD_NOISE'
tempInt = randint(0, 2)
texture.stucci_type = stucciTypes[tempInt]
if level == 0:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
texture.noise_scale = gauss(0.625, 1 / 24)
elif level == 2:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
texture.noise_scale = 0.15
elif level >= 12:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
texture.noise_scale = gauss(0.2, 1 / 30)
else:
tempInt = randint(0, 6)
texture.noise_basis = noises[tempInt]
elif texture.type == 'VORONOI':
metrics = ['DISTANCE', 'DISTANCE_SQUARED', 'MANHATTAN', 'CHEBYCHEV',
'MINKOVSKY_HALF', 'MINKOVSKY_FOUR', 'MINKOVSKY']
# Settings for first displacement level:
if level == 0:
tempInt = randint(0, 1)
texture.distance_metric = metrics[tempInt]
texture.noise_scale = gauss(0.625, 1 / 24)
texture.contrast = 0.5
texture.intensity = 0.7
elif level == 2:
texture.noise_scale = 0.15
tempInt = randint(0, 6)
texture.distance_metric = metrics[tempInt]
elif level >= 12:
tempInt = randint(0, 1)
texture.distance_metric = metrics[tempInt]
texture.noise_scale = gauss(0.125, 1 / 48)
texture.contrast = 0.5
texture.intensity = 0.7
else:
tempInt = randint(0, 6)
texture.distance_metric = metrics[tempInt]
return
# Randomizes the given material given base values.
#
# param: Material to randomize
def randomizeMaterial(material, color, dif_int, rough, spec_int, spec_hard,
use_trans, alpha, cloudy, mat_IOR, mossiness, spec_IOR):
skew = False
stddev = 0.0
lastUsedTex = 1
numTex = 6
baseColor = []
# Diffuse settings:
material.diffuse_shader = 'OREN_NAYAR'
if 0.5 > dif_int:
stddev = dif_int / 3
skew = False
else:
stddev = (1 - dif_int) / 3
skew = True
material.diffuse_intensity = skewedGauss(dif_int, stddev, (0.0, 1.0), skew)
if 1.57 > rough:
stddev = rough / 3
skew = False
else:
stddev = (3.14 - rough) / 3
skew = True
material.roughness = skewedGauss(rough, stddev, (0.0, 3.14), skew)
for i in range(3):
if color[i] > 0.9 or color[i] < 0.1:
baseColor.append(skewedGauss(color[i], color[i] / 30,
(0, 1), color[i] > 0.9))
else:
baseColor.append(gauss(color[i], color[i] / 30))
material.diffuse_color = baseColor
# Specular settings:
material.specular_shader = 'BLINN'
if 0.5 > spec_int:
variance = spec_int / 3
skew = False
else:
variance = (1 - spec_int) / 3
skew = True
material.specular_intensity = skewedGauss(spec_int, variance,
(0.0, 1.0), skew)
if 256 > spec_hard:
variance = (spec_hard - 1) / 3
skew = False
else:
variance = (511 - spec_hard) / 3
skew = True
material.specular_hardness = int(round(skewedGauss(spec_hard, variance,
(1.0, 511.0), skew)))
if 5.0 > spec_IOR:
variance = spec_IOR / 3
skew = False
else:
variance = (10.0 - spec_IOR) / 3
skew = True
material.specular_ior = skewedGauss(spec_IOR, variance, (0.0, 10.0), skew)
# Raytrans settings:
# *** Added on 11/17/2011 ***
material.use_transparency = use_trans
if use_trans:
trans = material.raytrace_transparency
# Fixed values:
material.transparency_method = 'RAYTRACE'
trans.depth = 24
trans.gloss_samples = 32
trans.falloff = 1.0
# Needs randomization:
material.alpha = -gauss(alpha, 0.05) + 1;
trans.gloss_factor = -gauss(cloudy, 0.05) + 1
trans.filter = gauss(cloudy, 0.1)
trans.ior = skewedGauss(mat_IOR, 0.01, [0.25, 4.0], mat_IOR > 2.125)
#Misc. settings:
material.use_transparent_shadows = True
# Rock textures:
# Now using slot.texture for texture access instead of
# bpy.data.textures[newTex[<index>]]
# *** Completed on 9/6/2011 ***
# Create the four new textures:
textureTypes = ['MUSGRAVE', 'CLOUDS', 'DISTORTED_NOISE',
'STUCCI', 'VORONOI']
for i in range(numTex):
texColor = []
# Set the active material slot:
material.active_texture_index = i
# Assign a texture to the active material slot:
material.active_texture = bpy.data.textures.new(name = 'stone_tex',
type = 'NONE')
# Store the slot to easy coding access:
slot = material.texture_slots[i]
# If the texture is not a moss texture:
if i > 1:
slot.texture.type = textureTypes[randint(0, 3)]
# Set the texture's color (RGB):
for j in range(3):
if color[j] > 0.9 or color[j] < 0.1:
texColor.append(skewedGauss(color[j], color[j] / 30,
(0, 1), color[j] > 0.9))
else:
texColor.append(gauss(color[j], color[j] / 30))
slot.color = texColor
# Randomize the value (HSV):
v = material.diffuse_color.v
if v == 0.5:
slot.color.v = gauss(v, v / 3)
elif v > 0.5:
slot.color.v = skewedGauss(v, v / 3, (0, 1), True)
else:
slot.color.v = skewedGauss(v, (1 - v) / 3, (0, 1), False)
# Adjust scale and normal based on texture type:
if slot.texture.type == 'VORONOI':
slot.scale = (gauss(5, 1), gauss(5, 1), gauss(5, 1))
slot.normal_factor = gauss(rough / 10, rough / 30)
elif slot.texture.type == 'STUCCI':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough / 10, rough / 30)
elif slot.texture.type == 'DISTORTED_NOISE':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough / 10, rough / 30)
elif slot.texture.type == 'MUSGRAVE':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough, rough / 3)
elif slot.texture.type == 'CLOUDS':
slot.scale = (gauss(1.5, 0.25), gauss(1.5, 0.25),
gauss(1.5, 0.25))
slot.normal_factor = gauss(rough, rough / 3)
# Set the color influence to 0.5.
# This allows for the moss textures to show:
slot.diffuse_color_factor = 0.5
# Set additional influence booleans:
slot.use_stencil = True
slot.use_map_specular = True
slot.use_map_color_spec = True
slot.use_map_hardness = True
slot.use_map_normal = True
# The following is for setting up the moss textures:
else:
slot.texture.type = textureTypes[i]
# Set the mosses color (RGB):
texColor.append(gauss(0.5, 1 / 6))
texColor.append(1)
texColor.append(0)
slot.color = texColor
# Randomize the value (HSV):
slot.color.v = gauss(0.275, 1 / 24)
# Scale the texture size:
slot.scale = (gauss(1.5, 0.25),
gauss(1.5, 0.25),
gauss(1.5, 0.25))
# Set the strength of the moss color:
slot.diffuse_color_factor = mossiness
# Have it influence spec and hardness:
slot.use_map_specular = True
slot.use_map_color_spec = True
slot.use_map_hardness = True
# If the texture is voronoi crackle clouds, use "Negative":
if slot.texture.type == 'CLOUDS':
if slot.texture.noise_basis == 'VORONOI_CRACKLE':
slot.invert = True
if mossiness == 0:
slot.use = False
randomizeTexture(slot.texture, 10 + i)
return
# Generates an object based on one of several different mesh types.
# All meshes have exactly eight vertices, and may be built from either
# tri's or quads.
#
# param: muX - mean X offset value
# sigmaX - X offset standard deviation
# scaleX - X upper and lower bounds
# upperSkewX - Is the distribution upperskewed?
# muY - mean Y offset value
# sigmaY - Y offset standard deviation
# scaleY - Y upper and lower bounds
# upperSkewY - Is the distribution upperskewed?
# muZ - mean Z offset value
# sigmaZ - Z offset standard deviation
# scaleZ - Z upper and lower bounds
# upperSkewY - Is the distribution upperskewed?
# base - base number on the end of the object name
# shift - Addition to the base number for multiple runs.
# scaleDisplace - Scale the displacement maps
#
# return: name - the built name of the object
def generateObject(context, muX, sigmaX, scaleX, upperSkewX, muY, sigmaY,
scaleY, upperSkewY, muZ, sigmaZ, scaleZ, upperSkewZ, base,
shift, scaleDisplace, scale_fac):
x = []
y = []
z = []
shape = randint(0, 11)
# Cube
# Use parameters to re-scale cube:
# Reversed if/for nesting. Should be a little faster.
if shape == 0:
for j in range(8):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 1:
for j in range(8):
if j in [0, 1, 3, 4]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [2, 5]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [6, 7]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 2:
for j in range(8):
if j in [0, 2, 5, 7]:
if sigmaX == 0:
x.append(scaleX[0] / 4)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 4)
elif j in [1, 3, 4, 6]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 3:
for j in range(8):
if j > 0:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
else:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 8)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 8)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 8)
elif shape == 4:
for j in range(10):
if j in [0, 9]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [1, 2, 3, 4]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [5, 7]:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 3)
if sigmaY == 0:
y.append(scaleY[0] / 3)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 3)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 6)
elif j in [6, 8]:
if sigmaX == 0:
x.append(scaleX[0] / 3)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 3)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 3)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 6)
elif shape == 5:
for j in range(10):
if j == 0:
if sigmaX == 0:
x.append(0)
else:
x.append(skewedGauss(0, sigmaX, scaleX, upperSkewX) / 8)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 8)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [1, 2]:
if sigmaX == 0:
x.append(scaleZ[0] * .125)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) * 0.125)
if sigmaY == 0:
y.append(scaleZ[0] * 0.2165)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) * 0.2165)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 4)
elif j == 3:
if sigmaX == 0:
x.append(scaleX[0] / 4)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(0)
else:
z.append(skewedGauss(0, sigmaZ, scaleZ, upperSkewZ) / 4)
elif j in [4, 6]:
if sigmaX == 0:
x.append(scaleX[0] * 0.25)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) * 0.25)
if sigmaY == 0:
y.append(scaleY[0] * 0.433)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) * 0.433)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j == 5:
if sigmaX == 0:
x.append(scaleX[0] / 4)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 4)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j in [7, 9]:
if sigmaX == 0:
x.append(scaleX[0] * 0.10825)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) * 0.10825)
if sigmaY == 0:
y.append(scaleY[0] * 0.2165)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) * 0.2165)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif j == 8:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 4)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 6:
for j in range(7):
if j > 0:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
else:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 7:
for j in range(10):
if j in [1, 3, 4, 5, 8, 9]:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
else:
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(0)
else:
y.append(skewedGauss(0, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 8:
for j in range(7):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 9:
for j in range(8):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 10:
for j in range(7):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
elif shape == 11:
for j in range(7):
if sigmaX == 0:
x.append(scaleX[0] / 2)
else:
x.append(skewedGauss(muX, sigmaX, scaleX, upperSkewX) / 2)
if sigmaY == 0:
y.append(scaleY[0] / 2)
else:
y.append(skewedGauss(muY, sigmaY, scaleY, upperSkewY) / 2)
if sigmaZ == 0:
z.append(scaleZ[0] / 2)
else:
z.append(skewedGauss(muZ, sigmaZ, scaleZ, upperSkewZ) / 2)
# This is for scaling the displacement textures.
# Scale the vertices so that their average is equal to 1 * scale factor.
if scaleDisplace:
averageX = (sum(x) / len(x)) * scale_fac[0]
for i in range(len(x)):
x[i] /= averageX
averageY = (sum(y) / len(y)) * scale_fac[1]
for i in range(len(y)):
y[i] /= averageY
averageZ = (sum(z) / len(z)) * scale_fac[2]
for i in range(len(z)):
z[i] /= averageZ
# Build vertex and face arrays:
if shape == 1:
verts = [(-x[0],-y[0],-z[0]),(x[1],-y[1],-z[1]),(x[2],-y[2],z[2]),
(-x[3],y[3],-z[3]),(x[4],y[4],-z[4]),(x[5],y[5],z[5]),
(x[6],y[6],z[6]),(x[7],y[7],-z[7])]
faces = [[0,1,2],[0,1,7],[3,0,7],[3,4,7],[1,4,7],[3,4,5],[1,2,6],
[1,4,6],[4,5,6],[0,2,6],[0,3,6],[3,5,6]]
elif shape == 2:
verts = [(-x[0],y[0],-z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],-y[4],z[4]),(x[5],y[5],z[5]),
(x[6],y[6],z[6]),(-x[7],y[7],z[7])]
faces = [[0,1,2],[0,2,3],[0,3,7],[0,7,4],[1,4,5],[0,1,4],[5,1,2],
[5,2,6],[3,2,6],[3,6,7],[5,4,7],[5,6,7]]
elif shape == 3:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(x[4],-y[4],z[4]),(x[5],y[5],z[5]),
(-x[6],y[6],z[6]),(-x[7],-y[7],z[7])]
faces = [[0,1,2],[0,2,3],[0,3,6],[0,6,7],[0,7,4],[0,4,1],[5,4,1,2],
[5,6,3,2],[5,4,7,6]]
elif shape == 4:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],-y[4],-z[4]),(x[5],-y[5],-z[5]),
(x[6],y[6],-z[6]),(x[7],y[7],-z[7]),(-x[8],y[8],-z[8]),
(x[9],y[9],-z[9])]
faces = [[0,1,6],[0,6,2],[0,2,7],[0,7,3],[0,3,8],[0,8,4],[0,4,5],
[0,5,1],[1,9,2],[2,9,3],[3,9,4],[4,9,1],[1,6,2],[2,7,3],
[3,8,4],[4,5,1]]
elif shape == 5:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],z[1]),(x[2],y[2],z[2]),
(-x[3],y[3],z[3]),(x[4],-y[4],-z[4]),(x[5],y[5],-z[5]),
(x[6],y[6],-z[6]),(-x[7],y[7],-z[7]),(-x[8],y[8],-z[8]),
(-x[9],-y[9],-z[9])]
faces = [[0,1,2],[0,2,3],[0,3,1],[1,4,5],[1,5,2],[2,5,6],[2,6,7],
[2,7,3],[3,7,8],[3,8,9],[3,9,1],[1,9,4],[4,5,9],[5,6,7],
[7,8,9],[9,5,7]]
elif shape == 6:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],y[4],z[4]),(-x[5],-y[5],z[5]),
(-x[6],-y[6],-z[6])]
faces = [[0,1,2],[0,2,3,4],[0,1,6,5],[0,4,5],[1,2,3,6],[3,4,5,6]]
elif shape == 7:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(x[3],y[3],-z[3]),(-x[4],y[4],-z[4]),(-x[5],y[5],z[5]),
(-x[6],y[6],z[6]),(-x[7],y[7],-z[7]),(-x[8],-y[8],-z[8]),
(-x[9],-y[9],z[9])]
faces = [[0,1,2],[0,2,3],[0,5,6],[0,6,9],[0,1,8,9],[0,3,4,5],
[1,2,7,8],[2,3,4,7],[4,5,6,7],[6,7,8,9]]
elif shape == 8:
verts = [(x[0],y[0],z[0]),(x[1],-y[1],-z[1]),(x[2],y[2],-z[2]),
(-x[3],y[3],-z[3]),(-x[4],-y[4],-z[4]),(-x[5],-y[5],z[5]),
(-x[6],y[6],z[6])]
faces = [[0,2,1],[0,1,4],[0,4,5],[0,5,6],[0,6,3,2],[2,1,4,3],
[3,6,5,4]]
elif shape == 9:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],y[2],z[2]),
(-x[3],-y[3],z[3]),(x[4],-y[4],-z[4]),(x[5],y[5],-z[5]),
(x[6],y[6],z[6]),(x[7],-y[7],z[7])]
faces = [[0,1,6,2],[1,5,7,6],[5,4,3,7],[4,0,2,3],[0,1,5,4],[3,2,6,7]]
elif shape == 10:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],y[2],z[2]),
(x[3],-y[3],z[3]),(x[4],y[4],z[4]),(x[5],y[5],-z[5]),
(x[6],-y[6],-z[6])]
faces = [[0,2,3],[0,3,6],[0,1,5,6],[2,3,4],[0,1,2],[1,2,4,5],[3,4,5,6]]
elif shape == 11:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],y[2],z[2]),
(x[3],-y[3],z[3]),(x[4],y[4],z[4]),(x[5],y[5],-z[5]),
(x[6],-y[6],-z[6])]
faces = [[0,2,3],[0,3,6],[0,1,5,6],[2,3,4],[5,6,3],[1,5,3,4],[0,1,4,2]]
else:
verts = [(-x[0],-y[0],-z[0]),(-x[1],y[1],-z[1]),(-x[2],-y[2],z[2]),
(-x[3],y[3],z[3]),(x[4],-y[4],-z[4]),(x[5],y[5],-z[5]),
(x[6],-y[6],z[6]),(x[7],y[7],z[7])]
faces = [[0,1,3,2],[0,1,5,4],[0,4,6,2],[7,5,4,6],[7,3,2,6],[7,5,1,3]]
## name = "Rock." + str(base + shift).zfill(3)
name = "rock"
# Make object:
obj = createMeshObject(context, verts, [], faces, name)
if scaleDisplace:
## bpy.data.objects[name].scale = Vector((averageX, averageY, averageZ))
obj.object.scale = Vector((averageX, averageY, averageZ))
# For a slight speed bump / Readability:
## mesh = bpy.data.meshes[name]
mesh = obj.object.data
# Apply creasing:
if shape == 0:
for i in range(12):
# todo: "0.375 / 3"? WTF? That = 0.125. . . .
# *** Completed 7/15/2011: Changed second one ***
mesh.edges[i].crease = gauss(0.125, 0.125)
elif shape == 1:
for i in [0, 2]:
mesh.edges[i].crease = gauss(0.5, 0.125)
for i in [6, 9, 11, 12]:
mesh.edges[i].crease = gauss(0.25, 0.05)
for i in [5, 7, 15, 16]:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 2:
for i in range(18):
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 3:
for i in [0, 1, 6, 10, 13]:
mesh.edges[i].crease = gauss(0.25, 0.05)
mesh.edges[8].crease = gauss(0.5, 0.125)
elif shape == 4:
for i in [5, 6, 7, 10, 14, 16, 19, 21]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif shape == 7:
for i in range(18):
if i in [0, 1, 2, 3, 6, 7, 8, 9, 13, 16]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif i in [11,17]:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 8:
for i in range(12):
if i in [0, 3, 8, 9, 10]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif i == 11:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 9:
for i in range(12):
if i in [0, 3, 4, 11]:
mesh.edges[i].crease = gauss(0.5, 0.125)
else:
mesh.edges[i].crease = gauss(0.25, 0.05)
elif shape == 10:
for i in range(12):
if i in [0, 2, 3, 4, 8, 11]:
mesh.edges[i].crease = gauss(0.5, 0.125)
elif i in [1, 5, 7]:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
elif shape == 11:
for i in range(11):
if i in [1, 2, 3, 4, 8, 11]:
mesh.edges[i].crease = gauss(0.25, 0.05)
else:
mesh.edges[i].crease = gauss(0.125, 0.025)
return obj.object
## return name
# Artificially skews a normal (gaussian) distribution. This will not create
# a continuous distribution curve but instead acts as a piecewise function.
# This linearly scales the output on one side to fit the bounds.
#
# Example output histograms:
#
# Upper skewed: Lower skewed:
# | ▄ | _
# | █ | █
# | █_ | █
# | ██ | _█
# | _██ | ██
# | _▄███_ | ██ _
# | ▄██████ | ▄██▄█▄_
# | _█▄███████ | ███████
# | _██████████_ | ████████▄▄█_ _
# | _▄▄████████████ | ████████████▄█_
# | _▄_ ▄███████████████▄_ | _▄███████████████▄▄_
# ------------------------- -----------------------
# |mu |mu
# Histograms were generated in R (http://www.r-project.org/) based on the
# calculations below and manually duplicated here.
#
# param: mu - mu is the mean of the distribution.
# sigma - sigma is the standard deviation of the distribution.
# bounds - bounds[0] is the lower bound and bounds[1]
# is the upper bound.
# upperSkewed - if the distribution is upper skewed.
# return: out - Randomly generated value from the skewed distribution.
#
# @todo: Because NumPy's random value generators are faster when called
# a bunch of times at once, maybe allow this to generate and return
# multiple values at once?
def skewedGauss(mu, sigma, bounds, upperSkewed=True):
raw = gauss(mu, sigma)
# Quicker to check an extra condition than do unnecessary math. . . .
if raw < mu and not upperSkewed:
out = ((mu - bounds[0]) / (3 * sigma)) * raw + ((mu * (bounds[0] - (mu - 3 * sigma))) / (3 * sigma))
elif raw > mu and upperSkewed:
out = ((mu - bounds[1]) / (3 * -sigma)) * raw + ((mu * (bounds[1] - (mu + 3 * sigma))) / (3 * -sigma))
else:
out = raw
return out
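# Illustrative usage (a sketch, not part of the original script): with the mu,
# sigma, and bounds computed in generateRocks() below, a call such as
#   sizeX = skewedGauss(muX, sigmaX, scaleX, upperSkewX)   # hypothetical name
# passes draws below mu through unchanged when upperSkewed is True and
# linearly rescales draws above mu so that a draw at mu + 3 * sigma lands
# exactly on bounds[1]; a lower-skewed call mirrors this on the other side.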
# @todo create a def for generating an alpha and beta for a beta distribution
# given a mu, sigma, and an upper and lower bound. In profiling this proved
# faster and produced a much better distribution curve, provided multiple
# iterations happen within this function; otherwise it was slower.
# This might be scrapped because of the bounds placed on mu and sigma:
#
# For alpha > 1 and beta > 1:
# mu^2 - mu^3 mu^3 - mu^2 + mu
# ----------- < sigma < ----------------
# 1 + mu 2 - mu
#
##def generateBeta(mu, sigma, scale, repetitions=1):
## results = []
##
## return results
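# A minimal method-of-moments sketch for the @todo above (an illustration, not
# part of the original script): given a mean and standard deviation on (0, 1),
# derive the alpha/beta shape parameters of a beta distribution. Scaling to
# arbitrary bounds and the repetition count hinted at in the stub are left out.
def betaParamsFromMoments(mu, sigma):
    var = sigma ** 2
    # Positive shapes require 0 < mu < 1 and var < mu * (1 - mu).
    common = mu * (1 - mu) / var - 1
    return (mu * common, (1 - mu) * common)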
# Creates rock objects:
def generateRocks(context, scaleX, skewX, scaleY, skewY, scaleZ, skewZ,
scale_fac, detail, display_detail, deform, rough,
smooth_fac, smooth_it, mat_enable, color, mat_bright,
mat_rough, mat_spec, mat_hard, mat_use_trans, mat_alpha,
mat_cloudy, mat_IOR, mat_mossy, numOfRocks=1, userSeed=1.0,
scaleDisplace=False, randomSeed=True):
global lastRock
newMat = []
sigmaX = 0
sigmaY = 0
sigmaZ = 0
upperSkewX = False
upperSkewY = False
upperSkewZ = False
shift = 0
lastUsedTex = 1
vertexScaling = []
# Seed the random Gaussian value generator:
if randomSeed:
seed(int(time.time()))
else:
seed(userSeed)
if mat_enable:
# Calculate the number of materials to use.
# If less than 10 rocks are being generated, generate one material
# per rock.
# If more than 10 rocks are being generated, generate
# ceil[(1/9)n + (80/9)] materials.
# -> 100 rocks will result in 20 materials
# -> 1000 rocks will result in 120 materials.
if numOfRocks < 10:
numOfMats = numOfRocks
else:
numOfMats = math.ceil((1/9) * numOfRocks + (80/9))
# newMat = generateMaterialsList(numOfMats)
# *** No longer needed on 9/6/2011 ***
# todo Set general material settings:
# *** todo completed 5/25/2011 ***
# Material roughness actual max = 3.14. Needs scaling.
mat_rough *= 0.628
spec_IOR = 1.875 * (mat_spec ** 2) + 7.125 * mat_spec + 1
# Changed as material mapping is no longer needed.
# *** Complete 9/6/2011 ***
for i in range(numOfMats):
newMat.append(bpy.data.materials.new(name = 'stone'))
randomizeMaterial(newMat[i], color, mat_bright,
mat_rough, mat_spec, mat_hard, mat_use_trans,
mat_alpha, mat_cloudy, mat_IOR, mat_mossy,
spec_IOR)
# These values need to be really small to look good.
# So the user does not have to use such ridiculously small values:
deform /= 10
rough /= 100
# Verify that the min really is the min:
if scaleX[1] < scaleX[0]:
scaleX[0], scaleX[1] = scaleX[1], scaleX[0]
if scaleY[1] < scaleY[0]:
scaleY[0], scaleY[1] = scaleY[1], scaleY[0]
if scaleZ[1] < scaleZ[0]:
scaleZ[0], scaleZ[1] = scaleZ[1], scaleZ[0]
# todo: edit below to allow for skewing the distribution
# *** todo completed 4/22/2011 ***
# *** Code now generating "int not scriptable error" in Blender ***
#
# Calculate mu and sigma for a Gaussian distributed random number
# generation:
# If the lower and upper bounds are the same, skip the math.
#
    # sigma is the standard deviation of the values. Three standard deviations
    # cover roughly 99.7% of a normal distribution, which is where we want most
    # generated values to fall. Since the distribution might be skewed, we use
    # a third of the gap between the mean and the furthest bound as sigma and
    # scale the other side down after the number is generated.
if scaleX[0] != scaleX[1]:
skewX = (skewX + 1) / 2
muX = scaleX[0] + ((scaleX[1] - scaleX[0]) * skewX)
if skewX < 0.5:
sigmaX = (scaleX[1] - muX) / 3
else:
sigmaX = (muX - scaleX[0]) / 3
upperSkewX = True
else:
muX = scaleX[0]
if scaleY[0] != scaleY[1]:
skewY = (skewY + 1) / 2
muY = scaleY[0] + ((scaleY[1] - scaleY[0]) * skewY)
if skewY < 0.5:
sigmaY = (scaleY[1] - muY) / 3
else:
sigmaY = (muY - scaleY[0]) / 3
upperSkewY = True
else:
muY = scaleY[0]
if scaleZ[0] != scaleZ[1]:
skewZ = (skewZ + 1) / 2
muZ = scaleZ[0] + ((scaleZ[1] - scaleZ[0]) * skewZ)
if skewZ < 0.5:
sigmaZ = (scaleZ[1] - muZ) / 3
else:
sigmaZ = (muZ - scaleZ[0]) / 3
upperSkewZ = True
else:
        muZ = scaleZ[0]
for i in range(numOfRocks):
        # todo: enable different random values for each (x,y,z) coordinate for
# each vertex. This will add additional randomness to the shape of the
# generated rocks.
# *** todo completed 4/19/2011 ***
# *** Code is notably slower at high rock counts ***
rock = generateObject(context, muX, sigmaX, scaleX, upperSkewX, muY,
## name = generateObject(context, muX, sigmaX, scaleX, upperSkewX, muY,
sigmaY, scaleY, upperSkewY, muZ, sigmaZ, scaleZ,
upperSkewZ, i, lastRock, scaleDisplace, scale_fac)
## rock = bpy.data.objects[name]
# todo Map what the two new textures will be:
# This is not working. It works on paper so . . . ???
# *** todo completed on 4/23/2011 ***
# *** todo re-added as the first rock is getting
# 'Texture.001' twice. ***
# *** todo completed on 4/25/2011 ***
# *** Script no longer needs to map new texture names 9/6/2011 ***
# Create the four new textures:
# todo Set displacement texture parameters:
# *** todo completed on 5/31/2011 ***
# Voronoi has been removed from being an option for the fine detail
# texture.
texTypes = ['CLOUDS', 'MUSGRAVE', 'DISTORTED_NOISE', 'STUCCI', 'VORONOI']
newTex = []
        # The first texture is to give a more random base shape appearance:
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[1]))
randomizeTexture(newTex[0], 0)
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[4]))
randomizeTexture(newTex[1], 0)
if numpy:
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1)[0] / 2.125))]))
randomizeTexture(newTex[2], 1)
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1)[0] / 2.125))]))
randomizeTexture(newTex[3], 2)
else:
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1) / 2.125))]))
randomizeTexture(newTex[2], 1)
newTex.append(bpy.data.textures.new(name = 'rock_displacement',
type = texTypes[int(round(weibull(1, 1) / 2.125))]))
randomizeTexture(newTex[3], 2)
# Add modifiers:
rock.modifiers.new(name = "Subsurf", type = 'SUBSURF')
rock.modifiers.new(name = "Subsurf", type = 'SUBSURF')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
rock.modifiers.new(name = "Displace", type = 'DISPLACE')
# If smoothing is enabled, allow a little randomness into the
# smoothing factor. Then add the smoothing modifier.
if smooth_fac > 0.0 and smooth_it > 0:
rock.modifiers.new(name = "Smooth", type='SMOOTH')
rock.modifiers[6].factor = gauss(smooth_fac, (smooth_fac ** 0.5) / 12)
rock.modifiers[6].iterations = smooth_it
        # Make a call to random to keep things consistent:
else:
gauss(0, 1)
# Set subsurf modifier parameters:
rock.modifiers[0].levels = display_detail
rock.modifiers[0].render_levels = detail
rock.modifiers[1].levels = display_detail
rock.modifiers[1].render_levels = detail
# todo Set displacement modifier parameters:
# *** todo completed on 4/23/2011 ***
# *** toned down the variance on 4/26/2011 ***
# *** added third modifier on 4/28/2011 ***
# *** texture access changed on 9/6/2011 ***
rock.modifiers[2].texture = newTex[0]
rock.modifiers[2].strength = gauss(deform / 100, (1 / 300) * deform)
rock.modifiers[2].mid_level = 0
rock.modifiers[3].texture = newTex[1]
rock.modifiers[3].strength = gauss(deform, (1 / 3) * deform)
rock.modifiers[3].mid_level = 0
rock.modifiers[4].texture = newTex[2]
rock.modifiers[4].strength = gauss(rough * 2, (1 / 3) * rough)
rock.modifiers[5].texture = newTex[3]
rock.modifiers[5].strength = gauss(rough, (1 / 3) * rough)
# Set mesh to be smooth and fix the normals:
utils.smooth(rock.data)
## utils.smooth(bpy.data.meshes[name])
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.normals_make_consistent()
bpy.ops.object.editmode_toggle()
if mat_enable:
bpy.ops.object.material_slot_add()
rock.material_slots[0].material = newMat[randint(0, numOfMats - 1)]
# Store the last value of i:
shift = i
# Add the shift to lastRock:
lastRock += shift + 1
return
# Much of the code below is more-or-less imitation of other addons and as such
# I have left it undocumented.
class rocks(bpy.types.Operator):
"""Add rock objects"""
bl_idname = "mesh.rocks"
bl_label = "Add Rocks"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Add rocks"
# Get the preset values from the XML file.
# -> The script was morphed into a Python module
# to support this.
# Tell settings.py to parse the XML file with the settings.
# Then get the default values resulting from the parsing.
# Make a list containing the default values and append to that
# the presets specified in the same XML file. This list will
# be used to load preset values.
settings.parse()
defaults = settings.getDefault()
presetsList = [defaults]
presetsList += settings.getPresetLists()
presets = []
lastPreset = 0
# Build the presets list for the enum property.
# This needs to be a for loop as the user might add presets to
# the XML file and those should show here:
for i in range(len(presetsList)):
value = str(i)
name = presetsList[i][0]
description = name + " preset values."
presets.append((value, name, description))
preset_values = EnumProperty(items = presets,
name = "Presets",
description = "Preset values for some rock types")
num_of_rocks = IntProperty(name = "Number of rocks",
description = "Number of rocks to generate. WARNING: Slow at high values!",
min = 1, max = 1048576,
soft_max = 20,
default = 1)
scale_X = FloatVectorProperty(name = "X scale",
description = "X axis scaling range.",
min = 0.0, max = 256.0, step = 1,
default = defaults[1], size = 2)
skew_X = FloatProperty(name = "X skew",
description = "X Skew ratio. 0.5 is no skew.",
min = -1.0, max = 1.0, default = defaults[4])
scale_Y = FloatVectorProperty(name = "Y scale",
description = "Y axis scaling range.",
min = 0.0, max = 256.0, step = 1,
default = defaults[2], size = 2)
skew_Y = FloatProperty(name = "Y skew",
description = "Y Skew ratio. 0.5 is no skew.",
min = -1.0, max = 1.0, default = defaults[5])
scale_Z = FloatVectorProperty(name = "Z scale",
description = "Z axis scaling range.",
min = 0.0, max = 256.0, step = 1,
default = defaults[3], size = 2)
skew_Z = FloatProperty(name = "Z skew",
description = "Z Skew ratio. 0.5 is no skew.",
min = -1.0, max = 1.0, default = defaults[6])
use_scale_dis = BoolProperty(name = "Scale displace textures",
description = "Scale displacement textures with dimensions. May cause streched textures.",
default = defaults[7])
scale_fac = FloatVectorProperty(name = "Scaling Factor",
description = "XYZ scaling factor. 1 = no scaling.",
min = 0.0001, max = 256.0, step = 0.1,
default = defaults[8], size = 3)
# @todo Possible to title this section "Physical Properties:"?
deform = FloatProperty(name = "Deformation",
description = "Rock deformation",
min = 0.0, max = 1024.0, default = defaults[9])
rough = FloatProperty(name = "Roughness",
description = "Rock roughness",
min = 0.0, max = 1024.0, default = defaults[10])
detail = IntProperty(name = "Detail level",
description = "Detail level. WARNING: Slow at high values!",
min = 1, max = 1024, default = defaults[11])
display_detail = IntProperty(name = "Display Detail",
description = "Display detail. Use a lower value for high numbers of rocks.",
min = 1, max = 128, default = defaults[12])
smooth_fac = FloatProperty(name = "Smooth Factor",
description = "Smoothing factor. A value of 0 disables.",
min = 0.0, max = 128.0, default = defaults[13])
smooth_it = IntProperty(name = "Smooth Iterations",
description = "Smoothing iterations. A value of 0 disables.",
min = 0, max = 128, default = defaults[14])
# @todo Add material properties
mat_enable = BoolProperty(name = "Generate materials",
description = "Generate materials and textures for the rocks",
default = defaults[15])
mat_color = FloatVectorProperty(name = "Color",
description = "Base color settings (RGB)",
min = 0.0, max = 1.0, default = defaults[16], size = 3, subtype = 'COLOR')
mat_bright = FloatProperty(name = "Brightness",
description = "Material brightness",
min = 0.0, max = 1.0, default = defaults[17])
mat_rough = FloatProperty(name = "Roughness",
description = "Material roughness",
min = 0.0, max = 5.0, default = defaults[18])
mat_spec = FloatProperty(name = "Shine",
description = "Material specularity strength",
min = 0.0, max = 1.0, default = defaults[19])
mat_hard = IntProperty(name = "Hardness",
description = "Material hardness",
min = 0, max = 511, default = defaults[20])
mat_use_trans = BoolProperty(name = "Use Transparency",
description = "Enables transparency in rocks (WARNING: SLOW RENDER TIMES)",
default = defaults[21])
mat_alpha = FloatProperty(name = "Alpha",
description = "Transparency of the rocks",
min = 0.0, max = 1.0, default = defaults[22])
mat_cloudy = FloatProperty(name = "Cloudy",
description = "How cloudy the transparent rocks look",
min = 0.0, max = 1.0, default = defaults[23])
mat_IOR = FloatProperty(name = "IoR",
description = "Index of Refraction",
min = 0.25, max = 4.0, soft_max = 2.5,
default = defaults[24])
mat_mossy = FloatProperty(name = "Mossiness",
description = "Amount of mossiness on the rocks",
min = 0.0, max = 1.0, default = defaults[25])
use_generate = BoolProperty(name = "Generate Rocks",
description = "Enable actual generation.",
default = defaults[26])
use_random_seed = BoolProperty(name = "Use a random seed",
description = "Create a seed based on time. Causes user seed to be ignored.",
default = defaults[27])
user_seed = IntProperty(name = "User seed",
description = "Use a specific seed for the generator.",
min = 0, max = 1048576, default = defaults[28])
def draw(self, context):
layout = self.layout
box = layout.box()
box.prop(self, 'num_of_rocks')
box = layout.box()
box.prop(self, 'scale_X')
box.prop(self, 'skew_X')
box.prop(self, 'scale_Y')
box.prop(self, 'skew_Y')
box.prop(self, 'scale_Z')
box.prop(self, 'skew_Z')
box.prop(self, 'use_scale_dis')
if self.use_scale_dis:
box.prop(self, 'scale_fac')
else:
self.scale_fac = utils.toFloats(self.defaults[8])
box = layout.box()
box.prop(self, 'deform')
box.prop(self, 'rough')
box.prop(self, 'detail')
box.prop(self, 'display_detail')
box.prop(self, 'smooth_fac')
box.prop(self, 'smooth_it')
box = layout.box()
box.prop(self, 'mat_enable')
if self.mat_enable:
box.prop(self, 'mat_color')
box.prop(self, 'mat_bright')
box.prop(self, 'mat_rough')
box.prop(self, 'mat_spec')
box.prop(self, 'mat_hard')
box.prop(self, 'mat_use_trans')
if self.mat_use_trans:
box.prop(self, 'mat_alpha')
box.prop(self, 'mat_cloudy')
box.prop(self, 'mat_IOR')
box.prop(self, 'mat_mossy')
box = layout.box()
box.prop(self, 'use_generate')
box.prop(self, 'use_random_seed')
if not self.use_random_seed:
box.prop(self, 'user_seed')
box.prop(self, 'preset_values')
def execute(self, context):
# The following "if" block loads preset values:
if self.lastPreset != int(self.preset_values):
self.scale_X = utils.toFloats(self.presetsList[int(self.preset_values)][1])
self.scale_Y = utils.toFloats(self.presetsList[int(self.preset_values)][2])
self.scale_Z = utils.toFloats(self.presetsList[int(self.preset_values)][3])
self.skew_X = float(self.presetsList[int(self.preset_values)][4])
self.skew_Y = float(self.presetsList[int(self.preset_values)][5])
self.skew_Z = float(self.presetsList[int(self.preset_values)][6])
self.use_scale_dis = bool(self.presetsList[int(self.preset_values)][7])
self.scale_fac = utils.toFloats(self.presetsList[int(self.preset_values)][8])
self.deform = float(self.presetsList[int(self.preset_values)][9])
self.rough = float(self.presetsList[int(self.preset_values)][10])
self.detail = int(self.presetsList[int(self.preset_values)][11])
self.display_detail = int(self.presetsList[int(self.preset_values)][12])
self.smooth_fac = float(self.presetsList[int(self.preset_values)][13])
self.smooth_it = int(self.presetsList[int(self.preset_values)][14])
self.mat_enable = bool(self.presetsList[int(self.preset_values)][15])
self.mat_color = utils.toFloats(self.presetsList[int(self.preset_values)][16])
self.mat_bright = float(self.presetsList[int(self.preset_values)][17])
self.mat_rough = float(self.presetsList[int(self.preset_values)][18])
self.mat_spec = float(self.presetsList[int(self.preset_values)][19])
self.mat_hard = int(self.presetsList[int(self.preset_values)][20])
self.mat_use_trans = bool(self.presetsList[int(self.preset_values)][21])
self.mat_alpha = float(self.presetsList[int(self.preset_values)][22])
self.mat_cloudy = float(self.presetsList[int(self.preset_values)][23])
self.mat_IOR = float(self.presetsList[int(self.preset_values)][24])
self.mat_mossy = float(self.presetsList[int(self.preset_values)][25])
self.use_generate = bool(self.presetsList[int(self.preset_values)][26])
self.use_random_seed = bool(self.presetsList[int(self.preset_values)][27])
self.user_seed = int(self.presetsList[int(self.preset_values)][28])
self.lastPreset = int(self.preset_values)
# todo Add deform, deform_Var, rough, and rough_Var:
# *** todo completed 4/23/2011 ***
# *** Eliminated "deform_Var" and "rough_Var" so the script is not
# as complex to use. May add in again as advanced features. ***
if self.use_generate:
generateRocks(context,
self.scale_X,
self.skew_X,
self.scale_Y,
self.skew_Y,
self.scale_Z,
self.skew_Z,
self.scale_fac,
self.detail,
self.display_detail,
self.deform,
self.rough,
self.smooth_fac,
self.smooth_it,
self.mat_enable,
self.mat_color,
self.mat_bright,
self.mat_rough,
self.mat_spec,
self.mat_hard,
self.mat_use_trans,
self.mat_alpha,
self.mat_cloudy,
self.mat_IOR,
self.mat_mossy,
self.num_of_rocks,
self.user_seed,
self.use_scale_dis,
self.use_random_seed)
return {'FINISHED'}
|
Passtechsoft/TPEAlpGen
|
blender/release/scripts/addons_contrib/add_mesh_rocks/rockgen.py
|
Python
|
gpl-3.0
| 69,120
|
../../../../../../../share/pyshared/ubuntuone-client/ubuntuone/platform/linux/messaging.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/ubuntuone-client/ubuntuone/platform/linux/messaging.py
|
Python
|
gpl-3.0
| 90
|
# Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'lookup_table.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from ecl import EclPrototype
class LookupTable(BaseCClass):
_alloc = EclPrototype("void* lookup_table_alloc_empty()" , bind = False)
_max = EclPrototype("double lookup_table_get_max_value( lookup_table )")
_min = EclPrototype("double lookup_table_get_min_value( lookup_table )")
_arg_max = EclPrototype("double lookup_table_get_max_arg( lookup_table )")
_arg_min = EclPrototype("double lookup_table_get_min_arg( lookup_table )")
_append = EclPrototype("void lookup_table_append( lookup_table , double , double )")
_size = EclPrototype("int lookup_table_get_size( lookup_table )")
_interp = EclPrototype("double lookup_table_interp( lookup_table , double)")
_free = EclPrototype("void lookup_table_free( lookup_table )")
_set_low_limit = EclPrototype("void lookup_table_set_low_limit( lookup_table , double)")
_set_high_limit = EclPrototype("void lookup_table_set_high_limit( lookup_table , double)")
_has_low_limit = EclPrototype("bool lookup_table_has_low_limit( lookup_table)")
_has_high_limit = EclPrototype("bool lookup_table_has_high_limit( lookup_table)")
def __init__(self, lower_limit=None, upper_limit=None):
super(LookupTable, self).__init__(self._alloc())
        if lower_limit is not None:
            self.setLowerLimit(lower_limit)
        if upper_limit is not None:
            self.setUpperLimit(upper_limit)
def getMaxValue(self):
self.assertSize(1)
return self._max()
def getMinValue(self):
self.assertSize(1)
return self._min()
def getMinArg(self):
self.assertSize(1)
return self._arg_min()
def getMaxArg(self):
self.assertSize(1)
return self._arg_max()
def assertSize(self, N):
if len(self) < N:
raise ValueError("Lookup table is too small")
def __len__(self):
return self._size()
@property
def size(self):
return len(self)
# Deprecated properties
@property
def max(self):
return self.getMaxValue()
@property
def min(self):
return self.getMinValue()
@property
def arg_max(self):
return self.getMaxArg()
@property
def arg_min(self):
return self.getMinArg()
def setLowerLimit(self, value):
self._set_low_limit(value)
def hasLowerLimit(self):
return self._has_low_limit()
def setUpperLimit(self, value):
self._set_high_limit(value)
def hasUpperLimit(self):
return self._has_high_limit()
def interp(self, x):
self.assertSize(2)
if x < self.getMinArg():
if not self.hasLowerLimit():
raise ValueError("Interpolate argument:%g is outside valid interval: [%g,%g]" % (x, self.getMinArg(), self.getMaxArg()))
elif x > self.getMaxArg():
if not self.hasUpperLimit():
raise ValueError("Interpolate argument:%g is outside valid interval: [%g,%g]" % (x, self.getMinArg(), self.getMaxArg()))
return self._interp(x)
def append(self, x, y):
self._append( x, y)
#todo: necessary???
def __del__(self):
self._free()
def free(self):
self._free( )
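# Illustrative usage (a sketch, not part of the original module; it assumes the
# native libecl library is available so the C prototypes above can be bound):
#   table = LookupTable(lower_limit=0.0, upper_limit=10.0)
#   table.append(0.0, 0.0)
#   table.append(1.0, 10.0)
#   table.interp(0.5)   # interpolates between the two appended points
#   len(table)          # -> 2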
|
Statoil/libecl
|
python/ecl/util/util/lookup_table.py
|
Python
|
gpl-3.0
| 4,010
|
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about object metadata."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW OF METADATA</B>
Objects can have associated metadata, which control aspects of how
GET requests are handled, including Content-Type, Cache-Control,
Content-Disposition, and Content-Encoding (discussed in more detail in
the subsections below). In addition, you can set custom metadata that
can be used by applications (e.g., tagging that particular objects possess
some property).
There are two ways to set metadata on objects:
- at upload time you can specify one or more headers to associate with
objects, using the gsutil -h option. For example, the following command
would cause gsutil to set the Content-Type and Cache-Control for each
of the files being uploaded:
gsutil -h "Content-Type:text/html" \\
-h "Cache-Control:public, max-age=3600" cp -r images \\
gs://bucket/images
Note that -h is an option on the gsutil command, not the cp sub-command.
- You can set or remove metadata fields from already uploaded objects using
the gsutil setmeta command. See "gsutil help setmeta".
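      For example, the following (illustrative) command would set a one-hour
      Cache-Control on objects that have already been uploaded:
        gsutil setmeta -h "Cache-Control:public, max-age=3600" \\
          gs://bucket/images/*.png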
More details about specific pieces of metadata are discussed below.
<B>CONTENT TYPE</B>
The most commonly set metadata is Content-Type (also known as MIME type),
which allows browsers to render the object properly.
gsutil sets the Content-Type automatically at upload time, based on each
filename extension. For example, uploading files with names ending in .txt
will set Content-Type to text/plain. If you're running gsutil on Linux or
MacOS and would prefer to have content type set based on naming plus content
examination, see the use_magicfile configuration variable in the gsutil/boto
configuration file (See also "gsutil help config"). In general, using
use_magicfile is more robust and configurable, but is not available on
Windows.
If you specify a Content-Type header with -h when uploading content (like the
example gsutil command given in the previous section), it overrides the
Content-Type that would have been set based on filename extension or content.
This can be useful if the Content-Type detection algorithm doesn't work as
desired for some of your files.
You can also completely suppress content type detection in gsutil, by
specifying an empty string on the Content-Type header:
gsutil -h 'Content-Type:' cp -r images gs://bucket/images
In this case, the Google Cloud Storage service will not attempt to detect
the content type. In general this approach will work better than using
filename extension-based content detection in gsutil, because the list of
filename extensions is kept more current in the server-side content detection
system than in the Python library upon which gsutil content type detection
depends. (For example, at the time of writing this, the filename extension
".webp" was recognized by the server-side content detection system, but
not by gsutil.)
<B>CACHE-CONTROL</B>
Another commonly set piece of metadata is Cache-Control, which allows
you to control whether and for how long browser and Internet caches are
allowed to cache your objects. Cache-Control only applies to objects with
a public-read ACL. Non-public data are not cacheable.
Here's an example of uploading an object set to allow caching:
gsutil -h "Cache-Control:public,max-age=3600" cp -a public-read \\
-r html gs://bucket/html
This command would upload all files in the html directory (and subdirectories)
and make them publicly readable and cacheable, with cache expiration of
one hour.
Note that if you allow caching, at download time you may see older versions
of objects after uploading a newer replacement object. Note also that because
objects can be cached at various places on the Internet there is no way to
force a cached object to expire globally (unlike the way you can force your
browser to refresh its cache).
Another use of the Cache-Control header is through the "no-transform" value,
which instructs Google Cloud Storage to not apply any content transformations
based on specifics of a download request, such as removing gzip
content-encoding for incompatible clients. Note that this parameter is only
respected by the XML API. The Google Cloud Storage JSON API respects only the
no-cache and max-age Cache-Control parameters.
<B>CONTENT-ENCODING</B>
You can specify a Content-Encoding to indicate that an object is compressed
(for example, with gzip compression) while maintaining its Content-Type.
You will need to ensure that the files have been compressed using the
specified Content-Encoding before using gsutil to upload them. Consider the
following example for Linux:
echo "Highly compressible text" | gzip > foo.txt
gsutil -h "Content-Encoding:gzip" -h "Content-Type:text/plain" \\
cp foo.txt gs://bucket/compressed
Note that this is different from uploading a gzipped object foo.txt.gz with
Content-Type: application/x-gzip because most browsers are able to
dynamically decompress and process objects served with Content-Encoding: gzip
based on the underlying Content-Type.
For compressible content, using Content-Encoding: gzip saves network and
storage costs, and improves content serving performance. However, for content
that is already inherently compressed (archives and many media formats, for
instance) applying another level of compression via Content-Encoding is
typically detrimental to both object size and performance and should be
avoided.
Note also that gsutil provides an easy way to cause content to be compressed
and stored with Content-Encoding: gzip: see the -z option in "gsutil help cp".
<B>CONTENT-DISPOSITION</B>
You can set Content-Disposition on your objects, to specify presentation
information about the data being transmitted. Here's an example:
gsutil -h 'Content-Disposition:attachment; filename=filename.ext' \\
cp -r attachments gs://bucket/attachments
Setting the Content-Disposition allows you to control presentation style
of the content, for example determining whether an attachment should be
automatically displayed vs should require some form of action from the user to
open it. See http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1
for more details about the meaning of Content-Disposition.
<B>CUSTOM METADATA</B>
  You can add your own custom metadata (e.g., for use by your application)
to an object by setting a header that starts with "x-goog-meta", for example:
gsutil -h x-goog-meta-reviewer:jane cp mycode.java gs://bucket/reviews
You can add multiple differently named custom metadata fields to each object.
<B>SETTABLE FIELDS; FIELD VALUES</B>
You can't set some metadata fields, such as ETag and Content-Length. The
fields you can set are:
- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-MD5
- Content-Type
- Any field starting with a matching Cloud Storage Provider
prefix, such as x-goog-meta- (i.e., custom metadata).
Header names are case-insensitive.
x-goog-meta- fields can have data set to arbitrary Unicode values. All
other fields must have ASCII values.
<B>VIEWING CURRENTLY SET METADATA</B>
You can see what metadata is currently set on an object by using:
gsutil ls -L gs://the_bucket/the_object
""")
class CommandOptions(HelpProvider):
"""Additional help about object metadata."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='metadata',
help_name_aliases=[
'cache-control', 'caching', 'content type', 'mime type', 'mime',
'type'],
help_type='additional_help',
help_one_line_summary='Working With Object Metadata',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/platform/gsutil/gslib/addlhelp/metadata.py
|
Python
|
gpl-3.0
| 8,672
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-05 16:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Producao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ano', models.IntegerField()),
('mes', models.IntegerField()),
('area_plantada', models.DecimalField(decimal_places=2, max_digits=10)),
('area_em_producao', models.DecimalField(decimal_places=2, max_digits=10)),
('area_colhida', models.DecimalField(decimal_places=2, max_digits=10)),
('area_em_formacao', models.DecimalField(decimal_places=2, max_digits=10)),
('irrigado', models.BooleanField(default=False)),
('municipio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Municipio')),
],
options={
'ordering': ['ano'],
},
),
migrations.CreateModel(
name='Produto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=100)),
('codigo', models.CharField(max_length=20)),
],
options={
'ordering': ['nome'],
},
),
migrations.AddField(
model_name='producao',
name='produto',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='agricultura.Produto'),
),
]
|
LEDS/sipac
|
sipac/agricultura/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 1,901
|
with open('myfile.txt', "w") as myFile:
for a in ["a", "b", "c"]:
myFile.write(str(a))
for a in [1,2,3,4,5,"6"]:
myFile.write(str(a))
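# myFile is closed automatically once the "with" block above exits, so the
# write below raises ValueError ("I/O operation on closed file").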
try:
myFile.write("ERRRRR")
except:
print("Error, what are you trying to do with closed file")
|
mayankjohri/LetsExplorePython
|
Section 1 - Core Python/Chapter 11 - Exceptions/code/read_file_proper.py
|
Python
|
gpl-3.0
| 263
|
import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import RTMPStream
SWF_URL = "http://www.gaminglive.tv/lib/flowplayer/flash/flowplayer.commercial-3.2.18.swf"
CHANNELS_API_URL = "http://api.gaminglive.tv/channels/{0}"
QUALITY_WEIGHTS = {
"source": 5,
"live": 5,
"1080": 4,
"720": 3,
"480": 2,
"medium": 2,
"360": 1,
"low": 1
}
_url_re = re.compile("""
http(s)?://www\.gaminglive\.tv
/\#/channels/(?P<channel>[^/]+)
""", re.VERBOSE)
_quality_re = re.compile("[^/]+-(?P<quality>[^/]+)")
_channel_schema = validate.Schema(
{
validate.optional("state"): {
"stream": {
"qualities": [validate.text],
"rootUrl": validate.url(scheme="rtmp")
}
}
},
validate.get("state")
)
class GamingLive(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, key):
weight = QUALITY_WEIGHTS.get(key)
if weight:
return weight, "gaminglive"
return Plugin.stream_weight(key)
def _get_quality(self, label):
match = _quality_re.match(label)
if match:
return match.group("quality")
return "live"
def _get_streams(self):
match = _url_re.match(self.url)
channel = match.group("channel")
res = http.get(CHANNELS_API_URL.format(channel))
json = http.json(res, schema=_channel_schema)
if not json:
return
streams = {}
for quality in json["stream"]["qualities"]:
stream_name = self._get_quality(quality)
stream = RTMPStream(self.session, {
"rtmp": json["stream"]["rootUrl"],
"playpath": quality,
"pageUrl": self.url,
"swfVfy": SWF_URL,
"live": True
})
streams[stream_name] = stream
return streams
__plugin__ = GamingLive
|
noam09/kodi
|
script.module.israeliveresolver/lib/livestreamer/plugins/gaminglive.py
|
Python
|
gpl-3.0
| 2,068
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Canonical
#
# Authors:
# Tin Tvrtković
# Jared Ravetch
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Rust module"""
from contextlib import suppress
from gettext import gettext as _
from glob import glob
import logging
import os
import re
from bs4 import BeautifulSoup
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.network.download_center import DownloadItem, DownloadCenter
from umake.tools import get_current_arch, add_env_to_user, ChecksumType, \
MainLoop, Checksum
from umake.ui import UI
logger = logging.getLogger(__name__)
class RustCategory(umake.frameworks.BaseCategory):
def __init__(self):
super().__init__(name="Rust", description=_("Rust language"),
logo_path=None)
class RustLang(umake.frameworks.baseinstaller.BaseInstaller):
# Button labels on the download page.
arch_trans = {
"amd64": "64-bit",
"i386": "32-bit"
}
def __init__(self, category):
super().__init__(name="Rust Lang",
description=_("The official Rust distribution"),
is_category_default=True,
category=category, only_on_archs=['i386', 'amd64'],
download_page="https://www.rust-lang.org/downloads.html",
checksum_type=ChecksumType.sha256,
dir_to_decompress_in_tarball="rust-*")
def parse_download_link(self, line, in_download):
"""Parse Rust download link, expect to find a url"""
url, sha1 = (None, None)
arch = get_current_arch()
if "{}-unknown-linux-gnu.tar.gz".format(self.arch_trans[arch]) in line:
in_download = True
if in_download:
p = re.search(r'href="(.*)">', line)
with suppress(AttributeError):
url = p.group(1)
p = re.search(r'<td><tt>(\w+)</tt></td>', line)
with suppress(AttributeError):
sha1 = p.group(1)
if "</tr>" in line:
in_download = False
if url is None and sha1 is None:
return (None, in_download)
return ((url, sha1), in_download)
@MainLoop.in_mainloop_thread
def get_metadata_and_check_license(self, result):
"""Override this so we can use BS and fetch the checksum separately."""
logger.debug("Fetched download page, parsing.")
page = result[self.download_page]
error_msg = page.error
if error_msg:
logger.error("An error occurred while downloading {}: {}".format(self.download_page_url, error_msg))
UI.return_main_screen(status_code=1)
soup = BeautifulSoup(page.buffer, 'html.parser')
link = (soup.find('div', class_="install")
.find('td', class_="inst-type", text="Linux (.tar.gz)")
.parent
.find(text=self.arch_trans[get_current_arch()])
.parent
.parent)
if link is None:
logger.error("Can't parse the download URL from the download page.")
UI.return_main_screen(status_code=1)
download_url = link.attrs['href']
checksum_url = download_url + '.sha256'
logger.debug("Found download URL: " + download_url)
logger.debug("Downloading checksum first, from " + checksum_url)
def checksum_downloaded(results):
checksum_result = next(iter(results.values())) # Just get the first.
if checksum_result.error:
logger.error(checksum_result.error)
UI.return_main_screen(status_code=1)
checksum = checksum_result.buffer.getvalue().decode('utf-8').split()[0]
logger.info('Obtained SHA256 checksum: ' + checksum)
self.download_requests.append(DownloadItem(download_url,
checksum=Checksum(ChecksumType.sha256, checksum),
ignore_encoding=True))
self.start_download_and_install()
DownloadCenter([DownloadItem(checksum_url)], on_done=checksum_downloaded, download=False)
def post_install(self):
"""Add rust necessary env variables"""
add_env_to_user(self.name, {"PATH": {"value": "{}:{}".format(os.path.join(self.install_path, "rustc", "bin"),
os.path.join(self.install_path, "cargo", "bin"))},
"LD_LIBRARY_PATH": {"value": os.path.join(self.install_path, "rustc", "lib")}})
        # adjust for rust 1.5: some symlink magic to make the stdlib crates available
os.chdir(os.path.join(self.install_path, "rustc", "lib"))
os.rename("rustlib", "rustlib.init")
os.symlink(glob(os.path.join('..', '..', 'rust-std-*', 'lib', 'rustlib'))[0], 'rustlib')
os.symlink(os.path.join('..', 'rustlib.init', 'etc'), os.path.join('rustlib', 'etc'))
UI.delayed_display(DisplayMessage(_("You need to restart your current shell session for your {} installation "
"to work properly").format(self.name)))
|
Tankypon/ubuntu-make
|
umake/frameworks/rust.py
|
Python
|
gpl-3.0
| 5,879
|
# -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import set_cookie
class TurbobitNet(Account):
__name__ = "TurbobitNet"
__type__ = "account"
__version__ = "0.07"
__status__ = "testing"
__description__ = """TurbobitNet account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
def grab_info(self, user, password, data):
html = self.load("http://turbobit.net")
m = re.search(r'<u>Turbo Access</u> to ([\d.]+)', html)
if m is not None:
premium = True
validuntil = time.mktime(time.strptime(m.group(1), "%d.%m.%Y"))
else:
premium = False
validuntil = -1
return {'premium': premium, 'trafficleft': -1, 'validuntil': validuntil}
def signin(self, user, password, data):
set_cookie(self.req.cj, "turbobit.net", "user_lang", "en")
html = self.load("http://turbobit.net/user/login",
post={"user[login]" : user,
"user[pass]" : password,
"user[submit]": "Login"})
        if '<div class="menu-item user-name">' not in html:
self.fail_login()
|
jansohn/pyload
|
module/plugins/accounts/TurbobitNet.py
|
Python
|
gpl-3.0
| 1,317
|
from .oauth import BaseOAuth1
class WithingsOAuth(BaseOAuth1):
name = 'withings'
AUTHORIZATION_URL = 'https://developer.health.nokia.com/account/authorize'
REQUEST_TOKEN_URL = 'https://developer.health.nokia.com/account/request_token'
ACCESS_TOKEN_URL = 'https://developer.health.nokia.com/account/access_token'
ID_KEY = 'userid'
def get_user_details(self, response):
"""Return user details from Withings account"""
return {'userid': response['access_token']['userid'],
'email': ''}
|
abhikumar22/MYBLOG
|
blg/Lib/site-packages/social_core/backends/withings.py
|
Python
|
gpl-3.0
| 542
|
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
'View board'
import xml.dom.minidom
from parser import ParserBoard
from tryton.gui.window.view_board.action import Action
class ViewBoard(object):
'View board'
def __init__(self, arch, context=None):
self.context = context
xml_dom = xml.dom.minidom.parseString(arch)
parser = ParserBoard(context)
for node in xml_dom.childNodes:
if not node.nodeType == node.ELEMENT_NODE:
continue
self.widget, self.widgets = parser.parse(node)
break
self.actions = [x for x in self.widgets if isinstance(x, Action)]
for action in self.actions:
action.signal_connect(self, 'active-changed',
self._active_changed)
self.widget.show_all()
self._active_changed(None)
def widget_get(self):
return self.widget
def reload(self):
for widget in self.widgets:
widget.display()
def _active_changed(self, event_action, *args):
for action in self.actions:
if action == event_action:
continue
action.update_domain(self.actions)
|
mediafactory/tryton_client_desktop
|
tryton/gui/window/view_board/view_board.py
|
Python
|
gpl-3.0
| 1,291
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
ST STM32 JTAG protocol decoder.
TODO.
Details:
https://en.wikipedia.org/wiki/STM32
http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/REFERENCE_MANUAL/CD00171190.pdf (e.g. chapter 31.7: "JTAG debug port")
'''
from .pd import *
|
jhol/libsigrokdecode
|
decoders/jtag_stm32/__init__.py
|
Python
|
gpl-3.0
| 1,090
|
import site
import sys
from os import path
def extendsyspath():
# we want to add the current directory and the vendor/lib/python directory
# at the beginning of sys.path
root_dir = path.abspath(path.join(path.dirname(path.abspath(__file__)), "..", ".."))
prev_sys_path = list(sys.path) # make a copy of the list
site.addsitedir(path.join(root_dir, "vendor/lib/python"))
# Now, move the new items to the front of sys.path. (via virtualenv)
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
|
testbhearsum/balrog
|
src/auslib/util/thirdparty.py
|
Python
|
mpl-2.0
| 665
|
from flask import request, Response, jsonify, current_app, session
from flask_oauth import OAuth
from datetime import datetime, timedelta, date
from functools import update_wrapper
from mongokit import cursor
from bson import ObjectId
from models import connection
from config import APIHOST, YT_SERVICE, BYU_WS_ID, BYU_SHARED_SECRET
from urllib2 import Request, urlopen, URLError
import json, byu_ws_sdk, requests, re, os, mimetypes
import time
class NoModelException(Exception):
pass
class OAuthProvider():
client_id=None
client_secret=None
base_url=None
authorize_url=None
request_token_url=None
request_token_params={'scope': None,'response_type': 'code'}
access_token_url=None
token_verify_url=None
access_token_method='POST'
access_token_params={'grant_type': 'authorization_code'}
oauth=OAuth()
remote_app=None
def __init__(self,providerService):
self.remote_app=self.oauth.remote_app(providerService,
base_url=self.base_url,
authorize_url=self.authorize_url,
request_token_url=self.request_token_url,
request_token_params=self.request_token_params,
access_token_url=self.access_token_url,
access_token_method=self.access_token_method,
access_token_params=self.access_token_params,
consumer_key=self.client_id,
consumer_secret=self.client_secret)
def get_remote_app(self):
return self.remote_app
class Resource(object):
collection=connection.test.test
model=collection.TestObject
namespace="hummedia:id/object"
endpoint="test"
bundle=None
request=None
part=None
manual_request={}
disallowed_atts=[]
override_only_triggers=[]
override_only=False
def __init__(self,request=None,bundle=None, client=None,**kwargs):
if bundle:
self.bundle=bundle
self.set_resource()
if client:
self.part=self.client_process()
else:
self.part=self.serialize_bundle(self.bundle)
elif request:
self.request=request
else:
self.manual_request=kwargs
self.set_disallowed_atts()
        if self.request and self.request.args.get('inhibitor') in self.override_only_triggers:
            self.override_only=True
        if not self.model:
            raise NoModelException("You have to declare the model for the resource")
def set_disallowed_atts(self):
pass
def patch(self,id):
from bson.objectid import ObjectId
self.bundle=self.model.find_one({'$or': [{'_id': str(id)}, {'_id': ObjectId(id)}]})
if self.acl_write_check(self.bundle):
setattrs=self.set_attrs()
if setattrs.get("resp")==200:
return self.save_bundle()
else:
return bundle_400(setattrs.get("msg"))
else:
return action_401()
def post(self,id=None):
if self.acl_write_check():
self.bundle=self.model()
self.bundle["_id"]=str(ObjectId(id))
self.preprocess_bundle()
setattrs=self.set_attrs()
if setattrs.get("resp")==200:
return self.save_bundle()
else:
return bundle_400(setattrs.get("msg"))
else:
return action_401()
def put(self,id):
return self.post(id)
def get(self,id,limit=0,projection=None):
q=self.set_query()
if id:
try:
q['_id']=str(ObjectId(id))
except Exception as e:
return bundle_400("The ID you submitted is malformed.")
self.bundle=self.get_bundle(q)
if self.bundle:
self.bundle=self.auth_filter(self.bundle)
if not self.bundle:
return action_401()
self.set_resource()
if self.request.args.get("client",None):
return self.client_process()
else:
return self.serialize_bundle(self.bundle)
else:
return bundle_404()
else:
proj_dict = None
if projection is not None:
proj_dict = {x:1 for x in projection}
self.bundle=self.collection.find(q, proj_dict).limit(limit)
return self.get_list()
def delete(self,id):
if self.acl_write_check():
self.bundle=self.model.find_one({'_id': str(id)})
self.delete_associated(id)
return self.delete_obj()
else:
return action_401()
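    # Descriptive note (not from the original module): read access is granted
    # when the read_override hook allows it; otherwise, unless the resource is
    # marked override_only, access requires a faculty role, an allowed
    # dc:coverage, membership in dc:rights.read, or (for non-nested objects)
    # being the dc:creator.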
def acl_read_check(self,obj,username,allowed,is_nested_obj=False,role="student"):
if self.read_override(obj,username,role):
return True
if not self.override_only:
return any([role=="faculty",obj["@graph"].get("dc:coverage") in allowed,username in obj["@graph"].get("dc:rights",{}).get("read",[]),not is_nested_obj and obj["@graph"].get("dc:creator")==username])
return False
def read_override(self,obj,username,role):
return False
def acl_write_check(self,bundle=None):
from auth import get_profile
atts=get_profile()
if atts['superuser'] or (atts['role']=='faculty' and not bundle):
return True
if bundle:
if bundle["@graph"].get("dc:creator")==atts['username'] or atts['username'] in bundle['@graph']["dc:rights"]["write"]:
return True
return False
def auth_filter(self,bundle=None):
from auth import get_profile
atts=get_profile()
if not atts['username']:
filtered_bundle=self.acl_filter(bundle=bundle)
elif not atts['superuser']:
filtered_bundle=self.acl_filter(["public","BYU"],atts['username'],atts['role'],bundle)
else:
filtered_bundle=bundle if bundle else self.bundle
return filtered_bundle
def acl_filter(self,allowed=["public"],username="unauth",role=None,bundle=None):
if not bundle:
bundle=self.bundle
if type(bundle)==cursor.Cursor:
bundle=list(bundle)
for obj in bundle[:]:
if not self.acl_read_check(obj,username,allowed,role=role):
bundle.remove(obj)
elif not self.acl_read_check(bundle,username,allowed,role=role):
bundle={}
return bundle
def retain(self,obj,username):
return False
def get_bundle(self,q):
return self.collection.find_one(q)
def get_list(self):
self.bundle=self.auth_filter()
return mongo_jsonify(list(self.bundle))
def serialize_bundle(self,payload):
return mongo_jsonify(payload)
def set_resource(self):
self.bundle["resource"]=uri_pattern(str(self.bundle["_id"]),APIHOST+"/"+self.endpoint)
def set_attrs(self):
for (k,v) in self.request.json.items():
if k in self.model.structure and k not in self.disallowed_atts:
if self.model.structure[k]==type(2):
self.bundle[k]=int(v)
elif self.model.structure[k]==type(2.0):
self.bundle[k]=float(v)
elif self.model.structure[k]==type(u""):
self.bundle[k]=unicode(v)
elif type(self.model.structure[k])==type([]):
self.bundle[k]=[]
for i in v:
self.bundle[k].append(i)
else:
self.bundle[k]=v
return ({"resp":200})
def preprocess_bundle(self):
pass
def save_bundle(self):
try:
self.bundle.save()
return self.get(self.bundle["_id"])
except Exception as e:
return bundle_400("The request was malformed: %s" % (e))
def client_process(self):
return self.bundle
def set_query(self):
return {}
def delete_obj(self):
try:
self.bundle.delete()
return jsonify({"success":"True"})
except Exception as e:
return bundle_400("The request was malformed: %s" % (e))
def delete_associated(self,id):
pass
def dispatch(self,id):
methods={"GET":self.get,"POST":self.post,"PUT":self.put,"PATCH":self.patch,"DELETE":self.delete}
return methods[self.request.method](id)
class mongokitJSON(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime, date)):
return int(time.mktime(obj.timetuple()))
else:
try:
return json.JSONEncoder.default(self, obj)
except TypeError:
return str(obj)
def get_enrollments():
from auth import get_user, get_userid
if not get_user():
return []
if get_user()=="B3yGtjkIFz":
return["ENGL 999 001 20135","SPAN 999 001 20135"]
if "enrollments" in session:
return session.get('enrollments')
url="https://ws.byu.edu/rest/v1.0/academic/registration/studentschedule/"+get_userid()+"/"+getCurrentSem()
headerVal = byu_ws_sdk.get_http_authorization_header(BYU_WS_ID, BYU_SHARED_SECRET, byu_ws_sdk.KEY_TYPE_API,byu_ws_sdk.ENCODING_NONCE,actor=get_user(),url=url,httpMethod=byu_ws_sdk.HTTP_METHOD_GET,actorInHash=True)
res=requests.get(url, headers={'Authorization': headerVal})
courses=[]
try:
content=json.loads(res.content)['WeeklySchedService']['response']
except ValueError:
content={"schedule_table":[]}
for course in content["schedule_table"]:
courses.append(" ".join((course['course'],course['section'],content['year_term'])))
session['enrollments']=courses
return courses
def is_enrolled(obj):
try:
return not set(get_enrollments()).isdisjoint(obj["@graph"]["dc:relation"])
except (TypeError,KeyError):
return False
def crossdomain(origin=None, methods=None, headers=None, credentials=False,
max_age=21600, attach_to_all=True,
automatic_options=True,nocache=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_origin():
return request.headers.get('Origin')
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = f(*args, **kwargs)
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
if origin=="*":
h['Access-Control-Allow-Origin'] = origin
else:
h['Access-Control-Allow-Origin'] = get_origin()
if credentials:
h['Access-Control-Allow-Credentials'] = "true"
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
if nocache:
h['Last-Modified'] = datetime.now()
h['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'
return resp
f.provide_automatic_options = False
f.required_methods=['OPTIONS']
return update_wrapper(wrapped_function, f)
return decorator
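# Illustrative usage (a sketch, not from the original module): the decorator is
# meant to wrap Flask views so browsers may call them cross-origin, e.g.
#   @app.route("/videos")                       # hypothetical route
#   @crossdomain(origin="*", methods=["GET", "POST"], credentials=True)
#   def videos():
#       ...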
def mongo_jsonify(obj):
return Response(json.dumps(obj, cls=mongokitJSON),status=200,mimetype="application/json")
def bundle_404():
return Response("The object was not found",status=404,mimetype="text/plain")
def endpoint_404():
return Response("That service does not exist",status=404,mimetype="text/plain")
def bundle_400(e):
return Response(e,status=400,mimetype="text/plain")
def action_401():
return Response("You do not have permission to perform that action.",status=401,mimetype="text/plain")
def plain_resp(obj):
return Response(obj,status=200,mimetype="text/plain")
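# Descriptive note (not from the original module): parse_npt splits a normal-
# play-time string on ":" and ","; e.g. parse_npt("npt:12.5,30") returns
# {"start": "12.5", "end": "30"}, and a missing end value defaults to "0".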
def parse_npt(nptstr):
times=nptstr.split(":")[1]
(start,end)=times.split(",")
end=end if end.strip() else "0"
return {"start":start,"end":end}
def resolve_type(t):
return t.split("/")[-1]
def uri_pattern(id,host=""):
return "%s/%s" % (host,id)
def getYtThumbs(ids=[]):
ytThumbs={}
if len(ids):
req=Request(YT_SERVICE+"&id=%s" % (",".join(ids)))
try:
res=urlopen(req)
j=json.loads(res.read())
res.close()
except:
return dict.fromkeys(ids, {'poster': None, 'thumb': None})
for vid in j["items"]:
ytThumbs[vid["id"]]={"poster":vid["snippet"]["thumbnails"]["high"]["url"],"thumb":vid["snippet"]["thumbnails"]["default"]["url"]}
return ytThumbs
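# Descriptive note (not from the original module): getCurrentSem builds a BYU
# year+term code from today's date, e.g. a February date in 2015 yields
# "20151"; the trailing digit steps through 1/3/4/5 over the calendar year.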
def getCurrentSem():
today=datetime.now()
sem="1"
if today.month in [5,6]:
sem="4" if today.day > 22 else "3"
elif today.month in [7,8]:
sem="5" if today.day > 15 else "4"
elif today.month in [9,10,11,12]:
sem="5"
return str(today.year)+sem
def getVideoInfo(filename):
from subprocess import check_output
cmd = ['avprobe', filename, '-show_streams', '-show_format', '-loglevel', 'quiet']
if check_output('avprobe -version'.split()).find('avprobe 0.8') == -1:
cmd += ['-of','old']
output = check_output(cmd)
    # Ensure the data parsed below comes from the video stream, not the audio stream.
stream_match = re.search(
r'\[STREAM\]\s*(.*?codec_type=video.*?)\s*\[/STREAM\]',
output,
re.DOTALL)
format_match = re.search(r'\[FORMAT\]\s*(.*)\[/FORMAT\]', output, re.DOTALL)
lines = stream_match.group(1).splitlines() + format_match.group(1).splitlines()
data = {line.split('=')[0].strip(): line.split('=')[1].strip() for line in lines}
framerate = data['avg_frame_rate'].split('/')
data['framerate'] = round( float(framerate[0]) / float(framerate[1]), 3)
data['bitrate']=data['bit_rate']
return data
|
jlmcdonald/byu-hummedia-api
|
hummedia/helpers.py
|
Python
|
mpl-2.0
| 14,633
|
"""
OCL Organization Views
"""
#import requests
import logging
import json
import re
from django.shortcuts import redirect
from django.http import Http404
from django.views.generic import TemplateView, View
from django.views.generic.edit import FormView
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator
from braces.views import LoginRequiredMixin
from braces.views import JsonRequestResponseMixin
from django.http import HttpResponse
from apps.core.utils import SearchStringFormatter
from .forms import (OrganizationNewForm, OrganizationEditForm)
from .forms import (OrganizationMemberAddForm)
from libs.ocl import OclApi, OclSearch, OclConstants
logger = logging.getLogger('oclweb')
class OrganizationReadBaseView(TemplateView):
"""
Base class for Organization Read views.
"""
def get_org_details(self, org_id):
"""
Get the org details
"""
api = OclApi(self.request, debug=True)
search_result = api.get('orgs', org_id)
if search_result.status_code != 200:
if search_result.status_code == 404:
raise Http404
else:
search_result.raise_for_status()
return search_result.json()
def get_org_members(self, org_id):
"""
Load members of this org
"""
# TODO(paynejd@gmail.com): Access issue, error if user is not super user??
members = []
api = OclApi(self.request, debug=True)
search_results = api.get('orgs', org_id, 'members')
if search_results.status_code == 200:
members = search_results.json()
elif search_results.status_code != 404:
# TODO(paynejd@gmail.com): Confirm whether to pass or raise an exception here
#raise Exception(r.json())
pass
return members
def get_org_sources(self, org_id, search_params=None):
"""
Load org sources from the API and return OclSearch instance with results
"""
# TODO(paynejd@gmail.com): Validate the input parameters
# Perform the search
searcher = OclSearch(search_type=OclConstants.RESOURCE_NAME_SOURCES,
search_scope=OclConstants.SEARCH_SCOPE_RESTRICTED,
params=search_params)
api = OclApi(self.request, debug=True, facets=True)
search_response = api.get('orgs', org_id, 'sources', params=searcher.search_params)
if search_response.status_code == 404:
raise Http404
elif search_response.status_code != 200:
search_response.raise_for_status()
# Process the results
searcher.process_search_results(
search_type=searcher.search_type, search_response=search_response,
search_params=self.request.GET)
return searcher
    def get_org_collections(self, org_id, search_params=None):
        """
        Load org collections from the API and return OclSearch instance with results
        """
        # Perform the search
searcher = OclSearch(search_type=OclConstants.RESOURCE_NAME_COLLECTIONS,
search_scope=OclConstants.SEARCH_SCOPE_RESTRICTED,
params=search_params)
api = OclApi(self.request, debug=True, facets=True)
search_response = api.get('orgs', org_id, 'collections', params=searcher.search_params)
if search_response.status_code == 404:
raise Http404
elif search_response.status_code != 200:
search_response.raise_for_status()
# Process the results
searcher.process_search_results(
search_type=searcher.search_type, search_response=search_response,
search_params=self.request.GET)
return searcher
class OrganizationDetailsView(OrganizationReadBaseView):
""" Organization details view. """
template_name = "orgs/org_details.html"
def get_context_data(self, *args, **kwargs):
"""
Loads the organization details and its members.
"""
# Set the context
context = super(OrganizationDetailsView, self).get_context_data(*args, **kwargs)
# Load the organization
org_id = self.kwargs.get('org')
org = self.get_org_details(org_id)
# Load members of this org
members = self.get_org_members(org_id)
# Set the context
context['url_params'] = self.request.GET
context['selected_tab'] = 'Details'
context['org'] = org
context['members'] = members
return context
class OrganizationSourcesView(OrganizationReadBaseView):
""" Organization Sources view """
template_name = "orgs/org_sources.html"
def get_context_data(self, *args, **kwargs):
""" Load sources search results, facets/filters, etc. for the org """
context = super(OrganizationSourcesView, self).get_context_data(*args, **kwargs)
# Load the organization
org_id = self.kwargs.get('org')
org = self.get_org_details(org_id)
# Load the sources in this org, applying search parameters
searcher = self.get_org_sources(org_id, search_params=self.request.GET)
search_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_current_page = search_paginator.page(searcher.current_page)
# Set the context for the sources
context['selected_tab'] = 'Sources'
context['org'] = org
context['sources'] = searcher.search_results
context['source_page'] = search_current_page
context['source_pagination_url'] = self.request.get_full_path()
context['source_q'] = self.search_string
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = searcher.get_sort()
context['search_filters'] = searcher.search_filter_list
context['search_type'] = searcher.search_type
context['url_params'] = self.request.GET
context['search_params'] = searcher.search_params
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
return context
def get(self, request, *args, **kwargs):
self.search_string = request.GET.get('q', '')
SearchStringFormatter.add_wildcard(request)
if request.is_ajax():
api = OclApi(self.request, debug=True)
result = api.get('orgs', kwargs.get("org"), "sources", params={'limit':'0'})
return HttpResponse(json.dumps(result.json()), content_type="application/json")
        return super(OrganizationSourcesView, self).get(request, *args, **kwargs)
class OrganizationCollectionsView(OrganizationReadBaseView):
""" Organization Collection view """
template_name = "orgs/org_collections.html"
def get_context_data(self, *args, **kwargs):
""" Load sources search results, facets/filters, etc. for the org """
context = super(OrganizationCollectionsView, self).get_context_data(*args, **kwargs)
# Load the organization
org_id = self.kwargs.get('org')
org = self.get_org_details(org_id)
# Load the sources in this org, applying search parameters
searcher = self.get_org_collections(org_id, search_params=self.request.GET)
search_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_current_page = search_paginator.page(searcher.current_page)
# Set the context for the collections
context['selected_tab'] = 'Collections'
context['org'] = org
context['collections'] = searcher.search_results
context['collection_page'] = search_current_page
context['collection_pagination_url'] = self.request.get_full_path()
context['collection_q'] = searcher.get_query()
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = searcher.get_sort()
context['search_filters'] = searcher.search_filter_list
context['search_type'] = searcher.search_type
context['url_params'] = self.request.GET
context['search_params'] = searcher.search_params
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
return context
def get(self, request, *args, **kwargs):
if request.is_ajax():
api = OclApi(self.request, debug=True)
result = api.get('orgs', kwargs.get("org"), "collections", params={'limit': '0'})
return HttpResponse(json.dumps(result.json()), content_type="application/json")
        return super(OrganizationCollectionsView, self).get(request, *args, **kwargs)
class OrganizationAboutView(OrganizationReadBaseView):
"""
Organization about page.
"""
template_name = "orgs/org_about.html"
def get_context_data(self, *args, **kwargs):
"""
Loads the org details and about text.
"""
context = super(OrganizationAboutView, self).get_context_data(*args, **kwargs)
# Load the organization
org_id = self.kwargs.get('org')
org = self.get_org_details(org_id)
# Set about text
about = None
if 'extras' in org and isinstance(org['extras'], dict) and 'about' in org['extras']:
about = org['extras'].get('about')
# Set the context
context['url_params'] = self.request.GET
context['selected_tab'] = 'About'
context['org'] = org
context['about'] = about
return context
class OrganizationNewView(LoginRequiredMixin, FormView):
"""
View to create new organization
"""
form_class = OrganizationNewForm
template_name = "orgs/org_new.html"
def form_valid(self, form, *args, **kwargs):
"""
Submits the validated form data
"""
api = OclApi(self.request, debug=True)
# Prepare form data for submission, incl. renaming fields as necessary
org_id = form.cleaned_data.pop('short_name')
data = {
'id': org_id,
}
data.update(form.cleaned_data)
        if re.compile(r'^[a-zA-Z0-9\-]+$').match(org_id):
result = api.create_org(data)
# TODO: Catch exceptions that will be raised by Ocl lib.
if result.ok:
messages.add_message(self.request, messages.INFO, _('Organization Added'))
return redirect(reverse('org-details', kwargs={'org': org_id}))
# TODO: Add error messages from API to form.
else:
messages.add_message(self.request, messages.ERROR, result.json()['mnemonic'])
return super(OrganizationNewView, self).form_invalid(form)
else:
            validator_template = "Short Name '%s' is not valid. Allowed characters are letters (a-z, A-Z), digits (0-9) and hyphens (-)."
messages.add_message(self.request, messages.ERROR, validator_template % org_id)
return super(OrganizationNewView, self).form_invalid(form)
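    # For example, short names such as "OCL-Org1" satisfy the pattern above, while
    # values containing spaces or underscores (e.g. "My Org", "my_org") are rejected.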
# TODO: Implement OrganizationRetireView
class OrganizationRetireView(FormView):
"""
View to retire organization
"""
template_name = 'orgs/org_retire.html'
class OrganizationEditView(FormView):
"""
View to edit organization
"""
template_name = 'orgs/org_edit.html'
def get_form_class(self):
""" Trick to do some initial lookup """
self.org_id = self.kwargs.get('org')
api = OclApi(self.request, debug=True)
self.org = api.get('orgs', self.org_id).json()
return OrganizationEditForm
def get_context_data(self, *args, **kwargs):
"""
Returns the context data for the view
"""
context = super(OrganizationEditView, self).get_context_data(*args, **kwargs)
context['org'] = self.org
return context
def get_initial(self):
return self.org
def form_valid(self, form, *args, **kwargs):
"""
Validates the form data and submits if valid
"""
api = OclApi(self.request, debug=True)
data = {}
data.update(form.cleaned_data)
result = api.update_org(self.org_id, data)
# TODO: Catch exceptions that will be raised by
# Ocl lib.
if result.ok:
messages.add_message(self.request, messages.INFO, _('Organization updated.'))
return redirect(reverse('org-details', kwargs={'org': self.org_id}))
# TODO: Add error messages from API to form.
else:
return super(OrganizationEditView, self).form_invalid(form)
# TODO(paynejd): OrganizationMemberAddView only half works -- fix this
class OrganizationMemberAddView(LoginRequiredMixin, FormView):
"""
View to add member to organization
"""
form_class = OrganizationMemberAddForm
template_name = "orgs/org_member_add.html"
def get_org(self):
"""
Load the organization
"""
self.org_id = self.kwargs.get('org')
api = OclApi(self.request, debug=True)
self.org = api.get('orgs', self.org_id).json()
def get_initial(self):
""" Trick to do some initial lookup """
self.get_org()
data = super(OrganizationMemberAddView, self).get_initial()
return data
def get_context_data(self, *args, **kwargs):
"""
Returns context data for the view
"""
context = super(OrganizationMemberAddView, self).get_context_data(*args, **kwargs)
context['org'] = self.org
return context
def form_valid(self, form, *args, **kwargs):
"""
Validates the form data and submits if valid
"""
print args
print kwargs
self.get_org()
new_username = form.cleaned_data.pop('member_username')
api = OclApi(self.request, debug=True)
result = api.put('orgs', self.org['id'], 'members', new_username)
# TODO: Catch exceptions that will be raised by
# Ocl lib.
if result.status_code == 204:
messages.add_message(self.request, messages.INFO, _('Member Added'))
return redirect(reverse('org-details', kwargs={'org': self.org['id']}))
elif result.status_code == 404:
messages.add_message(self.request, messages.ERROR, 'Member doesn\'t exist')
return super(OrganizationMemberAddView, self).form_invalid(form)
# TODO(paynejd): OrganizationMemberRemoveView only half works -- fix this
class OrganizationMemberRemoveView(LoginRequiredMixin,
JsonRequestResponseMixin, View):
"""
View to remove member from organization
"""
def post(self, *args, **kwargs):
"""Posts member removal request to API"""
self.org_id = self.kwargs.get('org')
self.username = self.kwargs.get('username')
api = OclApi(self.request, debug=True)
result = api.delete('orgs', self.org_id, 'members', self.username)
return self.render_json_response({'message':'Member removed'})
# TODO(paynejd@gmail.com): Retire unreachable code?
#if result.status_code == 204:
# return self.render_json_response({'message':'Member removed'})
#else:
# return self.render_bad_request_response({'message': result.status_code})
class OrgJsonView(View):
def get(self, request, *args, **kwargs):
api = OclApi(self.request, debug=True)
result = api.get('orgs', params={'limit':'0'})
return HttpResponse(json.dumps(result.json()), content_type="application/json")
|
bhawnasingla/ocl_web
|
ocl_web/apps/orgs/views.py
|
Python
|
mpl-2.0
| 15,791
|
from client import Connection, Client
from exceptions import RedisError, ConnectionError, ResponseError, \
InvalidResponse
|
kaze/paasmaker
|
paasmaker/thirdparty/tornadoredis/__init__.py
|
Python
|
mpl-2.0
| 146
|
from superdesk.emails import send_user_status_changed_email, send_activity_emails, send_email, send_translation_changed
from superdesk.tests import TestCase
from unittest.mock import patch
class SendEmailTestCase(TestCase):
def test_send_email(self):
with self.app.app_context():
with self.app.mail.record_messages() as outbox:
assert len(outbox) == 0
send_user_status_changed_email(["test@sd.io"], "created")
assert len(outbox) == 1
assert outbox[0].subject == "Your Superdesk account is created"
def test_send_email_multiline_subject(self):
with self.app.app_context():
with self.app.mail.record_messages() as outbox:
send_email("foo\nbar", "admin@localhost", ["foo@example.com"], "text", "<p>html</p>")
assert len(outbox) == 1
assert outbox[0].subject == "foo"
def test_send_activity_emails_error(self):
recipients = ["foo", "bar"]
activities = [
{"message": "error", "data": {"foo": 1}},
{"message": "error", "data": {"bar": 1}},
]
with patch.object(send_email, "delay", return_value=None) as sent:
with self.app.app_context():
send_activity_emails(activities[0], recipients)
self.assertEqual(1, sent.call_count)
send_activity_emails(activities[0], recipients)
self.assertEqual(1, sent.call_count)
send_activity_emails(activities[1], recipients)
self.assertEqual(2, sent.call_count)
send_activity_emails(activities[1], recipients)
self.assertEqual(2, sent.call_count)
def test_send_translation_changed(self):
item = {"_id": "test_id", "guid": "guid", "headline": "headline test"}
with self.app.app_context():
with self.app.mail.record_messages() as outbox:
assert len(outbox) == 0
send_translation_changed("admin", item, ["test@sd.io"])
assert len(outbox) == 1
assert outbox[0].subject == "The original item headline test has been changed"
link = "http://localhost:9000/#/workspace?item=guid&action=edit"
assert outbox[0].body.find(link) != -1
assert outbox[0].html.find(link) != -1
|
petrjasek/superdesk-core
|
tests/send_email_test.py
|
Python
|
agpl-3.0
| 2,396
|
'''
Created on Jan 9, 2012
@package: superdesk
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Contains the services for superdesk.
'''
from ..plugin.registry import addService
from .db_superdesk import bindSuperdeskSession, bindSuperdeskValidations
from ally.container import support, bind, ioc
from itertools import chain
# --------------------------------------------------------------------
SERVICES = 'superdesk.*.api.**.I*Service'
@ioc.entity
def binders(): return [bindSuperdeskSession]
@ioc.entity
def bindersService(): return list(chain((bindSuperdeskValidations,), binders()))
bind.bindToEntities('superdesk.*.impl.**.*Alchemy', binders=binders)
support.createEntitySetup('superdesk.*.impl.**.*')
support.listenToEntities(SERVICES, listeners=addService(bindersService))
support.loadAllEntities(SERVICES)
# --------------------------------------------------------------------
|
superdesk/Live-Blog
|
plugins/superdesk/__plugin__/superdesk/service.py
|
Python
|
agpl-3.0
| 955
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Taŭga Tecnologia
# Aristides Caldeira <aristides.caldeira@tauga.com.br>
# License AGPL-3 or later (http://www.gnu.org/licenses/agpl)
#
from __future__ import division, print_function, unicode_literals
from odoo import fields, models
class SpedNCM(models.Model):
_name = b'sped.ncm'
_inherit = 'sped.ncm'
ibptax_ids = fields.One2many(
comodel_name='sped.ibptax.ncm',
inverse_name='ncm_id',
string='Alíquotas IBPT',
)
class SpedServico(models.Model):
_name = b'sped.servico'
_inherit = 'sped.servico'
ibptax_ids = fields.One2many(
comodel_name='sped.ibptax.servico',
inverse_name='servico_id',
string='Alíquotas IBPT'
)
class SpedNBS(models.Model):
_name = b'sped.nbs'
_inherit = 'sped.nbs'
ibptax_ids = fields.One2many(
comodel_name='sped.ibptax.nbs',
inverse_name='nbs_id',
string='Alíquotas IBPT'
)
|
odoo-brazil/l10n-brazil-wip
|
sped_imposto/models/sped_ncm_ibptax.py
|
Python
|
agpl-3.0
| 980
|
import zerodb
from zerodb.query import *
from models import Employee
PASSPHRASE = "very insecure passphrase - never use it"
SOCKET = ("localhost", 8001)
db = zerodb.DB(SOCKET, username="root", password=PASSPHRASE)
print("Connected")
print(len(db[Employee]))
johns = db[Employee].query(name="John", limit=10)
print(len(johns))
print(johns)
rich_johns = db[Employee].query(InRange("salary", 195000, 200000), name="John")
print(len(rich_johns))
print(rich_johns)
uk = db[Employee].query(Contains("description", "United Kingdom"))
print(len(uk))
if uk:
print(uk[0])
print(uk[0].description)
|
zero-db/zerodb-server
|
demo/demo.py
|
Python
|
agpl-3.0
| 601
|
"""
Modulestore configuration for test cases.
"""
from uuid import uuid4
from django.test import TestCase
from xmodule.modulestore.django import editable_modulestore, \
clear_existing_modulestores
def mixed_store_config(data_dir, mappings):
"""
Return a `MixedModuleStore` configuration, which provides
access to both Mongo- and XML-backed courses.
`data_dir` is the directory from which to load XML-backed courses.
`mappings` is a dictionary mapping course IDs to modulestores, for example:
{
'MITx/2.01x/2013_Spring': 'xml',
'edx/999/2013_Spring': 'default'
}
where 'xml' and 'default' are the two options provided by this configuration,
    mapping (respectively) to XML-backed and Mongo-backed modulestores.
"""
mongo_config = mongo_store_config(data_dir)
xml_config = xml_store_config(data_dir)
store = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': mappings,
'stores': {
'default': mongo_config['default'],
'xml': xml_config['default']
}
}
}
}
store['direct'] = store['default']
return store
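# A hedged usage sketch (the data directory and course id are hypothetical):
#   MIXED_CONFIG = mixed_store_config('common/test/data', {'edX/toy/2012_Fall': 'xml'})
#   @override_settings(MODULESTORE=MIXED_CONFIG)   # see ModuleStoreTestCase below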
def mongo_store_config(data_dir):
"""
Defines default module store using MongoModuleStore.
Use of this config requires mongo to be running.
"""
store = {
'default': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'test_xmodule',
'collection': 'modulestore_%s' % uuid4().hex,
},
'OPTIONS': {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': data_dir,
'render_template': 'mitxmako.shortcuts.render_to_string'
}
}
}
store['direct'] = store['default']
return store
def draft_mongo_store_config(data_dir):
"""
Defines default module store using DraftMongoModuleStore.
"""
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': data_dir,
'render_template': 'mitxmako.shortcuts.render_to_string'
}
store = {
'default': {
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'test_xmodule',
'collection': 'modulestore_%s' % uuid4().hex,
},
'OPTIONS': modulestore_options
}
}
store['direct'] = store['default']
return store
def xml_store_config(data_dir):
"""
Defines default module store using XMLModuleStore.
"""
store = {
'default': {
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': data_dir,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
}
}
store['direct'] = store['default']
return store
def studio_store_config(data_dir):
"""
Defines modulestore structure used by Studio tests.
"""
store_config = {
'host': 'localhost',
'db': 'test_xmodule',
'collection': 'modulestore_%s' % uuid4().hex,
}
options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': data_dir,
'render_template': 'mitxmako.shortcuts.render_to_string',
}
store = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': store_config,
'OPTIONS': options
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': store_config,
'OPTIONS': options
},
'draft': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': store_config,
'OPTIONS': options
},
'split': {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': store_config,
'OPTIONS': options
}
}
return store
class ModuleStoreTestCase(TestCase):
"""
Subclass for any test case that uses a ModuleStore.
Ensures that the ModuleStore is cleaned before/after each test.
Usage:
1. Create a subclass of `ModuleStoreTestCase`
2. Use Django's @override_settings decorator to use
the desired modulestore configuration.
For example:
MIXED_CONFIG = mixed_store_config(data_dir, mappings)
@override_settings(MODULESTORE=MIXED_CONFIG)
class FooTest(ModuleStoreTestCase):
# ...
3. Use factories (e.g. `CourseFactory`, `ItemFactory`) to populate
the modulestore with test data.
NOTE:
* For Mongo-backed courses (created with `CourseFactory`),
the state of the course will be reset before/after each
test method executes.
* For XML-backed courses, the course state will NOT
reset between test methods (although it will reset
between test classes)
The reason is: XML courses are not editable, so to reset
a course you have to reload it from disk, which is slow.
If you do need to reset an XML course, use
`clear_existing_modulestores()` directly in
your `setUp()` method.
"""
@staticmethod
def update_course(course, data):
"""
Updates the version of course in the modulestore
with the metadata in 'data' and returns the updated version.
'course' is an instance of CourseDescriptor for which we want
to update metadata.
'data' is a dictionary with an entry for each CourseField we want to update.
"""
store = editable_modulestore('direct')
store.update_metadata(course.location, data)
updated_course = store.get_instance(course.id, course.location)
return updated_course
@staticmethod
def drop_mongo_collection():
"""
If using a Mongo-backed modulestore, drop the collection.
"""
# This will return the mongo-backed modulestore
# even if we're using a mixed modulestore
store = editable_modulestore()
if hasattr(store, 'collection'):
store.collection.drop()
@classmethod
def setUpClass(cls):
"""
Delete the existing modulestores, causing them to be reloaded.
"""
# Clear out any existing modulestores,
# which will cause them to be re-created
# the next time they are accessed.
clear_existing_modulestores()
TestCase.setUpClass()
@classmethod
def tearDownClass(cls):
"""
Drop the existing modulestores, causing them to be reloaded.
Clean up any data stored in Mongo.
"""
# Clean up by flushing the Mongo modulestore
cls.drop_mongo_collection()
# Clear out the existing modulestores,
# which will cause them to be re-created
# the next time they are accessed.
# We do this at *both* setup and teardown just to be safe.
clear_existing_modulestores()
TestCase.tearDownClass()
def _pre_setup(self):
"""
Flush the ModuleStore before each test.
"""
# Flush the Mongo modulestore
ModuleStoreTestCase.drop_mongo_collection()
# Call superclass implementation
super(ModuleStoreTestCase, self)._pre_setup()
def _post_teardown(self):
"""
Flush the ModuleStore after each test.
"""
ModuleStoreTestCase.drop_mongo_collection()
# Call superclass implementation
super(ModuleStoreTestCase, self)._post_teardown()
|
abo-abo/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py
|
Python
|
agpl-3.0
| 8,071
|
# -*- coding: utf-8 -*-
"""Stacked Autoencoder Pretrainer"""
import numpy as np
import tensorflow as tf
from tf_helper import tfMSE, fully_connected_layer
from jupyter_notebook_helper import getRunTime
def buildGraphOfStackedAutoencoder(inDim, variables, nonlinearity=tf.nn.tanh, avoidDeadNeurons = 0.,
lastNonLinearity=tf.nn.tanh):
graph = tf.Graph() #create new graph
with graph.as_default():
inputs = tf.placeholder(tf.float32, [None, inDim], 'inputs')
#with tf.name_scope('fullyConnected'):
layer = inputs
#inDim = inDim
for i, (weights, biases) in enumerate(variables): #[3000x400], [400,]
outDim = len(biases)
curNonLinearity = lastNonLinearity if i+1 == len(variables) else nonlinearity
layer = fully_connected_layer(layer, inDim, outDim, nonlinearity = curNonLinearity, w=weights, b=biases,
avoidDeadNeurons = avoidDeadNeurons)
inDim = outDim
init = tf.global_variables_initializer()
outputs = layer
return graph, init, inputs, outputs
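# Rough usage sketch (shapes are illustrative, extending the [3000x400] comment above):
# with two pretrained layers, variables = [(w0, b0), (w1, b1)] where w0 is 3000x400
# and w1 is 400x100, buildGraphOfStackedAutoencoder(3000, variables) returns a graph
# whose `outputs` tensor has 100 units per example.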
def constructModelFromPretrainedByAutoEncoderStack(hiddenDimLayers, autoencoder, rng, dataProviderClass, config,
inputDim = 3000, batchSize=50):
#preTrainedModels = []
curInputDim = inputDim
curProcessBatch = None
variables = []
#count = 0
for hiddenDimLayer in hiddenDimLayers:
dataProvider = dataProviderClass(
'train', batch_size=batchSize, rng=rng, processBatch = curProcessBatch
)
_, weights, biases, _ = autoencoder(curInputDim, hiddenDimLayer, dataProvider, config=config) #graph, w, b, runTime
variables.append((weights, biases))
graph, init, inputs, outputs = buildGraphOfStackedAutoencoder(inputDim, variables) #recreate it from scratch actually
#count += 1
        def processBatchFunc(inputBatch):  # rebound on each loop iteration; closes over the graph built so far
"""process batch should calculate fprop for inputBatch"""
with tf.Session(graph=graph, config=config) as sess:
sess.run(init)
(outValue, ) = sess.run([outputs], feed_dict={inputs: inputBatch})
#print "count now is: %d" % count
return outValue
curProcessBatch = processBatchFunc
curInputDim = hiddenDimLayer
return variables
def executeNonLinearAutoencoder(inputOutputDim, hiddenDim, dataProvider, config,
numEpochs=20, errorFunc = tfMSE,
nonLinearLayer1 = tf.nn.tanh, nonLinearLayer2=tf.nn.tanh, printing=True):
#inputOutputDim = dataProvider.inputs.shape[1]
graph = tf.Graph() #create new graph
with graph.as_default():
inputs = tf.placeholder(tf.float32, [None, inputOutputDim], 'inputs')
targets = tf.placeholder(tf.float32, [None, inputOutputDim], 'targets')
with tf.name_scope('fullyConnected'):
hidden_layer = fully_connected_layer(inputs, inputOutputDim, hiddenDim, nonlinearity = nonLinearLayer1)
with tf.name_scope('outputLayer'):
outputs = fully_connected_layer(hidden_layer, hiddenDim, inputOutputDim, nonlinearity = nonLinearLayer2)
with tf.name_scope('error'):
error = errorFunc(outputs, targets)
with tf.name_scope('train_auto_encoder'):
train_step = tf.train.AdamOptimizer().minimize(error)
init = tf.global_variables_initializer()
def trainAutoencoder(train_error, sess):
for step, (input_batch, target_batch) in enumerate(dataProvider):
_, batch_error = sess.run(
[train_step, error],
feed_dict={inputs: input_batch, targets: target_batch})
train_error += batch_error
return train_error
weights, biases = (None, None)
with tf.Session(graph=graph, config=config) as sess:
sess.run(init)
totalRuntime = 0.
prev_errors = np.zeros(3) #after three equalities break the while loop below
train_error = -1
#for e in range(numEpochs):
e = 0
while int(train_error * 1000) != int(np.mean(prev_errors)):
train_error = 0.
train_error, runTime = getRunTime(lambda : trainAutoencoder(train_error, sess))
train_error /= dataProvider.num_batches
totalRuntime += runTime
if printing:
print 'End epoch %02d (%.3f secs): err(train)=%.3f' % (e+1, runTime, train_error)
prev_errors[0] = prev_errors[1]
prev_errors[1] = prev_errors[2]
#prev_errors[2] = np.round(train_error, 3)
prev_errors[2] = int(train_error * 1000)
e += 1
weights, biases = [v.eval() for v in tf.trainable_variables() if "fullyConnected" in v.name]
assert weights.shape[1] == len(biases)
return graph, weights, biases, totalRuntime
|
pligor/predicting-future-product-prices
|
04_time_series_prediction/mylibs/stacked_autoencoder_pretrainer.py
|
Python
|
agpl-3.0
| 5,236
|
# encoding: utf-8
import os.path
import time
import requests
from ..test_common import skipifdev
from ..utils import get_running_krakens
ROOTDIR = os.path.dirname(os.path.abspath(__file__))
instances_names = {'us-wa', 'fr-nw', 'fr-npdc', 'fr-ne-amiens', 'fr-idf', 'fr-cen'}
@skipifdev
def test_upgrade_kraken(duplicated):
platform, fabric = duplicated
with fabric.set_call_tracker('-component.kraken.upgrade_engine_packages',
'-component.kraken.upgrade_monitor_kraken_packages',
'-component.kraken.test_kraken',
'component.kraken.restart_kraken_on_host',
'component.kraken.require_monitor_kraken_started') as data:
value, exception, stdout, stderr = fabric.execute_forked(
'tasks.upgrade_kraken', up_confs=False, supervision=False)
assert exception is None
assert stderr == ''
# upgrades apply on both machines
assert len(data()['upgrade_engine_packages']) == 2
assert len(data()['upgrade_monitor_kraken_packages']) == 2
assert len(data()['require_monitor_kraken_started']) == 2
assert len(data()['restart_kraken_on_host']) == 2 * len(fabric.env.instances)
assert len(set((x[0][1] for x in data()['restart_kraken_on_host']))) == 2
assert set((x[0][0].name for x in data()['restart_kraken_on_host'])) == instances_names
for instance in fabric.env.instances:
assert platform.docker_exec('readlink /srv/kraken/{}/kraken'.format(instance), 'host1') == '/usr/bin/kraken'
assert platform.docker_exec('readlink /srv/kraken/{}/kraken'.format(instance), 'host2') == '/usr/bin/kraken'
@skipifdev
def test_upgrade_kraken_restricted(duplicated):
platform, fabric = duplicated
# patch the eng role (as done in upgrade_all on prod platform)
fabric.env.roledefs['eng'] = fabric.env.eng_hosts_1
with fabric.set_call_tracker('-component.kraken.upgrade_engine_packages',
'-component.kraken.upgrade_monitor_kraken_packages',
'-component.kraken.test_kraken',
'component.kraken.restart_kraken_on_host',
'component.kraken.require_monitor_kraken_started') as data:
value, exception, stdout, stderr = fabric.execute_forked(
'tasks.upgrade_kraken', up_confs=False, supervision=False)
assert exception is None
assert stderr == ''
# upgrades apply only on restricted pool
assert len(data()['upgrade_engine_packages']) == 1
assert len(data()['upgrade_monitor_kraken_packages']) == 1
assert len(data()['require_monitor_kraken_started']) == 1
# kraken restart apply only on restricted pool
assert len(data()['restart_kraken_on_host']) == len(fabric.env.instances)
assert set((x[0][1] for x in data()['restart_kraken_on_host'])) == {fabric.env.eng_hosts_1[0]}
assert set((x[0][0].name for x in data()['restart_kraken_on_host'])) == instances_names
@skipifdev
def test_upgrade_all_load_balancer(duplicated):
platform, fabric = duplicated
fabric.env.use_load_balancer = True
    # postgres takes a really long time to warm up!
time.sleep(15)
# most of this test is unitary test: detailed functions are not called, they are traced
with fabric.set_call_tracker('-tasks.check_last_dataset',
'-tasks.upgrade_tyr',
'-tasks.upgrade_kraken',
'-tasks.upgrade_jormungandr',
'-prod_tasks.disable_nodes',
'-prod_tasks.enable_nodes',
'prod_tasks.restart_jormungandr',
'-component.load_balancer._adc_connection',
'-component.load_balancer.disable_node',
'-component.load_balancer.enable_node') as data:
value, exception, stdout, stderr = fabric.execute_forked('tasks.upgrade_all',
check_version=False, check_dead=False)
assert exception is None
assert stderr == ''
assert stdout.count("Executing task 'stop_tyr_beat'") == 1
assert stdout.count("Executing task 'start_tyr_beat'") == 1
# 1 call to component.load_balancer.disable_node by reload_jormun_safe()
assert len(data()['disable_node']) == 1
# 1 call to component.load_balancer.enable_node by reload_jormun_safe()
assert len(data()['enable_node']) == 1
# 4 calls: 1 eng_hosts_1, 1 ws_hosts_1, 1 eng_hosts_2, 1 empty
assert len(data()['disable_nodes']) == 4
for i, x in enumerate((fabric.env.eng_hosts_1, fabric.env.ws_hosts_1, fabric.env.eng_hosts_2, [])):
assert data()['disable_nodes'][i][0][0] == x
# 4 calls: 1 eng_hosts_1, 1 ws_hosts_1, 1 ws_hosts_2, 1 eng_hosts
assert len(data()['enable_nodes']) == 4
for i, x in enumerate((fabric.env.eng_hosts_1, fabric.env.ws_hosts_1, fabric.env.ws_hosts_2, fabric.env.eng_hosts)):
assert data()['enable_nodes'][i][0][0] == x
# 5 calls to restart_jormungandr
assert len(data()['restart_jormungandr']) == 5
# 1 call in first phase with supervision, 1 call in second phase without supervision
assert len(data()['upgrade_kraken']) == 2
assert data()['upgrade_kraken'][0][1].get('supervision') is True
assert data()['upgrade_kraken'][1][1].get('supervision') is None
# 1 call in first phase, 1 call in second phase
assert len(data()['upgrade_jormungandr']) == 2
# only one phase
assert len(data()['upgrade_tyr']) == 1
@skipifdev
def test_remove_instance(duplicated):
platform, fabric = duplicated
    # postgres takes a really long time to warm up!
time.sleep(15)
# set up a server for tyr API on host1 and start it
platform.scp(os.path.join(ROOTDIR, 'tyr-api.conf'), '/etc/apache2/conf-enabled/tyr-api.conf', 'host1')
platform.docker_exec('service apache2 restart', 'host1')
assert requests.get('http://{}/v0/instances/us-wa'.format(fabric.env.tyr_url)).json()
assert len(requests.get('http://{}/navitia/v1/status'.format(fabric.env.host1_ip)).json()['regions']) == \
len(instances_names)
value, exception, stdout, stderr = fabric.execute_forked('tasks.remove_instance', 'us-wa')
assert exception is None
assert stderr == ''
assert stdout.count("Executing task 'remove_postgresql_database'") == 1
assert stdout.count("Executing task 'remove_ed_instance'") == 2
assert stdout.count("Executing task 'remove_tyr_instance'") == 2
assert stdout.count("Executing task 'remove_jormungandr_instance'") == 1
assert len(requests.get('http://{}/navitia/v1/status'.format(fabric.env.host1_ip)).json()['regions']) == \
len(instances_names) - 1
assert requests.get('http://{}/v0/instances/us-wa'.format(fabric.env.tyr_url)).json() == []
assert set(get_running_krakens(platform, 'host1')) == instances_names.difference(['us-wa'])
assert set(get_running_krakens(platform, 'host2')) == instances_names.difference(['us-wa'])
assert platform.path_exists('/srv/ed/us-wa', negate=True)
assert platform.path_exists('/srv/ed/data/us-wa', negate=True)
assert platform.path_exists('/srv/ed/data/us-wa/backup', negate=True)
assert platform.path_exists('/etc/tyr.d/us-wa.ini', negate=True)
assert platform.path_exists('/etc/init.d/kraken_us-wa', negate=True)
assert platform.path_exists('/srv/kraken/us-wa', negate=True)
assert platform.path_exists('/etc/jormungandr.d/us-wa.json', negate=True)
@skipifdev
def test_update_instance(duplicated):
platform, fabric = duplicated
    # postgres takes a really long time to warm up!
time.sleep(15)
assert len(requests.get('http://{}/navitia/v1/status'.format(fabric.env.host1_ip)).json()['regions']) == \
len(instances_names)
# create a new instance
add_instance = fabric.get_object('instance.add_instance')
add_instance('toto', 'passwd', zmq_socket_port=30004)
with fabric.set_call_tracker('component.kraken.create_eng_instance',
'component.jormungandr.deploy_jormungandr_instance_conf') as data:
value, exception, stdout, stderr = fabric.execute_forked('tasks.update_instance', 'toto')
assert exception is None
time.sleep(10)
assert len(requests.get('http://{}/navitia/v1/status'.format(fabric.env.host1_ip)).json()['regions']) == \
len(instances_names) + 1
assert requests.get('http://{}/monitor-kraken/?instance=toto'.format(fabric.env.host1_ip)).json()
assert requests.get('http://{}/navitia/v1/coverage/toto/status'.format(fabric.env.host1_ip)).json()
assert len(data()['create_eng_instance']) == 1
assert len(data()['deploy_jormungandr_instance_conf']) == 1
@skipifdev
def test_check_last_dataset(duplicated):
platform, fabric = duplicated
platform.scp(os.path.join(ROOTDIR, '../test_tyr/data.zip'), '/srv/ed/data/us-wa/', 'host1')
# wait first binarization by tyr (automatic)
time.sleep(30)
    # check that data.zip has been moved to the backup folder
assert platform.path_exists('/srv/ed/data/us-wa/data.zip', 'host1', negate=True)
backup = '/srv/ed/data/us-wa/backup'
folder = platform.docker_exec('ls {}'.format(backup), 'host1').strip()
assert platform.path_exists('{}/{}/data.zip'.format(backup, folder), 'host1')
value, exception, stdout, stderr = fabric.execute_forked('check_last_dataset')
assert exception is None
assert stderr == ''
assert '******** AVAILABLE DATASETS ********\n\x1b[32m/srv/ed/data/us-wa/backup/' in stdout
platform.docker_exec('rm {}/{}/data.zip'.format(backup, folder), 'host1')
value, exception, stdout, stderr = fabric.execute_forked('check_last_dataset')
assert '********* MISSING DATASETS *********\n\x1b[31m/srv/ed/data/us-wa/backup/' in stdout
|
CanalTP/fabric_navitia
|
integration_tests/test_tasks/test_duplicated.py
|
Python
|
agpl-3.0
| 9,940
|
import os
import traceback
from datetime import datetime
from sqlalchemy import (Column, BigInteger, String, DateTime, Boolean,
ForeignKey, Enum, inspect, bindparam, Index, event)
from sqlalchemy.orm import relationship
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.expression import false
from inbox.config import config
from inbox.sqlalchemy_ext.util import JSON, MutableDict, bakery
from inbox.models.mixins import (HasPublicID, HasEmailAddress, HasRunState,
HasRevisions, UpdatedAtMixin,
DeletedAtMixin)
from inbox.models.base import MailSyncBase
from inbox.models.calendar import Calendar
from inbox.scheduling.event_queue import EventQueue
from inbox.providers import provider_info
from nylas.logging.sentry import log_uncaught_errors
from nylas.logging import get_logger
log = get_logger()
# Note, you should never directly create Account objects. Instead you
# should use objects that inherit from this, such as GenericAccount or
# GmailAccount
class Account(MailSyncBase, HasPublicID, HasEmailAddress, HasRunState,
HasRevisions, UpdatedAtMixin, DeletedAtMixin):
API_OBJECT_NAME = 'account'
@property
def provider(self):
"""
A constant, unique lowercase identifier for the account provider
(e.g., 'gmail', 'eas'). Subclasses should override this.
"""
raise NotImplementedError
@property
def verbose_provider(self):
"""
A detailed identifier for the account provider
(e.g., 'gmail', 'office365', 'outlook').
Subclasses may override this.
"""
return self.provider
@property
def category_type(self):
"""
Whether the account is organized by folders or labels
('folder'/ 'label'), depending on the provider.
Subclasses should override this.
"""
raise NotImplementedError
@property
def auth_handler(self):
from inbox.auth.base import handler_from_provider
return handler_from_provider(self.provider)
@property
def provider_info(self):
return provider_info(self.provider)
@property
def thread_cls(self):
from inbox.models.thread import Thread
return Thread
# The default phrase used when sending mail from this account.
name = Column(String(256), nullable=False, server_default='')
# If True, throttle initial sync to reduce resource load
throttled = Column(Boolean, server_default=false())
# if True we sync contacts/events/email
# NOTE: these columns are meaningless for EAS accounts
sync_email = Column(Boolean, nullable=False, default=True)
sync_contacts = Column(Boolean, nullable=False, default=False)
sync_events = Column(Boolean, nullable=False, default=False)
last_synced_contacts = Column(DateTime, nullable=True)
# DEPRECATED
last_synced_events = Column(DateTime, nullable=True)
emailed_events_calendar_id = Column(BigInteger,
ForeignKey('calendar.id',
ondelete='SET NULL',
use_alter=True,
name='emailed_events_cal'),
nullable=True)
_emailed_events_calendar = relationship(
'Calendar', post_update=True,
foreign_keys=[emailed_events_calendar_id])
def create_emailed_events_calendar(self):
if not self._emailed_events_calendar:
calname = "Emailed events"
cal = Calendar(namespace=self.namespace,
description=calname,
uid='inbox',
name=calname,
read_only=True)
self._emailed_events_calendar = cal
@property
def emailed_events_calendar(self):
self.create_emailed_events_calendar()
return self._emailed_events_calendar
@emailed_events_calendar.setter
def emailed_events_calendar(self, cal):
self._emailed_events_calendar = cal
sync_host = Column(String(255), nullable=True)
desired_sync_host = Column(String(255), nullable=True)
# current state of this account
state = Column(Enum('live', 'down', 'invalid'), nullable=True)
# Based on account status, should the sync be running?
# (Note, this is stored via a mixin.)
# This is set to false if:
# - Account credentials are invalid (see mark_invalid())
# - External factors no longer require this account to sync
# The value of this bit should always equal the AND value of all its
# folders and heartbeats.
@property
def sync_enabled(self):
return self.sync_should_run
sync_state = Column(Enum('running', 'stopped', 'killed',
'invalid', 'connerror'),
nullable=True)
_sync_status = Column(MutableDict.as_mutable(JSON), default={},
nullable=True)
@property
def sync_status(self):
d = dict(id=self.id,
email=self.email_address,
provider=self.provider,
is_enabled=self.sync_enabled,
state=self.sync_state,
sync_host=self.sync_host,
desired_sync_host=self.desired_sync_host)
d.update(self._sync_status or {})
return d
@property
def sync_error(self):
return self._sync_status.get('sync_error')
@property
def initial_sync_start(self):
if len(self.folders) == 0 or \
any([f.initial_sync_start is None for f in self.folders]):
return None
return min([f.initial_sync_start for f in self.folders])
@property
def initial_sync_end(self):
if len(self.folders) == 0 \
or any([f.initial_sync_end is None for f in self.folders]):
return None
return max([f.initial_sync_end for f in self.folders])
@property
def initial_sync_duration(self):
if not self.initial_sync_start or not self.initial_sync_end:
return None
        return (self.initial_sync_end - self.initial_sync_start).total_seconds()
def update_sync_error(self, error=None):
if error is None:
self._sync_status['sync_error'] = None
else:
error_obj = {
'message': str(error.message)[:3000],
'exception': "".join(traceback.format_exception_only(type(error), error))[:500],
'traceback': traceback.format_exc(20)[:3000]}
self._sync_status['sync_error'] = error_obj
def sync_started(self):
"""
Record transition to started state. Should be called after the
sync is actually started, not when the request to start it is made.
"""
current_time = datetime.utcnow()
# Never run before (vs restarting stopped/killed)
if self.sync_state is None and (
not self._sync_status or
self._sync_status.get('sync_end_time') is None):
self._sync_status['original_start_time'] = current_time
self._sync_status['sync_start_time'] = current_time
self._sync_status['sync_end_time'] = None
self._sync_status['sync_error'] = None
self._sync_status['sync_disabled_reason'] = None
self._sync_status['sync_disabled_on'] = None
self._sync_status['sync_disabled_by'] = None
self.sync_state = 'running'
def enable_sync(self):
""" Tell the monitor that this account should be syncing. """
self.sync_should_run = True
def disable_sync(self, reason):
""" Tell the monitor that this account should stop syncing. """
self.sync_should_run = False
self._sync_status['sync_disabled_reason'] = reason
self._sync_status['sync_disabled_on'] = datetime.utcnow()
self._sync_status['sync_disabled_by'] = os.environ.get('USER',
'unknown')
def mark_invalid(self, reason='invalid credentials', scope='mail'):
"""
In the event that the credentials for this account are invalid,
update the status and sync flag accordingly. Should only be called
after trying to re-authorize / get new token.
"""
if scope == 'calendar':
self.sync_events = False
elif scope == 'contacts':
self.sync_contacts = False
else:
self.disable_sync(reason)
self.sync_state = 'invalid'
def mark_for_deletion(self):
"""
Mark account for deletion
"""
self.disable_sync('account deleted')
self.sync_state = 'stopped'
# Commit this to prevent race conditions
inspect(self).session.commit()
def unmark_for_deletion(self):
self.enable_sync()
self._sync_status = {}
self.sync_state = 'running'
inspect(self).session.commit()
def sync_stopped(self, requesting_host):
"""
Record transition to stopped state. Should be called after the
sync is actually stopped, not when the request to stop it is made.
"""
if requesting_host == self.sync_host:
# Perform a compare-and-swap before updating these values.
# Only if the host requesting to update the account.sync_* attributes
# here still owns the account sync (i.e is account.sync_host),
# the request can proceed.
self.sync_host = None
if self.sync_state == 'running':
self.sync_state = 'stopped'
self._sync_status['sync_end_time'] = datetime.utcnow()
return True
return False
@classmethod
def get(cls, id_, session):
q = bakery(lambda session: session.query(cls))
q += lambda q: q.filter(cls.id == bindparam('id_'))
return q(session).params(id_=id_).first()
@property
def is_killed(self):
return self.sync_state == 'killed'
@property
def is_running(self):
return self.sync_state == 'running'
@property
def is_marked_for_deletion(self):
return self.sync_state in ('stopped', 'killed', 'invalid') and \
self.sync_should_run is False and \
self._sync_status.get('sync_disabled_reason') == 'account deleted'
@property
def should_suppress_transaction_creation(self):
# Only version if new or the `sync_state` has changed.
obj_state = inspect(self)
return not (obj_state.pending or
inspect(self).attrs.sync_state.history.has_changes())
@property
def server_settings(self):
return None
def get_raw_message_contents(self, message):
# Get the raw contents of a message. We do this differently
# for every backend (Gmail, IMAP, EAS), and the best way
# to do this across repos is to make it a method of the
# account class.
raise NotImplementedError
discriminator = Column('type', String(16))
__mapper_args__ = {'polymorphic_identity': 'account',
'polymorphic_on': discriminator}
def should_send_event(obj):
if not isinstance(obj, Account):
return False
inspected_obj = inspect(obj)
hist = inspected_obj.attrs.sync_host.history
if hist.has_changes():
return True
hist = inspected_obj.attrs.desired_sync_host.history
if hist.has_changes():
return True
hist = inspected_obj.attrs.sync_should_run.history
return hist.has_changes()
def already_registered_listener(obj):
return getattr(obj, '_listener_state', None) is not None
def update_listener_state(obj):
obj._listener_state['sync_should_run'] = obj.sync_should_run
obj._listener_state['sync_host'] = obj.sync_host
obj._listener_state['desired_sync_host'] = obj.desired_sync_host
obj._listener_state['sent_event'] = False
@event.listens_for(Session, "after_flush")
def after_flush(session, flush_context):
from inbox.mailsync.service import shared_sync_event_queue_for_zone, SYNC_EVENT_QUEUE_NAME
def send_migration_events(obj_state):
def f(session):
if obj_state['sent_event']:
return
id = obj_state['id']
sync_should_run = obj_state['sync_should_run']
sync_host = obj_state['sync_host']
desired_sync_host = obj_state['desired_sync_host']
try:
if sync_host is not None:
# Somebody is actively syncing this Account, so notify them if
# they should give up the Account.
if not sync_should_run or (sync_host != desired_sync_host and desired_sync_host is not None):
queue_name = SYNC_EVENT_QUEUE_NAME.format(sync_host)
log.info("Sending 'migrate_from' event for Account",
account_id=id, queue_name=queue_name)
EventQueue(queue_name).send_event({'event': 'migrate_from', 'id': id})
return
if not sync_should_run:
# We don't need to notify anybody because the Account is not
# actively being synced (sync_host is None) and sync_should_run is False,
# so just return early.
return
if desired_sync_host is not None:
# Nobody is actively syncing the Account, and we have somebody
# who wants to sync this Account, so notify them.
queue_name = SYNC_EVENT_QUEUE_NAME.format(desired_sync_host)
log.info("Sending 'migrate_to' event for Account",
account_id=id, queue_name=queue_name)
EventQueue(queue_name).send_event({'event': 'migrate_to', 'id': id})
return
# Nobody is actively syncing the Account, and nobody in particular
# wants to sync the Account so notify the shared queue.
shared_queue = shared_sync_event_queue_for_zone(config.get('ZONE'))
log.info("Sending 'migrate' event for Account",
account_id=id, queue_name=shared_queue.queue_name)
shared_queue.send_event({'event': 'migrate', 'id': id})
obj_state['sent_event'] = True
except:
log_uncaught_errors(log, account_id=id, sync_host=sync_host,
desired_sync_host=desired_sync_host)
return f
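    # Summary of f() above: an account that is actively syncing but should stop or
    # move gets 'migrate_from' on its current sync_host queue; an idle account that
    # should not run gets no event; an idle account with a desired_sync_host gets
    # 'migrate_to' on that host's queue; otherwise the shared zone queue receives a
    # generic 'migrate' event.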
for obj in session.new:
if isinstance(obj, Account):
if already_registered_listener(obj):
update_listener_state(obj)
else:
obj._listener_state = {'id': obj.id}
update_listener_state(obj)
event.listen(session,
'after_commit',
send_migration_events(obj._listener_state))
for obj in session.dirty:
if not session.is_modified(obj):
continue
if should_send_event(obj):
if already_registered_listener(obj):
update_listener_state(obj)
else:
obj._listener_state = {'id': obj.id}
update_listener_state(obj)
event.listen(session,
'after_commit',
send_migration_events(obj._listener_state))
Index('ix_account_sync_should_run_sync_host', Account.sync_should_run,
Account.sync_host, mysql_length={'sync_host': 191})
|
jobscore/sync-engine
|
inbox/models/account.py
|
Python
|
agpl-3.0
| 15,882
|
# coding=utf-8
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
from flask import request, jsonify, abort, make_response
from flask.ext.login import current_user
import astroprint.users as users
from octoprint.server import restricted_access, SUCCESS, admin_permission, userManager
from octoprint.server.api import api
#~~ user settings
@api.route("/users", methods=["GET"])
@restricted_access
@admin_permission.require(403)
def getUsers():
if userManager is None:
return jsonify(SUCCESS)
return jsonify({"users": userManager.getAllUsers()})
@api.route("/users", methods=["POST"])
@restricted_access
@admin_permission.require(403)
def addUser():
# if userManager is None:
# return jsonify(SUCCESS)
# if "application/json" in request.headers["Content-Type"]:
# data = request.json
# name = data["name"]
# password = data["password"]
# active = data["active"]
# roles = ["user"]
# if "admin" in data.keys() and data["admin"]:
# roles.append("admin")
# try:
# userManager.addUser(name, password, active, roles)
# except users.UserAlreadyExists:
# abort(409)
# return getUsers()
abort(404)
@api.route("/users/<username>", methods=["GET"])
@restricted_access
def getUser(username):
if userManager is None:
return jsonify(SUCCESS)
if current_user is not None and not current_user.is_anonymous and (current_user.get_name() == username or current_user.is_admin()):
user = userManager.findUser(username)
if user is not None:
return jsonify(user.asDict())
else:
abort(404)
else:
abort(403)
@api.route("/users/<username>", methods=["PUT"])
@restricted_access
@admin_permission.require(403)
def updateUser(username):
if userManager is None:
return jsonify(SUCCESS)
user = userManager.findUser(username)
if user is not None:
if "application/json" in request.headers["Content-Type"]:
data = request.json
# change roles
roles = ["user"]
if "admin" in data.keys() and data["admin"]:
roles.append("admin")
userManager.changeUserRoles(username, roles)
# change activation
if "active" in data.keys():
userManager.changeUserActivation(username, data["active"])
return getUsers()
else:
abort(404)
@api.route("/users/<username>", methods=["DELETE"])
@restricted_access
@admin_permission.require(http_exception=403)
def removeUser(username):
if userManager is None:
return jsonify(SUCCESS)
try:
userManager.removeUser(username)
return getUsers()
except users.UnknownUser:
abort(404)
@api.route("/users/<username>/password", methods=["PUT"])
@restricted_access
def changePasswordForUser(username):
if userManager is None:
return jsonify(SUCCESS)
if current_user is not None and not current_user.is_anonymous and (current_user.get_name() == username or current_user.is_admin()):
if "application/json" in request.headers["Content-Type"]:
data = request.json
if "password" in data.keys() and data["password"]:
try:
userManager.changeUserPassword(username, data["password"])
except users.UnknownUser:
return make_response(("Unknown user: %s" % username, 404, []))
return jsonify(SUCCESS)
else:
return make_response(("Forbidden", 403, []))
@api.route("/users/<username>/apikey", methods=["DELETE"])
@restricted_access
def deleteApikeyForUser(username):
if userManager is None:
return jsonify(SUCCESS)
if current_user is not None and not current_user.is_anonymous and (current_user.get_name() == username or current_user.is_admin()):
try:
userManager.deleteApikey(username)
except users.UnknownUser:
return make_response(("Unknown user: %s" % username, 404, []))
return jsonify(SUCCESS)
else:
return make_response(("Forbidden", 403, []))
@api.route("/users/<username>/apikey", methods=["POST"])
@restricted_access
def generateApikeyForUser(username):
if userManager is None:
return jsonify(SUCCESS)
if current_user is not None and not current_user.is_anonymous and (current_user.get_name() == username or current_user.is_admin()):
try:
apikey = userManager.generateApiKey(username)
except users.UnknownUser:
return make_response(("Unknown user: %s" % username, 404, []))
return jsonify({"apikey": apikey})
else:
return make_response(("Forbidden", 403, []))
|
abinashk-inf/AstroBox
|
src/octoprint/server/api/users.py
|
Python
|
agpl-3.0
| 4,325
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _
from webnotes.utils import cstr
no_cache = True
def get_context():
from selling.utils.cart import get_lead_or_customer
party = get_lead_or_customer()
if party.doctype == "Lead":
mobile_no = party.mobile_no
phone = party.phone
else:
mobile_no, phone = webnotes.conn.get_value("Contact", {"email_id": webnotes.session.user,
"customer": party.name}, ["mobile_no", "phone"])
return {
"company_name": cstr(party.customer_name if party.doctype == "Customer" else party.company_name),
"mobile_no": cstr(mobile_no),
"phone": cstr(phone)
}
@webnotes.whitelist()
def update_profile(fullname, password=None, company_name=None, mobile_no=None, phone=None):
from selling.utils.cart import update_party
update_party(fullname, company_name, mobile_no, phone)
if not fullname:
return _("Name is required")
webnotes.conn.set_value("Profile", webnotes.session.user, "first_name", fullname)
webnotes.add_cookies["full_name"] = fullname
return _("Updated")
|
Yellowen/Owrang
|
portal/templates/pages/profile.py
|
Python
|
agpl-3.0
| 1,175
|
import pybullet as p
import time
import pybullet_data
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
fileIO = p.loadPlugin("fileIOPlugin")
if (fileIO >= 0):
#we can have a zipfile (pickup.zip) inside a zipfile (pickup2.zip)
p.executePluginCommand(fileIO, pybullet_data.getDataPath()+"/pickup2.zip", [p.AddFileIOAction, p.ZipFileIO])
p.executePluginCommand(fileIO, "pickup.zip", [p.AddFileIOAction, p.ZipFileIO])
objs = p.loadSDF("pickup/model.sdf")
dobot = objs[0]
p.changeVisualShape(dobot, -1, rgbaColor=[1, 1, 1, 1])
else:
print("fileIOPlugin is disabled.")
p.setPhysicsEngineParameter(enableFileCaching=False)
while (1):
p.stepSimulation()
time.sleep(1. / 240.)
|
nrz/ylikuutio
|
external/bullet3/examples/pybullet/gym/pybullet_examples/fileIOPlugin.py
|
Python
|
agpl-3.0
| 713
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from operator import itemgetter
from openerp.osv import fields, osv
from openerp.tools.translate import _
'''
Report base class; a subclass needs to:
1. Add the default value for the 'type' field
2. Add the search fields that the report needs
3. Define the view XML based on the new class
4. Define a run_%type% method that returns the report line data
5. Define a _pdf_data() method that returns the report name in the XML
'''
class ir_translation(osv.osv):
_inherit = "ir.translation"
'''
    Execute the SQL below to do the update manually on the first deployment:
update ir_translation set is_translated = true where src is not null and src != value
'''
def _is_translated(self, cr, uid, ids, fields, args, context):
res = {}
for trans in self.read(cr, uid, ids, ['src','value'], context=context):
if not trans['value'] or trans['src'] == trans['value']:
res[trans['id']] = False
else:
res[trans['id']]= True
return res
def _is_translated_search(self, cr, uid, obj, name, args, context=None):
'''
@param args: [(u'is_translated', u'=', False)] / [(u'is_translated', u'=', True)]
'''
if not args:
return []
if args[0][2]:
where = 'value is not null and src != value'
else:
#the ids without translation
where = 'value is null or src = value'
cr.execute('SELECT id FROM ir_translation where ' + where)
res = cr.fetchall()
if not res:
return [('id','=',0)]
return [('id','in',map(itemgetter(0), res))]
_columns = {
'is_translated': fields.function(_is_translated, fnct_search=_is_translated_search, string='Is Translated', type='boolean'),
}
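# Hedged usage sketch (not part of the original module): in an OpenERP 7
# environment where a registry pool, cursor and uid are available, the
# functional field above can drive a search for terms that still need work.
def find_untranslated_ids(pool, cr, uid, limit=100):
    """Return ids of ir.translation records whose value is missing or equals src."""
    translation_obj = pool.get('ir.translation')
    return translation_obj.search(cr, uid, [('is_translated', '=', False)], limit=limit)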
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
837278709/metro-openerp
|
metro/ir_translation.py
|
Python
|
agpl-3.0
| 2,901
|
"""
Mixin to support editing in Studio.
"""
from __future__ import absolute_import
import six
from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW, module_attr
class StudioEditableBlock(object):
"""
Helper methods for supporting Studio editing of XBlocks.
This class is only intended to be used with an XBlock!
"""
has_author_view = True
def render_children(self, context, fragment, can_reorder=False, can_add=False):
"""
Renders the children of the module with HTML appropriate for Studio. If can_reorder is True,
then the children will be rendered to support drag and drop.
"""
contents = []
for child in self.get_children(): # pylint: disable=no-member
if can_reorder:
context['reorderable_items'].add(child.location)
context['can_add'] = can_add
rendered_child = child.render(StudioEditableModule.get_preview_view_name(child), context)
fragment.add_fragment_resources(rendered_child)
contents.append({
'id': six.text_type(child.location),
'content': rendered_child.content
})
fragment.add_content(self.system.render_template("studio_render_children_view.html", { # pylint: disable=no-member
'items': contents,
'xblock_context': context,
'can_add': can_add,
'can_reorder': can_reorder,
}))
@staticmethod
def get_preview_view_name(block):
"""
Helper method for getting preview view name (student_view or author_view) for a given module.
"""
return AUTHOR_VIEW if has_author_view(block) else STUDENT_VIEW
StudioEditableModule = StudioEditableBlock
class StudioEditableDescriptor(object):
"""
Helper mixin for supporting Studio editing of xmodules.
This class is only intended to be used with an XModule Descriptor. This class assumes that the associated
XModule will have an "author_view" method for returning an editable preview view of the module.
"""
author_view = module_attr(AUTHOR_VIEW)
has_author_view = True
def has_author_view(descriptor):
"""
Returns True if the xmodule linked to the descriptor supports "author_view".
"""
return getattr(descriptor, 'has_author_view', False)
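# Hedged illustration (not part of the original module): get_preview_view_name
# returns STUDENT_VIEW unless the block advertises an author view through the
# has_author_view attribute checked by the helper above. Both block classes
# below are hypothetical.
def _preview_view_name_examples():
    """Return the preview view names chosen for a plain and an authoring block."""
    class PlainBlock(object):
        """Block with no Studio-specific preview."""
    class AuthoringBlock(object):
        """Block that opts in to the author view."""
        has_author_view = True
    # (STUDENT_VIEW, AUTHOR_VIEW) given the fallback logic above.
    return (
        StudioEditableBlock.get_preview_view_name(PlainBlock()),
        StudioEditableBlock.get_preview_view_name(AuthoringBlock()),
    )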
|
ESOedX/edx-platform
|
common/lib/xmodule/xmodule/studio_editable.py
|
Python
|
agpl-3.0
| 2,348
|
import commands
import packetFactory
from packetFactory import SystemMessagePacket
import plugins
maintmode = False
@plugins.CommandHook("maint", "[Admin Only] Toggle maintenance mode", True)
class MaintCommand(commands.Command):  # renamed so the class does not shadow the maintmode flag above
def call_from_client(self, client):
global maintmode
maintmode = not maintmode
if maintmode:
client.send_crypto_packet(packetFactory.SystemMessagePacket("[Maint] Maintenance mode turned on.", 0x3).build())
return
else:
client.send_crypto_packet(packetFactory.SystemMessagePacket("[Maint] Maintenance mode turned off.", 0x3).build())
return
def call_from_console(self):
global maintmode
maintmode = not maintmode
if maintmode:
return "[Maint] Maintenance mode turned on."
else:
return "[Maint] Maintenance mode turned off."
@plugins.PacketHook(0x11, 0x0)
def Maint_check(context, data):
"""
:type context: ShipProxy.ShipProxy
"""
global maintmode
if not maintmode:
return data
context.send_crypto_packet(
SystemMessagePacket(
"The PSO2 or PSO2Proxy server is currently undergoing maintenance. Please try again later.", 0x1
).build()
)
context.transport.loseConnection()
return data
|
alama/PSO2Proxy
|
proxy/plugins/maint.py
|
Python
|
agpl-3.0
| 1,330
|
from mongoengine import *
# connect('edxapp')
class StaticPage(Document):
about = StringField(required=False)
faq = StringField(required=False)
    privacy = StringField(required=False)
honor = StringField(required=False)
tos = StringField(required=False)
contact = StringField(required=False)
blog = StringField(required=False)
@classmethod
def get_content(cls):
if len(cls.objects.all()) != 0:
content = cls.objects.all()[0]
else:
content = cls.objects.create()
return content
@classmethod
def update_content(cls, page, content):
try:
obj = cls.objects.all()[0]
setattr(obj, page, content)
obj.save()
return {
'success': True,
'error_msg': ''
}
except Exception as error:
return {
'success': False,
                'error_msg': str(error)
}
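# Hedged usage sketch (not part of the original module): assumes a mongoengine
# connection such as the commented-out connect('edxapp') above has been opened,
# and that `page` is one of the field names defined on StaticPage (e.g. 'faq').
def refresh_page(page, html):
    """Ensure the singleton document exists, then update a single page field."""
    StaticPage.get_content()  # creates the document on first use
    return StaticPage.update_content(page, html)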
|
synergeticsedx/deployment-wipro
|
lms/djangoapps/static_pages/models.py
|
Python
|
agpl-3.0
| 984
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Ort.polygontype'
db.alter_column(u'projects_ort', 'polygontype', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'Ort.polygontype'
db.alter_column(u'projects_ort', 'polygontype', self.gf('django.db.models.fields.TextField')(default=''))
models = {
u'projects.behoerde': {
'Meta': {'object_name': 'Behoerde'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'projects.bezirk': {
'Meta': {'object_name': 'Bezirk'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'projects.ort': {
'Meta': {'ordering': "('bezeichner',)", 'object_name': 'Ort'},
'adresse': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'beschreibung': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bezeichner': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bezirke': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'orte'", 'symmetrical': 'False', 'to': u"orm['projects.Bezirk']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'polygon': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polygontype': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'projects.verfahren': {
'Meta': {'object_name': 'Verfahren'},
'beschreibung': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'projects.verfahrensschritt': {
'Meta': {'ordering': "('verfahren', 'reihenfolge')", 'object_name': 'Verfahrensschritt'},
'beschreibung': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'hoverIcon': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'icon': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'reihenfolge': ('django.db.models.fields.IntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'verfahren': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'verfahrensschritte'", 'to': u"orm['projects.Verfahren']"})
},
u'projects.veroeffentlichung': {
'Meta': {'ordering': "('-ende',)", 'object_name': 'Veroeffentlichung'},
'auslegungsstelle': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'beginn': ('django.db.models.fields.DateField', [], {}),
'behoerde': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Behoerde']"}),
'beschreibung': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'ende': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'ort': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'veroeffentlichungen'", 'to': u"orm['projects.Ort']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'verfahrensschritt': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'veroeffentlichungen'", 'to': u"orm['projects.Verfahrensschritt']"}),
'zeiten': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['projects']
|
BuergerbautStadt/bbs-old
|
projects/migrations/0004_auto__chg_field_ort_polygontype.py
|
Python
|
agpl-3.0
| 5,444
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2011 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import l10n_br_zip_search
|
CLVsol/odoo_addons_l10n_br
|
l10n_br_zip/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,413
|
"""Assertions related to event validation"""
import json
import pprint
def assert_event_matches(expected, actual, tolerate=None):
"""
Compare two event dictionaries.
Fail if any discrepancies exist, and output the list of all discrepancies. The intent is to produce clearer
error messages than "{ some massive dict } != { some other massive dict }", instead enumerating the keys that
differ. Produces period separated "paths" to keys in the output, so "context.foo" refers to the following
structure:
{
'context': {
'foo': 'bar' # this key, value pair
}
}
The other key difference between this comparison and `assertEquals` is that it supports differing levels of
tolerance for discrepancies. We don't want to litter our tests full of exact match tests because then anytime we
add a field to all events, we have to go update every single test that has a hardcoded complete event structure in
it. Instead we support making partial assertions about structure and content of the event. So if I say my expected
event looks like this:
{
'event_type': 'foo.bar',
'event': {
'user_id': 10
}
}
This method will raise an assertion error if the actual event either does not contain the above fields in their
exact locations in the hierarchy, or if it does contain them but has different values for them. Note that it will
*not* necessarily raise an assertion error if the actual event contains other fields that are not listed in the
expected event. For example, the following event would not raise an assertion error:
{
'event_type': 'foo.bar',
        'referer': 'http://example.com',
'event': {
'user_id': 10
}
}
Note that the extra "referer" field is not considered an error by default.
The `tolerate` parameter takes a set that allows you to specify varying degrees of tolerance for some common
eventing related issues. See the `EventMatchTolerates` class for more information about the various flags that are
supported here.
Example output if an error is found:
Unexpected differences found in structs:
* <path>: not found in actual
* <path>: <expected_value> != <actual_value> (expected != actual)
Expected:
        { <expected event> }
Actual:
{ <actual event> }
"<path>" is a "." separated string indicating the key that differed. In the examples above "event.user_id" would
refer to the value of the "user_id" field contained within the dictionary referred to by the "event" field in the
root dictionary.
"""
differences = get_event_differences(expected, actual, tolerate=tolerate)
if len(differences) > 0:
debug_info = [
'',
'Expected:',
block_indent(expected),
'Actual:',
block_indent(actual),
'Tolerating:',
block_indent(EventMatchTolerates.default_if_not_defined(tolerate)),
]
differences = ['* ' + d for d in differences]
message_lines = differences + debug_info
raise AssertionError('Unexpected differences found in structs:\n\n' + '\n'.join(message_lines))
class EventMatchTolerates:
"""
Represents groups of flags that specify the level of tolerance for deviation between an expected event and an actual
event.
These are common event specific deviations that we don't want to handle with special case logic throughout our
tests.
"""
# Allow the "event" field to be a string, currently this is the case for all browser events.
STRING_PAYLOAD = 'string_payload'
# Allow unexpected fields to exist in the top level event dictionary.
ROOT_EXTRA_FIELDS = 'root_extra_fields'
# Allow unexpected fields to exist in the "context" dictionary. This is where new fields that appear in multiple
# events are most commonly added, so we frequently want to tolerate variation here.
CONTEXT_EXTRA_FIELDS = 'context_extra_fields'
# Allow unexpected fields to exist in the "event" dictionary. Typically in unit tests we don't want to allow this
# type of variance since there are typically only a small number of tests for a particular event type.
PAYLOAD_EXTRA_FIELDS = 'payload_extra_fields'
@classmethod
def default(cls):
"""A reasonable set of tolerated variations."""
# NOTE: "payload_extra_fields" is deliberately excluded from this list since we want to detect erroneously added
# fields in the payload by default.
return {
cls.STRING_PAYLOAD,
cls.ROOT_EXTRA_FIELDS,
cls.CONTEXT_EXTRA_FIELDS,
}
@classmethod
def lenient(cls):
"""Allow all known variations."""
return cls.default() | {
cls.PAYLOAD_EXTRA_FIELDS
}
@classmethod
def strict(cls):
"""Allow no variation at all."""
return frozenset()
@classmethod
def default_if_not_defined(cls, tolerates=None):
"""Use the provided tolerance or provide a default one if None was specified."""
if tolerates is None:
return cls.default()
else:
return tolerates
def assert_events_equal(expected, actual):
"""
Strict comparison of two events.
This asserts that every field in the real event exactly matches the expected event.
"""
assert_event_matches(expected, actual, tolerate=EventMatchTolerates.strict())
def get_event_differences(expected, actual, tolerate=None):
"""Given two events, gather a list of differences between them given some set of tolerated variances."""
tolerate = EventMatchTolerates.default_if_not_defined(tolerate)
# Some events store their payload in a JSON string instead of a dict. Comparing these strings can be problematic
# since the keys may be in different orders, so we parse the string here if we were expecting a dict.
if EventMatchTolerates.STRING_PAYLOAD in tolerate:
expected = parse_event_payload(expected)
actual = parse_event_payload(actual)
def should_strict_compare(path):
"""
We want to be able to vary the degree of strictness we apply depending on the testing context.
Some tests will want to assert that the entire event matches exactly, others will tolerate some variance in the
context or root fields, but not in the payload (for example).
"""
if path == [] and EventMatchTolerates.ROOT_EXTRA_FIELDS in tolerate:
return False
elif path == ['event'] and EventMatchTolerates.PAYLOAD_EXTRA_FIELDS in tolerate:
return False
elif path == ['context'] and EventMatchTolerates.CONTEXT_EXTRA_FIELDS in tolerate:
return False
else:
return True
return compare_structs(expected, actual, should_strict_compare=should_strict_compare)
def block_indent(text, spaces=4):
"""
Given a multi-line string, indent every line of it by the given number of spaces.
If `text` is not a string it is formatted using pprint.pformat.
"""
return '\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])
def parse_event_payload(event):
"""
    Given an event, parse the 'event' field (or, if it is absent, the 'data' field) as a JSON string.
Note that this may simply return the same event unchanged, or return a new copy of the event with the payload
parsed. It will never modify the event in place.
"""
payload_key = 'event' if 'event' in event else 'data'
if payload_key in event and isinstance(event[payload_key], str):
event = event.copy()
try:
event[payload_key] = json.loads(event[payload_key])
except ValueError:
pass
return event
def compare_structs(expected, actual, should_strict_compare=None, path=None):
"""
Traverse two structures to ensure that the `actual` structure contains all of the elements within the `expected`
one.
    Note that this performs a "deep" comparison, descending into dictionaries, lists and other collections to ensure
that the structure matches the expectation.
If a particular value is not recognized, it is simply compared using the "!=" operator.
"""
if path is None:
path = []
differences = []
if isinstance(expected, dict) and isinstance(actual, dict):
expected_keys = frozenset(list(expected.keys()))
actual_keys = frozenset(list(actual.keys()))
for key in expected_keys - actual_keys:
differences.append('{}: not found in actual'.format(_path_to_string(path + [key])))
if should_strict_compare is not None and should_strict_compare(path):
for key in actual_keys - expected_keys:
differences.append('{}: only defined in actual'.format(_path_to_string(path + [key])))
for key in expected_keys & actual_keys:
child_differences = compare_structs(expected[key], actual[key], should_strict_compare, path + [key])
differences.extend(child_differences)
elif expected != actual:
differences.append('{path}: {a} != {b} (expected != actual)'.format(
path=_path_to_string(path),
a=repr(expected),
b=repr(actual)
))
return differences
def is_matching_event(expected_event, actual_event, tolerate=None):
"""Return True iff the `actual_event` matches the `expected_event` given the tolerances."""
return len(get_event_differences(expected_event, actual_event, tolerate=tolerate)) == 0
def _path_to_string(path):
"""Convert a list of path elements into a single path string."""
return '.'.join(path)
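# Hedged usage sketch (not part of the original module): a partial expected
# event matched against a richer actual event. The extra root-level "referer"
# key is tolerated by EventMatchTolerates.default() but not by strict().
def _example_partial_match():
    """Show default vs. strict tolerance of a root-level extra field."""
    expected = {'event_type': 'foo.bar', 'event': {'user_id': 10}}
    actual = {'event_type': 'foo.bar', 'referer': 'http://example.com', 'event': {'user_id': 10}}
    assert_event_matches(expected, actual)  # passes: ROOT_EXTRA_FIELDS is tolerated by default
    return is_matching_event(expected, actual, tolerate=EventMatchTolerates.strict())  # False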
|
eduNEXT/edunext-platform
|
openedx/core/lib/tests/assertions/events.py
|
Python
|
agpl-3.0
| 9,906
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, an open source suite of business apps
# This module copyright (C) 2013-2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Street name and number",
"summary": "Introduces separate fields for street name and street number.",
"version": "8.0.1.0.0",
"author": "Therp BV,Odoo Community Association (OCA)",
"website": "https://github.com/oca/partner-contact",
"category": 'Tools',
"depends": [
'base'
],
"data": [
'views/res_partner.xml',
],
'installable': True,
'license': 'AGPL-3',
'post_init_hook': 'post_init_hook',
}
|
open-synergy/partner-contact
|
partner_street_number/__openerp__.py
|
Python
|
agpl-3.0
| 1,475
|
# handles server side table processing
class ServerSideTable:
"""
This class is used to do server-side processing
on the DataTables table such as paginating, sorting,
filtering(not implemented) etc. This takes the load off
the client-side and reduces the size of data interchanged.
Usage:
ServerSideTable(rows_count, table_rows, header_data_names, request_values)
where,
        `rows_count` as the number of rows in the table,
        `table_rows` as the data rows of the table,
        `header_data_names` as the header names of the table, and
        `request_values` as the request arguments, which must include
        the DataTables server-side processing arguments.
Have a look at snp_browser_table() function in
wqflask/wqflask/views.py for reference use.
"""
def __init__(self, rows_count, table_rows, header_data_names, request_values):
self.request_values = request_values
self.sEcho = self.request_values['sEcho']
self.rows_count = rows_count
self.table_rows = table_rows
self.header_data_names = header_data_names
self.sort_rows()
self.paginate_rows()
def sort_rows(self):
"""
        Sorts the rows, taking into account the column (or columns) that the
user has selected.
"""
def is_reverse(str_direction):
""" Maps the 'desc' and 'asc' words to True or False. """
            return str_direction == 'desc'
if (self.request_values['iSortCol_0'] != "") and (int(self.request_values['iSortingCols']) > 0):
for i in range(0, int(self.request_values['iSortingCols'])):
column_number = int(self.request_values['iSortCol_' + str(i)])
column_name = self.header_data_names[column_number - 1]
sort_direction = self.request_values['sSortDir_' + str(i)]
self.table_rows = sorted(self.table_rows,
key=lambda x: x[column_name],
reverse=is_reverse(sort_direction))
def paginate_rows(self):
"""
Selects a subset of the filtered and sorted data based on if the table
has pagination, the current page and the size of each page.
"""
def requires_pagination():
""" Check if the table is going to be paginated """
if self.request_values['iDisplayStart'] != "":
if int(self.request_values['iDisplayLength']) != -1:
return True
return False
if not requires_pagination():
return
start = int(self.request_values['iDisplayStart'])
length = int(self.request_values['iDisplayLength'])
# if search returns only one page
if len(self.table_rows) <= length:
# display only one page
self.table_rows = self.table_rows[start:]
else:
limit = -len(self.table_rows) + start + length
if limit < 0:
# display pagination
self.table_rows = self.table_rows[start:limit]
else:
# display last page of pagination
self.table_rows = self.table_rows[start:]
def get_page(self):
output = {}
output['sEcho'] = str(self.sEcho)
output['iTotalRecords'] = str(float('Nan'))
output['iTotalDisplayRecords'] = str(self.rows_count)
output['data'] = self.table_rows
return output
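# Hedged usage sketch (not part of the original module): the request values
# below are illustrative stand-ins for the DataTables parameters the class
# reads above (sEcho, iSortCol_0, iSortingCols, sSortDir_0, iDisplayStart,
# iDisplayLength).
if __name__ == '__main__':
    rows = [{'snp': 'rs1', 'chr': '1'}, {'snp': 'rs2', 'chr': '2'}, {'snp': 'rs3', 'chr': 'X'}]
    request_values = {
        'sEcho': '1',
        'iSortCol_0': '1',      # sort on the first header, 'snp'
        'iSortingCols': '1',
        'sSortDir_0': 'desc',
        'iDisplayStart': '0',
        'iDisplayLength': '2',  # first page of two rows
    }
    table = ServerSideTable(len(rows), rows, ['snp', 'chr'], request_values)
    print(table.get_page())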
|
zsloan/genenetwork2
|
wqflask/wqflask/server_side.py
|
Python
|
agpl-3.0
| 3,585
|
from django import template
from ecommerce.extensions.offer.utils import format_benefit_value, get_benefit_type
register = template.Library()
@register.filter(name='benefit_discount')
def benefit_discount(benefit):
"""
Format benefit value for display based on the benefit type.
Example:
'100%' if benefit.value == 100.00 and benefit.type == 'Percentage'
'$100.00' if benefit.value == 100.00 and benefit.type == 'Absolute'
Arguments:
benefit (Benefit): Voucher's Benefit.
Returns:
str: String value containing formatted benefit value and type.
"""
return format_benefit_value(benefit)
@register.filter(name='benefit_type')
def benefit_type(benefit):
return get_benefit_type(benefit)
|
edx/ecommerce
|
ecommerce/extensions/offer/templatetags/offer_tags.py
|
Python
|
agpl-3.0
| 758
|
"""
Public interface for self-assessment.
"""
import logging
from django.db import DatabaseError, transaction
from openassessment.assessment.errors import SelfAssessmentInternalError, SelfAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection
from openassessment.assessment.serializers import (InvalidRubric, full_assessment_dict, rubric_from_dict,
serialize_assessments)
from submissions.api import SubmissionNotFoundError, get_submission_and_student
# Assessments are tagged as "self-evaluation"
SELF_TYPE = "SE"
logger = logging.getLogger("openassessment.assessment.api.self")
def submitter_is_finished(submission_uuid, self_requirements): # pylint: disable=unused-argument
"""
Check whether a self-assessment has been completed for a submission.
Args:
submission_uuid (str): The unique identifier of the submission.
self_requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the submitter has assessed their answer
Examples:
>>> submitter_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).exists()
def assessment_is_finished(submission_uuid, self_requirements):
"""
Check whether a self-assessment has been completed. For self-assessment,
this function is synonymous with submitter_is_finished.
Args:
submission_uuid (str): The unique identifier of the submission.
self_requirements (dict): Any attributes of the assessment module required
to determine if this assessment is complete. There are currently
no requirements for a self-assessment.
Returns:
True if the assessment is complete.
Examples:
>>> assessment_is_finished('222bdf3d-a88e-11e3-859e-040ccee02800', {})
True
"""
return submitter_is_finished(submission_uuid, self_requirements)
def get_score(submission_uuid, self_requirements): # pylint: disable=unused-argument
"""
Get the score for this particular assessment.
Args:
submission_uuid (str): The unique identifier for the submission
self_requirements (dict): Not used.
Returns:
A dictionary with the points earned, points possible, and
contributing_assessments information, along with a None staff_id.
Examples:
>>> get_score('222bdf3d-a88e-11e3-859e-040ccee02800', {})
{
'points_earned': 5,
'points_possible': 10
}
"""
assessment = get_assessment(submission_uuid)
if not assessment:
return None
return {
"points_earned": assessment["points_earned"],
"points_possible": assessment["points_possible"],
"contributing_assessments": [assessment["id"]],
"staff_id": None,
}
def create_assessment(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at=None
):
"""
Create a self-assessment for a submission.
Args:
submission_uuid (str): The unique identifier for the submission being assessed.
user_id (str): The ID of the user creating the assessment.
This must match the ID of the user who made the submission.
options_selected (dict): Mapping of rubric criterion names to option values selected.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): Serialized Rubric model.
Keyword Arguments:
scored_at (datetime): The timestamp of the assessment; defaults to the current time.
Returns:
dict: serialized Assessment model
Raises:
SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
"""
# Check that there are not any assessments for this submission
if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
"because another self-assessment already exists for that submission."
).format(uuid=submission_uuid)
raise SelfAssessmentRequestError(msg)
# Check that the student is allowed to assess this submission
try:
submission = get_submission_and_student(submission_uuid)
if submission['student_item']['student_id'] != user_id:
msg = (
u"Cannot submit a self-assessment for the submission {uuid} "
u"because it was created by another learner "
u"(submission learner ID {student_id} does not match your "
u"learner id {other_id})"
).format(
uuid=submission_uuid,
student_id=submission['student_item']['student_id'],
other_id=user_id
)
raise SelfAssessmentRequestError(msg)
except SubmissionNotFoundError:
msg = (
"Could not submit a self-assessment because no submission "
"exists with UUID {uuid}"
).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)
try:
assessment = _complete_assessment(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at
)
_log_assessment(assessment, submission)
except InvalidRubric as ex:
msg = "Invalid rubric definition: " + str(ex)
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except InvalidRubricSelection as ex:
msg = "Selected options do not match the rubric: " + str(ex)
logger.warning(msg, exc_info=True)
raise SelfAssessmentRequestError(msg)
except DatabaseError:
error_message = (
u"Error creating self assessment for submission {}"
).format(submission_uuid)
logger.exception(error_message)
raise SelfAssessmentInternalError(error_message)
# Return the serialized assessment
return full_assessment_dict(assessment)
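# Hedged usage sketch (not part of the original module): the criterion name,
# option and feedback strings below are hypothetical; a real call needs an
# existing submission owned by the same learner and a rubric_dict matching the
# problem definition.
def _example_self_assessment(submission_uuid, user_id, rubric_dict):
    """Show the call shape documented in create_assessment above."""
    return create_assessment(
        submission_uuid,
        user_id,
        options_selected={u'clarity': u'clear'},         # criterion name -> chosen option
        criterion_feedback={u'clarity': u'Nice work.'},  # optional per-criterion text
        overall_feedback=u'Looks good overall.',
        rubric_dict=rubric_dict,
    )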
@transaction.atomic
def _complete_assessment(
submission_uuid,
user_id,
options_selected,
criterion_feedback,
overall_feedback,
rubric_dict,
scored_at
):
"""
Internal function for creating an assessment and its parts atomically.
Args:
submission_uuid (str): The unique identifier for the submission being
assessed.
user_id (str): The ID of the user creating the assessment. This must
match the ID of the user who made the submission.
options_selected (dict): Mapping of rubric criterion names to option
values selected.
criterion_feedback (dict): Dictionary mapping criterion names to the
free-form text feedback the user gave for the criterion.
Since criterion feedback is optional, some criteria may not appear
in the dictionary.
overall_feedback (unicode): Free-form text feedback on the submission overall.
rubric_dict (dict): Serialized Rubric model.
scored_at (datetime): The timestamp of the assessment.
Returns:
Assessment model
"""
# Get or create the rubric
rubric = rubric_from_dict(rubric_dict)
# Create the self assessment
assessment = Assessment.create(
rubric,
user_id,
submission_uuid,
SELF_TYPE,
scored_at=scored_at,
feedback=overall_feedback
)
# This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
return assessment
def get_assessment(submission_uuid):
"""
Retrieve a self-assessment for a submission_uuid.
Args:
        submission_uuid (str): The submission UUID we want self-assessment
            information for.
Returns:
assessment (dict) is a serialized Assessment model, or None (if the user has not yet self-assessed)
If multiple submissions or self-assessments are found, returns the most recent one.
Raises:
SelfAssessmentRequestError: submission_uuid was invalid.
"""
# Retrieve assessments for the submission UUID
    # We weakly enforce that the number of self-assessments per submission is <= 1,
# but not at the database level. Someone could take advantage of the race condition
# between checking the number of self-assessments and creating a new self-assessment.
# To be safe, we retrieve just the most recent submission.
serialized_assessments = serialize_assessments(Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).order_by('-scored_at')[:1])
if not serialized_assessments:
logger.info(
u"No self-assessment found for submission {}".format(submission_uuid)
)
return None
serialized_assessment = serialized_assessments[0]
logger.info(u"Retrieved self-assessment for submission {}".format(submission_uuid))
return serialized_assessment
def get_assessment_scores_by_criteria(submission_uuid):
"""Get the median score for each rubric criterion
Args:
submission_uuid (str): The submission uuid is used to get the
assessments used to score this submission, and generate the
appropriate median score.
Returns:
(dict): A dictionary of rubric criterion names, with a median score of
the peer assessments.
Raises:
SelfAssessmentInternalError: If any error occurs while retrieving
information to form the median scores, an error is raised.
"""
try:
        # This will return a list containing at most one assessment (the most recent)
assessments = list(
Assessment.objects.filter(
score_type=SELF_TYPE, submission_uuid=submission_uuid
).order_by('-scored_at')[:1]
)
scores = Assessment.scores_by_criterion(assessments)
# Since this is only being sent one score, the median score will be the
# same as the only score.
return Assessment.get_median_score_dict(scores)
except DatabaseError:
error_message = (
u"Error getting self assessment scores for submission {}"
).format(submission_uuid)
logger.exception(error_message)
raise SelfAssessmentInternalError(error_message)
def _log_assessment(assessment, submission):
"""
Log the creation of a self-assessment.
Args:
assessment (Assessment): The assessment model.
submission (dict): The serialized submission model.
Returns:
None
"""
logger.info(
u"Created self-assessment {assessment_id} for learner {user} on "
u"submission {submission_uuid}, course {course_id}, item {item_id} "
u"with rubric {rubric_content_hash}"
.format(
assessment_id=assessment.id,
user=submission['student_item']['student_id'],
submission_uuid=submission['uuid'],
course_id=submission['student_item']['course_id'],
item_id=submission['student_item']['item_id'],
rubric_content_hash=assessment.rubric.content_hash
)
)
|
Stanford-Online/edx-ora2
|
openassessment/assessment/api/self.py
|
Python
|
agpl-3.0
| 12,025
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PpopenAt(MakefilePackage):
"""ppOpen-AT is a part of the ppOpenHPC"""
homepage = "http://ppopenhpc.cc.u-tokyo.ac.jp/ppopenhpc/"
git = "https://github.com/Post-Peta-Crest/ppOpenHPC.git"
version('master', branch='AT')
def edit(self, spec, prefix):
makefile_in = FileFilter('Makefile.in')
makefile_in.filter('gcc', spack_cxx)
makefile_in.filter('~/ppohAT_1.0.0', prefix)
makefile_in.filter('mkdir', 'mkdir -p')
mkdirp('bin')
def install(self, spec, prefix):
make('install')
install_tree('examples', prefix.examples)
install_tree('doc', prefix.doc)
|
iulian787/spack
|
var/spack/repos/builtin/packages/ppopen-at/package.py
|
Python
|
lgpl-2.1
| 864
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import router
class Connection(object):
"""The connection object pairs a backend with an individual
identity (phone number, nickname, email, etc) so application
authors need not concern themselves with backends."""
def __init__(self, backend, identity):
# Ok, connections should be able to find their backend even
# for a name.
#
# NOTE: The whole 'finding' a backend from a stored 'slug'
# is _messed_ and probably shouldn't be here, but putting it
# here for now at least encapsulates it in core where it
# should be instead of floating around everyone's app
#
        if isinstance(backend, basestring):
            # try to find it from the router
            backend_slug = backend
            backend = router.get_router().get_backend(backend_slug)
            if backend is None:
                raise Exception(
                    '%s is not a valid backend, did you add it to your rapidsms.ini?' % backend_slug)
self.backend = backend
# unique identity with respect to backend
# (usually phone number, but may be a port number
# or email address, etc)
self.identity = identity
def fork (self, identity):
"""Create a new connection on the same backend to a different
identity."""
return type(self)(self.backend, identity)
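# Hedged usage sketch (not part of the original module): given an already-loaded
# backend object (so the router lookup above is skipped), build one connection
# per identity by forking a base connection.
def connections_for(backend, identities):
    """Return a list with one Connection per identity on the same backend."""
    if not identities:
        return []
    base = Connection(backend, identities[0])
    return [base] + [base.fork(identity) for identity in identities[1:]]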
|
ewheeler/rapidsms-core
|
lib/rapidsms/connection.py
|
Python
|
lgpl-3.0
| 1,406
|
import sys
def setup():
return
def run(core, actor, target, commandString):
newState = actor.toggleGroupDance()
msg = '@performance:dance_group_on' if newState else '@performance:dance_group_off'
actor.sendSystemMessage(msg, 0)
return
|
agry/NGECore2
|
scripts/commands/groupdance.py
|
Python
|
lgpl-3.0
| 258
|
"""
Unit tests for stem.descriptor.hidden_service_descriptor.
"""
import datetime
import functools
import unittest
import stem.descriptor
import stem.prereq
import test.require
from stem.descriptor.hidden_service_descriptor import (
REQUIRED_FIELDS,
DecryptionFailure,
HiddenServiceDescriptor,
)
from test.unit.descriptor import (
get_resource,
base_expect_invalid_attr,
base_expect_invalid_attr_for_text,
)
MESSAGE_BLOCK = """
-----BEGIN MESSAGE-----
%s
-----END MESSAGE-----\
"""
EXPECTED_DDG_PERMANENT_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAJ/SzzgrXPxTlFrKVhXh3buCWv2QfcNgncUpDpKouLn3AtPH5Ocys0jE
aZSKdvaiQ62md2gOwj4x61cFNdi05tdQjS+2thHKEm/KsB9BGLSLBNJYY356bupg
I5gQozM65ENelfxYlysBjJ52xSDBd8C4f/p9umdzaaaCmzXG/nhzAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
EXPECTED_DDG_INTRODUCTION_POINTS_ENCODED = """\
-----BEGIN MESSAGE-----
aW50cm9kdWN0aW9uLXBvaW50IGl3a2k3N3h0YnZwNnF2ZWRmcndkem5jeHMzY2th
eWV1CmlwLWFkZHJlc3MgMTc4LjYyLjIyMi4xMjkKb25pb24tcG9ydCA0NDMKb25p
b24ta2V5Ci0tLS0tQkVHSU4gUlNBIFBVQkxJQyBLRVktLS0tLQpNSUdKQW9HQkFL
OTRCRVlJSFo0S2RFa2V5UGhiTENwUlc1RVNnKzJXUFFock00eXVLWUd1cTh3Rldn
dW1aWVI5CmsvV0EvL0ZZWE1CejBiQitja3Vacy9ZdTluSytITHpwR2FwVjBjbHN0
NEdVTWNCSW5VQ3pDY3BqSlRRc1FEZ20KMy9ZM2NxaDBXNTVnT0NGaG9tUTQvMVdP
WWc3WUNqazRYWUhKRTIwT2RHMkxsNXpvdEs2ZkFnTUJBQUU9Ci0tLS0tRU5EIFJT
QSBQVUJMSUMgS0VZLS0tLS0Kc2VydmljZS1rZXkKLS0tLS1CRUdJTiBSU0EgUFVC
TElDIEtFWS0tLS0tCk1JR0pBb0dCQUpYbUpiOGxTeWRNTXFDZ0NnZmd2bEIyRTVy
cGQ1N2t6L0FxZzcvZDFIS2MzK2w1UW9Vdkh5dXkKWnNBbHlrYThFdTUzNGhsNDFv
cUVLcEFLWWNNbjFUTTB2cEpFR05WT2MrMDVCSW54STloOWYwTWcwMVBEMHRZdQpH
Y0xIWWdCemNyZkVtS3dNdE04V0VtY01KZDduMnVmZmFBdko4NDZXdWJiZVY3TVcx
WWVoQWdNQkFBRT0KLS0tLS1FTkQgUlNBIFBVQkxJQyBLRVktLS0tLQppbnRyb2R1
Y3Rpb24tcG9pbnQgZW00Z2prNmVpaXVhbGhtbHlpaWZyemM3bGJ0cnNiaXAKaXAt
YWRkcmVzcyA0Ni40LjE3NC41Mgpvbmlvbi1wb3J0IDQ0Mwpvbmlvbi1rZXkKLS0t
LS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JR0pBb0dCQUxCbWhkRjV3SHhI
cnBMU21qQVpvdHR4MjIwKzk5NUZkTU9PdFpOalJ3MURCU3ByVVpacXR4V2EKUDhU
S3BIS3p3R0pLQ1ZZSUlqN2xvaGJ2OVQ5dXJtbGZURTA1VVJHZW5ab2lmT0ZOejNZ
d01KVFhTY1FFQkoxMAo5aVdOTERUc2tMekRLQ0FiR2hibi9NS3dPZllHQmhOVGxq
ZHlUbU5ZNUVDUmJSempldjl2QWdNQkFBRT0KLS0tLS1FTkQgUlNBIFBVQkxJQyBL
RVktLS0tLQpzZXJ2aWNlLWtleQotLS0tLUJFR0lOIFJTQSBQVUJMSUMgS0VZLS0t
LS0KTUlHSkFvR0JBTXhNSG9BbXJiVU1zeGlJQ3AzaVRQWWdobjBZdWVLSHgyMTl3
dThPL1E1MVF5Y1ZWTHBYMjdkMQpoSlhrUEIzM1hRQlhzQlM3U3hzU3NTQ1EzR0V1
clFKN0d1QkxwWUlSL3Zxc2FrRS9sOHdjMkNKQzVXVWh5RkZrCisxVFdJVUk1dHhu
WEx5V0NSY0tEVXJqcWRvc0RhRG9zZ0hGZzIzTW54K3hYY2FRL2ZyQi9BZ01CQUFF
PQotLS0tLUVORCBSU0EgUFVCTElDIEtFWS0tLS0tCmludHJvZHVjdGlvbi1wb2lu
dCBqcWhmbDM2NHgzdXBlNmxxbnhpem9sZXdsZnJzdzJ6eQppcC1hZGRyZXNzIDYy
LjIxMC44Mi4xNjkKb25pb24tcG9ydCA0NDMKb25pb24ta2V5Ci0tLS0tQkVHSU4g
UlNBIFBVQkxJQyBLRVktLS0tLQpNSUdKQW9HQkFQVWtxeGdmWWR3MFBtL2c2TWJo
bVZzR0tsdWppZm1raGRmb0VldXpnbyt3bkVzR3Z3VWVienJ6CmZaSlJ0MGNhWEZo
bkNHZ1FEMklnbWFyVWFVdlAyNGZYby80bVl6TGNQZUk3Z1puZXVBUUpZdm05OFl2
OXZPSGwKTmFNL1d2RGtDc0ozR1ZOSjFIM3dMUFFSSTN2N0tiTnVjOXRDT1lsL3Iw
OU9oVmFXa3phakFnTUJBQUU9Ci0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K
c2VydmljZS1rZXkKLS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JR0pB
b0dCQUxieDhMZXFSb1Avcjl3OWhqd0Q0MVlVbTdQbzY5N3hSdHl0RjBNY3lMQ1M3
R1JpVVluamk3S1kKZmVwWGR2Ti9KbDVxUUtISUJiNjAya3VPVGwwcE44UStZZUZV
U0lJRGNtUEJMcEJEaEgzUHZyUU1jR1ZhaU9XSAo4dzBITVpDeGd3QWNDQzUxdzVW
d2l1bXhFSk5CVmNac094MG16TjFDbG95KzkwcTBsRlhMQWdNQkFBRT0KLS0tLS1F
TkQgUlNBIFBVQkxJQyBLRVktLS0tLQoK
-----END MESSAGE-----\
"""
EXPECTED_DDG_INTRODUCTION_POINTS_CONTENT = b"""\
introduction-point iwki77xtbvp6qvedfrwdzncxs3ckayeu
ip-address 178.62.222.129
onion-port 443
onion-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAK94BEYIHZ4KdEkeyPhbLCpRW5ESg+2WPQhrM4yuKYGuq8wFWgumZYR9
k/WA//FYXMBz0bB+ckuZs/Yu9nK+HLzpGapV0clst4GUMcBInUCzCcpjJTQsQDgm
3/Y3cqh0W55gOCFhomQ4/1WOYg7YCjk4XYHJE20OdG2Ll5zotK6fAgMBAAE=
-----END RSA PUBLIC KEY-----
service-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAJXmJb8lSydMMqCgCgfgvlB2E5rpd57kz/Aqg7/d1HKc3+l5QoUvHyuy
ZsAlyka8Eu534hl41oqEKpAKYcMn1TM0vpJEGNVOc+05BInxI9h9f0Mg01PD0tYu
GcLHYgBzcrfEmKwMtM8WEmcMJd7n2uffaAvJ846WubbeV7MW1YehAgMBAAE=
-----END RSA PUBLIC KEY-----
introduction-point em4gjk6eiiualhmlyiifrzc7lbtrsbip
ip-address 46.4.174.52
onion-port 443
onion-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBALBmhdF5wHxHrpLSmjAZottx220+995FdMOOtZNjRw1DBSprUZZqtxWa
P8TKpHKzwGJKCVYIIj7lohbv9T9urmlfTE05URGenZoifOFNz3YwMJTXScQEBJ10
9iWNLDTskLzDKCAbGhbn/MKwOfYGBhNTljdyTmNY5ECRbRzjev9vAgMBAAE=
-----END RSA PUBLIC KEY-----
service-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAMxMHoAmrbUMsxiICp3iTPYghn0YueKHx219wu8O/Q51QycVVLpX27d1
hJXkPB33XQBXsBS7SxsSsSCQ3GEurQJ7GuBLpYIR/vqsakE/l8wc2CJC5WUhyFFk
+1TWIUI5txnXLyWCRcKDUrjqdosDaDosgHFg23Mnx+xXcaQ/frB/AgMBAAE=
-----END RSA PUBLIC KEY-----
introduction-point jqhfl364x3upe6lqnxizolewlfrsw2zy
ip-address 62.210.82.169
onion-port 443
onion-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAPUkqxgfYdw0Pm/g6MbhmVsGKlujifmkhdfoEeuzgo+wnEsGvwUebzrz
fZJRt0caXFhnCGgQD2IgmarUaUvP24fXo/4mYzLcPeI7gZneuAQJYvm98Yv9vOHl
NaM/WvDkCsJ3GVNJ1H3wLPQRI3v7KbNuc9tCOYl/r09OhVaWkzajAgMBAAE=
-----END RSA PUBLIC KEY-----
service-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBALbx8LeqRoP/r9w9hjwD41YUm7Po697xRtytF0McyLCS7GRiUYnji7KY
fepXdvN/Jl5qQKHIBb602kuOTl0pN8Q+YeFUSIIDcmPBLpBDhH3PvrQMcGVaiOWH
8w0HMZCxgwAcCC51w5VwiumxEJNBVcZsOx0mzN1Cloy+90q0lFXLAgMBAAE=
-----END RSA PUBLIC KEY-----
"""
EXPECTED_DDG_SIGNATURE = """\
-----BEGIN SIGNATURE-----
VKMmsDIUUFOrpqvcQroIZjDZTKxqNs88a4M9Te8cR/ZvS7H2nffv6iQs0tom5X4D
4Dy4iZiy+pwYxdHfaOxmdpgMCRvgPb34MExWr5YemH0QuGtnlp5Wxr8GYaAQVuZX
cZjQLW0juUYCbgIGdxVEBnlEt2rgBSM9+1oR7EAfV1U=
-----END SIGNATURE-----\
"""
EXPECT_POINT_1_ONION_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAK94BEYIHZ4KdEkeyPhbLCpRW5ESg+2WPQhrM4yuKYGuq8wFWgumZYR9
k/WA//FYXMBz0bB+ckuZs/Yu9nK+HLzpGapV0clst4GUMcBInUCzCcpjJTQsQDgm
3/Y3cqh0W55gOCFhomQ4/1WOYg7YCjk4XYHJE20OdG2Ll5zotK6fAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
EXPECT_POINT_1_SERVICE_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAJXmJb8lSydMMqCgCgfgvlB2E5rpd57kz/Aqg7/d1HKc3+l5QoUvHyuy
ZsAlyka8Eu534hl41oqEKpAKYcMn1TM0vpJEGNVOc+05BInxI9h9f0Mg01PD0tYu
GcLHYgBzcrfEmKwMtM8WEmcMJd7n2uffaAvJ846WubbeV7MW1YehAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
EXPECT_POINT_2_ONION_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBALBmhdF5wHxHrpLSmjAZottx220+995FdMOOtZNjRw1DBSprUZZqtxWa
P8TKpHKzwGJKCVYIIj7lohbv9T9urmlfTE05URGenZoifOFNz3YwMJTXScQEBJ10
9iWNLDTskLzDKCAbGhbn/MKwOfYGBhNTljdyTmNY5ECRbRzjev9vAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
EXPECT_POINT_2_SERVICE_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAMxMHoAmrbUMsxiICp3iTPYghn0YueKHx219wu8O/Q51QycVVLpX27d1
hJXkPB33XQBXsBS7SxsSsSCQ3GEurQJ7GuBLpYIR/vqsakE/l8wc2CJC5WUhyFFk
+1TWIUI5txnXLyWCRcKDUrjqdosDaDosgHFg23Mnx+xXcaQ/frB/AgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
EXPECT_POINT_3_ONION_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAPUkqxgfYdw0Pm/g6MbhmVsGKlujifmkhdfoEeuzgo+wnEsGvwUebzrz
fZJRt0caXFhnCGgQD2IgmarUaUvP24fXo/4mYzLcPeI7gZneuAQJYvm98Yv9vOHl
NaM/WvDkCsJ3GVNJ1H3wLPQRI3v7KbNuc9tCOYl/r09OhVaWkzajAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
EXPECT_POINT_3_SERVICE_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBALbx8LeqRoP/r9w9hjwD41YUm7Po697xRtytF0McyLCS7GRiUYnji7KY
fepXdvN/Jl5qQKHIBb602kuOTl0pN8Q+YeFUSIIDcmPBLpBDhH3PvrQMcGVaiOWH
8w0HMZCxgwAcCC51w5VwiumxEJNBVcZsOx0mzN1Cloy+90q0lFXLAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
EXPECTED_BASIC_AUTH_INTRODUCTION_POINTS_ENCODED = """\
-----BEGIN MESSAGE-----
AQEAi3xIJz0Qv97ug9kr4U0UNN2kQhkddPHuj4op3cw+fgMLqzPlFBPAJgaEKc+g
8xBTRKUlvfkXxocfV75GyQGi2Vqu5iN1SbI5Uliu3n8IiUina5+WaOfUs9iuHJIK
cErgfT0bUfXKDLvW6/ncsgPdb6kb+jjT8NVhR4ZrRUf9ASfcY/f2WFNTmLgOR3Oa
f2tMLJcAck9VbCDjKfSC6e6HgtxRFe9dX513mDviZp15UAHkjJSKxKvqRRVkL+7W
KxJGfLY56ypZa4+afBYT/yqLzY4C47/g5TTTx9fvsdp0uQ0AmjF4LeXdZ58yNjrp
Da63SrgQQM7lZ3k4LGXzDS20FKW2/9rpWgD78QLJGeKdHngD3ERvTX4m43rtEFrD
oB/4l2nl6fh0507ASYHy7QQQMcdjpN0OWQQKpL9SskZ8aQw1dY4KU28Gooe9ff+B
RGm6BlVzMi+HGcqfMpGwFfYopmqJuOXjNlX7a1jRwrztpJKeu4J9iSTiuSOEiQSq
kUyHRLO4rWJXa2/RMWfH4XSgdUaWFjOF6kaSwmI/pRZIepi/sX8BSKm+vvOnOtlr
Tz2DVSiA2qM+P3Br9qNTDUmTu9mri6fRzzVnj+ybdTQXn60jwPw4vj4xmvVTkjfZ
ZB2gw2+sAmZJA5pnLNGu4N8veo1Jiz7FLE0m+7yjXbcBc/GHWGTJa0Sa1Hwfp82t
ohagQlRYKhLaRrM6ZvjnPMH5dqT/ypfBXcIQAh6td1+e1Hf/uXZPM/ZrgHeCJqF+
PvLDuu4TYxOod+elZE5LfwDFPzCcMA8XNuuDzGQOFOMh9o4xTbQchyRSfhDGev/H
HpY9qxRyua+PjDCmE/F3YiFy77ITJLhCyYEdzVw43hCVY52inEauvHRzqTl7Lc53
PhnSIW6rDWsrrSMWApCC5WRSOSKfh0u4vO13bVLTb/QmuvMEhGiXDVI3/0NEpqKF
ewqyiG9Dvv67A3/IjTe3aMRGfWREHFnEG9bonn03uoufgmQb4h9ci9+QU52sl16F
rxRpxLyMRp8dpUzZbK3qxtASp09Lc2pdgItWcMMTtPObcd7KVV/xkVqm3ezaUbRF
Nw5qDFxkG85ohTvFt3wnfxkpytMhWoBv9F0ZMEFRLY2j+cb8IqXN5dyz6rGqgSYY
dtItQvI7Lq3XnOSFy3uCGC9Vzr6PRPQIrVH/56rSRaEyM8TgVWyaQQ3xm26x9Fe2
jUg50lG/WVzsRueBImuai1KCRC4FB/cg/kVu/s+5f5H4Z/GSD+4UpDyg3i2RYuy9
WOA/AGEeOLY5FkOTARcWteUbi6URboaouX2lnAXK6vX6Ysn8HgE9JATVbVC/96c9
GnWaf9yCr6Q0BvrHkS7hsJJj+VwaNPW4POSqhL+p0p+2eSWZVMlFFxNr+BNKONk+
RAssIHF1xVRHzzl75wjzhzuq0A0crHcHb64P+glkPt4iI7SqejyCrMQh6BWia6RT
c+NwXTnbcibB56McF+xWoyHne6dg1F0urA61JfQboyWOy+Z+cNPjEIcwWhJr/+Gx
v7/yf3V1kNECa90L7BeUmFGKxL7SvgyapevWqkIQCZEcOnobXQRdWUmNqSoZmOxB
u5eDcvrdF9p5wG5IStpzO9OConG3SQb46S9OSU3O7PnjKFId6KRIM7VsprMIIBTz
HKy6ufKyMXgyxxnvE5TZQcLzA4Wv8vHWET3t3WSQEwSPx45IAbjsE587YNOkjK1X
HNT3ypfRdJacxtttR7Y5Y/XF4tJmXkCfb5RoEqIPrQTmiLYh0h02i6CqeFK9u7j/
yAdKY3NrCBuqPM4mWCdjvtgC9i1Q98LCDiVESRrvLlfvv3iWozDUZ3qIU4TnSgti
U5+xKrmlKcWHHgADS56IECgCQyr2nZEhcNK7vKvg+KgA667tRm7M35w9eHz+J7lg
x5v5GYPH4J1UjPEb5Cwl+Vlr0XIqbhMX9MZWimpOJ0l5TisOLuTJ9ennREsFPZjN
U4IZQht7gifFlemn7D4a+UXHu95bHxDBMPJky7iYc2U3r50+JWRF+LO1L2TNDQlV
iPO8AOoI0V0cGaYE+0ZUgpUDk8fxUH5CAPCn+dbsqDh165G6590cF9eF4/yrlf2V
nbhZipPQyOTrmiCkBPQ1zuXYyfFHrJL7yK4ykiBV8c/VLT8nxeKfPwW3USKOScnx
k68qqFZ6lNFxlDwPAJR3F2H+PN5JZ8H1lTE56ujgTBpArXMPYpKri4a0lG+8QnYK
D6jOJIli5QtVQxES4X64NDwducoGHnquMZs3ScvJQPSOuTvuqaad4FrTCZGbv6Ic
emUAHDsxjffMQ9IJYulluCTVWgS/AiBk31yiUB0GsAqZYcWz5kKgTpOXBQhulACM
waokEqbyH2Vtvc1peiPi+Vh6EhTSiDoEVZ2w9GrOnjgpyK6zxzH0aIhJJxlQu8it
w+xj/3+79Bf8myVesgzCWvXbkmvc6jJaoHGopV8lTM2JUn4xYCSz71Bt4wQBKZX4
hFXDlDZaY1k/QRP/zTfQ8pjbcohDgUVW8eftJz3ND5Iy8D3nRF9/BQB3PWox4vyQ
Fj94Eoe8NmEArIKWjUoSkn+EDgNcdHGBIaQ5is0N8r9n4E2cgMj57i4Fm37k8c6+
hlilrggVJ8qTBGs57M0ldqRLwt1bM6SkU//oMGel7Ft3EDd98W/6RXRkmAbsLhRx
7VMb4WCUBrIZLxo1/StwHa13RyTHAt0GKPu549l3oTZezsSad8vlurbnIbxtK9Cl
hp6mYPd3Djoe5OaLe8Gnu23ko+S2+kfHIjOwkza9R5w6AzLjkjYS3C8oRwuxKOft
lj/7xMZWDrfyw5H86L0QiaZnkmD+nig1+S+Rn39mmuEgl2iwZO/ihlncUJQTEULb
7IHpmofr+5ya5xWeo/BFQhulTNr2fJN0bPkVGfp+
-----END MESSAGE-----\
"""
expect_invalid_attr = functools.partial(base_expect_invalid_attr, HiddenServiceDescriptor, 'descriptor_id', 'y3olqqblqw2gbh6phimfuiroechjjafa')
expect_invalid_attr_for_text = functools.partial(base_expect_invalid_attr_for_text, HiddenServiceDescriptor, 'descriptor_id', 'y3olqqblqw2gbh6phimfuiroechjjafa')
class TestHiddenServiceDescriptor(unittest.TestCase):
def test_for_duckduckgo_with_validation(self):
"""
Parse duckduckgo's descriptor.
"""
descriptor_file = open(get_resource('hidden_service_duckduckgo'), 'rb')
desc = next(stem.descriptor.parse_file(descriptor_file, 'hidden-service-descriptor 1.0', validate = True))
self._assert_matches_duckduckgo(desc)
def test_for_duckduckgo_without_validation(self):
"""
Parse duckduckgo's descriptor
"""
descriptor_file = open(get_resource('hidden_service_duckduckgo'), 'rb')
desc = next(stem.descriptor.parse_file(descriptor_file, 'hidden-service-descriptor 1.0', validate = False))
self._assert_matches_duckduckgo(desc)
def test_for_facebook(self):
"""
Parse facebook's descriptor.
"""
descriptor_file = open(get_resource('hidden_service_facebook'), 'rb')
desc = next(stem.descriptor.parse_file(descriptor_file, 'hidden-service-descriptor 1.0', validate = True))
self.assertEqual('utjk4arxqg6s6zzo7n6cjnq6ot34udhr', desc.descriptor_id)
self.assertEqual(2, desc.version)
self.assertEqual('6355jaerje3bqozopwq2qmpf4iviizdn', desc.secret_id_part)
self.assertEqual(datetime.datetime(2014, 10, 31, 23, 0, 0), desc.published)
self.assertEqual([2, 3], desc.protocol_versions)
@test.require.cryptography
def test_descriptor_signing(self):
self.assertRaisesRegexp(NotImplementedError, 'Signing of HiddenServiceDescriptor not implemented', HiddenServiceDescriptor.create, sign = True)
@test.require.cryptography
def test_with_basic_auth(self):
"""
Parse a descriptor with introduction-points encrypted with basic auth.
"""
descriptor_file = open(get_resource('hidden_service_basic_auth'), 'rb')
desc = next(stem.descriptor.parse_file(descriptor_file, 'hidden-service-descriptor 1.0', validate = True))
self.assertEqual('yfmvdrkdbyquyqk5vygyeylgj2qmrvrd', desc.descriptor_id)
self.assertEqual(2, desc.version)
self.assertEqual('fluw7z3s5cghuuirq3imh5jjj5ljips6', desc.secret_id_part)
self.assertEqual(datetime.datetime(2015, 2, 24, 20, 0, 0), desc.published)
self.assertEqual([2, 3], desc.protocol_versions)
self.assertEqual(EXPECTED_BASIC_AUTH_INTRODUCTION_POINTS_ENCODED, desc.introduction_points_encoded)
self.assertEqual([], desc.introduction_points_auth)
self.assertRaises(DecryptionFailure, desc.introduction_points)
self.assertRaises(DecryptionFailure, desc.introduction_points, 'aCmx3qIvArbil8A0KM4KgQ==')
introduction_points = desc.introduction_points('dCmx3qIvArbil8A0KM4KgQ==')
self.assertEqual(3, len(introduction_points))
point = introduction_points[0]
self.assertEqual('hmtvoobwglmmec26alnvl7x7mgmmr7xv', point.identifier)
self.assertEqual('195.154.82.88', point.address)
self.assertEqual(443, point.port)
self.assertTrue('MIGJAoGBANbPRD07T' in point.onion_key)
self.assertTrue('MIGJAoGBAN+LAdZP/' in point.service_key)
self.assertEqual([], point.intro_authentication)
point = introduction_points[1]
self.assertEqual('q5w6l2f4g5zw4rkr56fkyovbkkrnzcj5', point.identifier)
self.assertEqual('37.252.190.133', point.address)
self.assertEqual(9001, point.port)
self.assertTrue('MIGJAoGBAKmsbKrtt' in point.onion_key)
self.assertTrue('MIGJAoGBANwczLtzR' in point.service_key)
self.assertEqual([], point.intro_authentication)
point = introduction_points[2]
self.assertEqual('qcvprvmvnjb4dfyqjtxskugniliwlrx3', point.identifier)
self.assertEqual('193.11.114.45', point.address)
self.assertEqual(9002, point.port)
self.assertTrue('MIGJAoGBAM1ILL+7P' in point.onion_key)
self.assertTrue('MIGJAoGBAM7B/cymp' in point.service_key)
self.assertEqual([], point.intro_authentication)
@test.require.cryptography
def test_with_stealth_auth(self):
"""
Parse a descriptor with introduction-points encrypted with stealth auth.
"""
descriptor_file = open(get_resource('hidden_service_stealth_auth'), 'rb')
desc = next(stem.descriptor.parse_file(descriptor_file, 'hidden-service-descriptor 1.0', validate = True))
self.assertEqual('ubf3xeibzlfil6s4larq6y5peup2z3oj', desc.descriptor_id)
self.assertEqual(2, desc.version)
self.assertEqual('jczvydhzetbpdiylj3d5nsnjvaigs7xm', desc.secret_id_part)
self.assertEqual(datetime.datetime(2015, 2, 24, 20, 0, 0), desc.published)
self.assertEqual([2, 3], desc.protocol_versions)
self.assertEqual([], desc.introduction_points_auth)
self.assertRaises(DecryptionFailure, desc.introduction_points)
self.assertRaises(DecryptionFailure, desc.introduction_points, 'aCmx3qIvArbil8A0KM4KgQ==')
introduction_points = desc.introduction_points('dCmx3qIvArbil8A0KM4KgQ==')
self.assertEqual(3, len(introduction_points))
point = introduction_points[0]
self.assertEqual('6h4bkedts3yz2exl3vu4lsyiwkjrx5ff', point.identifier)
self.assertEqual('95.85.60.23', point.address)
self.assertEqual(443, point.port)
self.assertTrue('MIGJAoGBAMX5hO5hQ' in point.onion_key)
self.assertTrue('MIGJAoGBAMNSjfydv' in point.service_key)
self.assertEqual([], point.intro_authentication)
point = introduction_points[1]
self.assertEqual('4ghasjftsdfbbycafvlfx7czln3hrk53', point.identifier)
self.assertEqual('178.254.55.101', point.address)
self.assertEqual(9901, point.port)
self.assertTrue('MIGJAoGBAL2v/KNEY' in point.onion_key)
self.assertTrue('MIGJAoGBAOXiuIgBr' in point.service_key)
self.assertEqual([], point.intro_authentication)
point = introduction_points[2]
self.assertEqual('76tsxvudxqx47gedk3tl5qpesdzrh6yh', point.identifier)
self.assertEqual('193.11.164.243', point.address)
self.assertEqual(9001, point.port)
self.assertTrue('MIGJAoGBALca3zEoS' in point.onion_key)
self.assertTrue('MIGJAoGBAL3rWIAQ6' in point.service_key)
self.assertEqual([], point.intro_authentication)
def _assert_matches_duckduckgo(self, desc):
self.assertEqual('y3olqqblqw2gbh6phimfuiroechjjafa', desc.descriptor_id)
self.assertEqual(2, desc.version)
self.assertEqual(EXPECTED_DDG_PERMANENT_KEY, desc.permanent_key)
self.assertEqual('e24kgecavwsznj7gpbktqsiwgvngsf4e', desc.secret_id_part)
self.assertEqual(datetime.datetime(2015, 2, 23, 20, 0, 0), desc.published)
self.assertEqual([2, 3], desc.protocol_versions)
self.assertEqual(EXPECTED_DDG_INTRODUCTION_POINTS_ENCODED, desc.introduction_points_encoded)
self.assertEqual([], desc.introduction_points_auth)
self.assertEqual(EXPECTED_DDG_INTRODUCTION_POINTS_CONTENT, desc.introduction_points_content)
self.assertEqual(EXPECTED_DDG_SIGNATURE, desc.signature)
introduction_points = desc.introduction_points()
self.assertEqual(3, len(introduction_points))
point = introduction_points[0]
self.assertEqual('iwki77xtbvp6qvedfrwdzncxs3ckayeu', point.identifier)
self.assertEqual('178.62.222.129', point.address)
self.assertEqual(443, point.port)
self.assertEqual(EXPECT_POINT_1_ONION_KEY, point.onion_key)
self.assertEqual(EXPECT_POINT_1_SERVICE_KEY, point.service_key)
self.assertEqual([], point.intro_authentication)
point = introduction_points[1]
self.assertEqual('em4gjk6eiiualhmlyiifrzc7lbtrsbip', point.identifier)
self.assertEqual('46.4.174.52', point.address)
self.assertEqual(443, point.port)
self.assertEqual(EXPECT_POINT_2_ONION_KEY, point.onion_key)
self.assertEqual(EXPECT_POINT_2_SERVICE_KEY, point.service_key)
self.assertEqual([], point.intro_authentication)
point = introduction_points[2]
self.assertEqual('jqhfl364x3upe6lqnxizolewlfrsw2zy', point.identifier)
self.assertEqual('62.210.82.169', point.address)
self.assertEqual(443, point.port)
self.assertEqual(EXPECT_POINT_3_ONION_KEY, point.onion_key)
self.assertEqual(EXPECT_POINT_3_SERVICE_KEY, point.service_key)
self.assertEqual([], point.intro_authentication)
def test_minimal_hidden_service_descriptor(self):
"""
Basic sanity check that we can parse a hidden service descriptor with minimal attributes.
"""
desc = HiddenServiceDescriptor.create()
self.assertEqual('y3olqqblqw2gbh6phimfuiroechjjafa', desc.descriptor_id)
self.assertEqual(2, desc.version)
self.assertEqual('e24kgecavwsznj7gpbktqsiwgvngsf4e', desc.secret_id_part)
self.assertEqual([2, 3], desc.protocol_versions)
self.assertEqual('-----BEGIN MESSAGE-----\n-----END MESSAGE-----', desc.introduction_points_encoded)
self.assertEqual([], desc.introduction_points_auth)
self.assertEqual(b'', desc.introduction_points_content)
self.assertEqual([], desc.introduction_points())
def test_unrecognized_line(self):
"""
Includes unrecognized content in the descriptor.
"""
desc = HiddenServiceDescriptor.create({'pepperjack': 'is oh so tasty!'})
self.assertEqual(['pepperjack is oh so tasty!'], desc.get_unrecognized_lines())
def test_proceeding_line(self):
"""
Includes a line prior to the 'rendezvous-service-descriptor' entry.
"""
expect_invalid_attr_for_text(self, b'hibernate 1\n' + HiddenServiceDescriptor.content())
def test_trailing_line(self):
"""
Includes a line after the 'router-signature' entry.
"""
expect_invalid_attr_for_text(self, HiddenServiceDescriptor.content() + b'\nhibernate 1')
def test_required_fields(self):
"""
Check that we require the mandatory fields.
"""
line_to_attr = {
'rendezvous-service-descriptor': 'descriptor_id',
'version': 'version',
'permanent-key': 'permanent_key',
'secret-id-part': 'secret_id_part',
'publication-time': 'published',
'introduction-points': 'introduction_points_encoded',
'protocol-versions': 'protocol_versions',
'signature': 'signature',
}
for line in REQUIRED_FIELDS:
desc_text = HiddenServiceDescriptor.content(exclude = (line,))
expected = [] if line == 'protocol-versions' else None
expect_invalid_attr_for_text(self, desc_text, line_to_attr[line], expected)
def test_invalid_version(self):
"""
Checks that our version field expects a numeric value.
"""
test_values = (
'',
'-10',
'hello',
)
for test_value in test_values:
expect_invalid_attr(self, {'version': test_value}, 'version')
def test_invalid_protocol_versions(self):
"""
Checks that our protocol-versions field expects comma separated numeric
values.
"""
test_values = (
'',
'-10',
'hello',
'10,',
',10',
'10,-10',
'10,hello',
)
for test_value in test_values:
expect_invalid_attr(self, {'protocol-versions': test_value}, 'protocol_versions', [])
def test_introduction_points_when_empty(self):
"""
It's valid to advertise zero introduction points. I'm not clear whether this
would mean an empty introduction-points field or that it's omitted, but either
is valid according to the spec.
"""
missing_field_desc = HiddenServiceDescriptor.create(exclude = ('introduction-points',))
self.assertEqual(None, missing_field_desc.introduction_points_encoded)
self.assertEqual([], missing_field_desc.introduction_points_auth)
self.assertEqual(None, missing_field_desc.introduction_points_content)
self.assertEqual([], missing_field_desc.introduction_points())
empty_field_desc = HiddenServiceDescriptor.create({'introduction-points': MESSAGE_BLOCK % ''})
self.assertEqual((MESSAGE_BLOCK % '').strip(), empty_field_desc.introduction_points_encoded)
self.assertEqual([], empty_field_desc.introduction_points_auth)
self.assertEqual(b'', empty_field_desc.introduction_points_content)
self.assertEqual([], empty_field_desc.introduction_points())
def test_introduction_points_when_not_base64(self):
"""
Checks the introduction-points field when the content isn't base64 encoded.
"""
test_values = (
MESSAGE_BLOCK % '12345',
MESSAGE_BLOCK % 'hello',
)
for test_value in test_values:
desc = expect_invalid_attr(self, {'introduction-points': test_value}, 'introduction_points_encoded', test_value.strip())
self.assertEqual([], desc.introduction_points_auth)
self.assertEqual(None, desc.introduction_points_content)
self.assertEqual([], desc.introduction_points())
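# A rough usage sketch (added for illustration, not part of this test module)
# of how the class under test is typically exercised outside of unit tests.
# The constructor signature shown here is an assumption and should be checked
# against stem's documentation:
#
#   from stem.descriptor.hidden_service_descriptor import HiddenServiceDescriptor
#
#   desc = HiddenServiceDescriptor(raw_descriptor_bytes, validate = True)
#
#   for point in desc.introduction_points():
#       print('%s:%s' % (point.address, point.port))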
|
ewongbb/stem
|
test/unit/descriptor/hidden_service_descriptor.py
|
Python
|
lgpl-3.0
| 23,069
|
#!/usr/bin/env python
"""
Test runner script.
Please use `tox` to run it in multiple environments.
"""
import django
from django.conf import settings
from django.core.management import call_command
gettext = lambda s: s
conf = dict(
LANGUAGES = (
('ru', gettext('Russian')),
('en', gettext('English')),
),
USE_TZ = False,
INSTALLED_APPS = [
'modeltranslation',
'autoslug'
],
DATABASES = dict(
default = dict(
ENGINE='django.db.backends.sqlite3',
NAME=':memory:',
),
),
AUTOSLUG_SLUGIFY_FUNCTION = 'django.template.defaultfilters.slugify',
)
settings.configure(**conf)
django.setup()
if __name__ == "__main__":
call_command('test', 'autoslug')
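# Typical direct invocation (a sketch, assuming Django, django-modeltranslation
# and django-autoslug are importable in the current environment), although tox
# remains the recommended entry point:
#
#   python run_tests.py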
|
jpaulodit/django-autoslug
|
run_tests.py
|
Python
|
lgpl-3.0
| 758
|
"""
Cross section plots
===================
This example demonstrates contour plots of a cross-sectioned multi-dimensional
cube which features a hybrid height vertical coordinate system.
"""
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load some test data.
fname = iris.sample_data_path("hybrid_height.nc")
theta = iris.load_cube(fname, "air_potential_temperature")
# Extract a single height vs longitude cross-section. N.B. This could
# easily be changed to extract a specific slice, or even to loop over *all*
# cross section slices.
cross_section = next(
theta.slices(["grid_longitude", "model_level_number"])
)
qplt.contourf(
cross_section, coords=["grid_longitude", "altitude"], cmap="RdBu_r"
)
iplt.show()
# Now do the equivalent plot, only against model level
plt.figure()
qplt.contourf(
cross_section,
coords=["grid_longitude", "model_level_number"],
cmap="RdBu_r",
)
iplt.show()
if __name__ == "__main__":
main()
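# Note (added for illustration): iris.sample_data_path() resolves files from
# Iris' separately installed sample data (the "iris-sample-data" package);
# without it, "hybrid_height.nc" above will not be found and loading will fail.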
|
pp-mo/iris
|
docs/iris/example_code/General/cross_section.py
|
Python
|
lgpl-3.0
| 1,110
|
# Recursive function that prints the expression, given a list of operators
# and a list of numbers
def pretty_print(L,sol):
if len(L)==1:
print(L[0],end='')
else:
print('(',end='')
pretty_print(L[:-1],sol[:-1])
print(sol[-1],end='')
print(L[-1],end='')
print(')',end='')
# Given a list L and a number N, determines whether there is a way to insert
# operators between the numbers so that evaluating the resulting expression,
# associating to the left, gives N.
# The operators printed on screen are the ones that go between the numbers.
def oplista(L,N,Lorig,sol=[],ultimo_op='+',res_parcial=0):
resultado=operacion(res_parcial,ultimo_op,L[0])
if len(L)==1:
if resultado==N:
pretty_print(Lorig,sol)
print(' =',N)
return True
else:
return False
operadores=['+','-','*','/']
for op in operadores:
if oplista(L[1:],N,Lorig,sol+[op],op,resultado):
return True
return False
def op(L,N):
if not oplista(L,N,L):
print("no es posible insertar operadores en",L,"para obtener",N)
def operacion(l,op,r):
if op=='+':
return l+r
if op=='-':
return l-r
if op=='*':
return l*r
if op=='/':
return l/r
else:
return 0
op([1,4,10,1],-30)
op([1,4,10,1],-29)
op([1,4,10,1],49)
op([1,4,10,1],5)
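# Worked example (a sketch of the expected behaviour, not captured output):
# for op([1,4,10,1],-30) the search tries operators in the order +, -, *, /
# and evaluates left-associatively, so the first solution found is
#
#   ((1-4)*10)*1 = -30
#
# while op([1,4,10,1],5) exhausts every combination without reaching 5 and
# reports that it is not possible.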
|
jabaier/iic1103.20152.s4
|
ejercicios_resueltos_recursion/operadoreslistas.py
|
Python
|
unlicense
| 1,461
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Alignment'
db.create_table('badges_alignment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('badges', ['Alignment'])
# Adding model 'Issuer'
db.create_table('badges_issuer', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('email', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal('badges', ['Issuer'])
# Adding model 'Revocation'
db.create_table('badges_revocation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('award', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revocations', to=orm['badges.Award'])),
('reason', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('badges', ['Revocation'])
# Adding model 'Tag'
db.create_table('badges_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('badges', ['Tag'])
# Adding model 'Identity'
db.create_table('badges_identity', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='identity', unique=True, to=orm['auth.User'])),
('type', self.gf('django.db.models.fields.CharField')(default='email', max_length=255)),
('hash', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('hashed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('salt', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal('badges', ['Identity'])
# Adding field 'Award.uuid'
db.add_column('badges_award', 'uuid',
self.gf('django.db.models.fields.CharField')(default='97d87102-9bc4-11e2-8863-b499ba561baa', max_length=255, db_index=True),
keep_default=False)
# Adding field 'Award.evidence'
db.add_column('badges_award', 'evidence',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Award.image'
db.add_column('badges_award', 'image',
self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Award.expires'
db.add_column('badges_award', 'expires',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
# Adding field 'Badge.criteria'
db.add_column('badges_badge', 'criteria',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Adding M2M table for field alignments on 'Badge'
db.create_table('badges_badge_alignments', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('badge', models.ForeignKey(orm['badges.badge'], null=False)),
('alignment', models.ForeignKey(orm['badges.alignment'], null=False))
))
db.create_unique('badges_badge_alignments', ['badge_id', 'alignment_id'])
# Adding M2M table for field tags on 'Badge'
db.create_table('badges_badge_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('badge', models.ForeignKey(orm['badges.badge'], null=False)),
('tag', models.ForeignKey(orm['badges.tag'], null=False))
))
db.create_unique('badges_badge_tags', ['badge_id', 'tag_id'])
# Changing field 'Badge.description'
db.alter_column('badges_badge', 'description', self.gf('django.db.models.fields.TextField')(default=''))
def backwards(self, orm):
# Deleting model 'Alignment'
db.delete_table('badges_alignment')
# Deleting model 'Issuer'
db.delete_table('badges_issuer')
# Deleting model 'Revocation'
db.delete_table('badges_revocation')
# Deleting model 'Tag'
db.delete_table('badges_tag')
# Deleting model 'Identity'
db.delete_table('badges_identity')
# Deleting field 'Award.uuid'
db.delete_column('badges_award', 'uuid')
# Deleting field 'Award.evidence'
db.delete_column('badges_award', 'evidence')
# Deleting field 'Award.image'
db.delete_column('badges_award', 'image')
# Deleting field 'Award.expires'
db.delete_column('badges_award', 'expires')
# Deleting field 'Badge.criteria'
db.delete_column('badges_badge', 'criteria')
# Removing M2M table for field alignments on 'Badge'
db.delete_table('badges_badge_alignments')
# Removing M2M table for field tags on 'Badge'
db.delete_table('badges_badge_tags')
# Changing field 'Badge.description'
db.alter_column('badges_badge', 'description', self.gf('django.db.models.fields.TextField')(null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'badges.alignment': {
'Meta': {'object_name': 'Alignment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.award': {
'Meta': {'ordering': "['-modified', '-awarded']", 'unique_together': "(('user', 'badge'),)", 'object_name': 'Award'},
'awarded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'awards_set'", 'to': "orm['badges.Badge']"}),
'evidence': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_awards'", 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'9d225c2c-9bc4-11e2-8863-b499ba561baa'", 'max_length': '255', 'db_index': 'True'})
},
'badges.badge': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
'alignments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'alignments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Alignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'criteria': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'tags'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'badges.identity': {
'Meta': {'object_name': 'Identity'},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'hashed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'identity'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'badges.issuer': {
'Meta': {'object_name': 'Issuer'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.revocation': {
'Meta': {'object_name': 'Revocation'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revocations'", 'to': "orm['badges.Award']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['badges']
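# This migration is applied with South's migrate command, e.g. (a sketch,
# assuming the usual manage.py entry point of the project):
#
#   python manage.py migrate badges 0002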
|
OpenMOOC/moocng
|
moocng/badges/migrations/0002_auto__add_alignment__add_issuer__add_revocation__add_tag__add_identity.py
|
Python
|
apache-2.0
| 14,656
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A command line tool lists out the Splunk logging categories and their
current logging level."""
from __future__ import absolute_import
from __future__ import print_function
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import splunklib.client as client
try:
from utils import parse
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
def main(argv):
usage = "usage: %prog [options]"
opts = parse(argv, {}, ".splunkrc", usage=usage)
service = client.connect(**opts.kwargs)
for logger in service.loggers:
print("%s (%s)" % (logger.name, logger['level']))
if __name__ == "__main__":
main(sys.argv[1:])
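# Typical invocation (a sketch): connection settings are read from a .splunkrc
# file, typically in your home directory, or passed as command line options, as
# with the other SDK examples:
#
#   python examples/loggers.py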
|
splunk/splunk-sdk-python
|
examples/loggers.py
|
Python
|
apache-2.0
| 1,407
|
import os
import os.path
import shutil
def get_buildifier():
return os.getenv("BUILDIFIER_BIN") or (
os.path.expandvars("$GOPATH/bin/buildifier")
if os.getenv("GOPATH") else shutil.which("buildifier"))
def get_buildozer():
return os.getenv("BUILDOZER_BIN") or (
os.path.expandvars("$GOPATH/bin/buildozer")
if os.getenv("GOPATH") else shutil.which("buildozer"))
|
envoyproxy/envoy
|
tools/code_format/paths.py
|
Python
|
apache-2.0
| 405
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from __future__ import absolute_import
from pychron.managers.motion_controller_managers.motion_controller_manager import (
MotionControllerManager,
)
# ============= local library imports ==========================
class NewportMotionControllerManager(MotionControllerManager):
"""
G{classtree}
"""
pass
# ============= views ===================================
# ============= EOF ====================================
|
USGSDenverPychron/pychron
|
pychron/managers/motion_controller_managers/newport_motion_controller_manager.py
|
Python
|
apache-2.0
| 1,316
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Use the GitHub API to download built artifacts."""
import datetime
import json
import os
import os.path
import sys
import time
import zipfile
import requests
def download_url(url, filename):
"""Download a file from `url` to `filename`."""
response = requests.get(url, stream=True)
if response.status_code == 200:
with open(filename, "wb") as f:
for chunk in response.iter_content(16*1024):
f.write(chunk)
else:
raise Exception(f"Fetching {url} produced: status={response.status_code}")
def unpack_zipfile(filename):
"""Unpack a zipfile, using the names in the zip."""
with open(filename, "rb") as fzip:
z = zipfile.ZipFile(fzip)
for name in z.namelist():
print(f" extracting {name}")
z.extract(name)
def utc2local(timestring):
"""Convert a UTC time into local time in a more readable form.
For example: '20201208T122900Z' to '2020-12-08 07:29:00'.
"""
dt = datetime.datetime
utc = dt.fromisoformat(timestring.rstrip("Z"))
epoch = time.mktime(utc.timetuple())
offset = dt.fromtimestamp(epoch) - dt.utcfromtimestamp(epoch)
local = utc + offset
return local.strftime("%Y-%m-%d %H:%M:%S")
dest = "dist"
repo_owner = sys.argv[1]
temp_zip = "artifacts.zip"
os.makedirs(dest, exist_ok=True)
os.chdir(dest)
r = requests.get(f"https://api.github.com/repos/{repo_owner}/actions/artifacts")
if r.status_code == 200:
dists = [a for a in r.json()["artifacts"] if a["name"] == "dist"]
if not dists:
print("No recent dists!")
else:
latest = max(dists, key=lambda a: a["created_at"])
print(f"Artifacts created at {utc2local(latest['created_at'])}")
download_url(latest["archive_download_url"], temp_zip)
unpack_zipfile(temp_zip)
os.remove(temp_zip)
else:
print(f"Fetching artifacts returned status {r.status_code}:")
print(json.dumps(r.json(), indent=4))
sys.exit(1)
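# Typical invocation (a sketch; the argument is presumably an "owner/repo"
# slug, since it is interpolated into the /repos/{...}/actions/artifacts
# endpoint):
#
#   python ci/download_gha_artifacts.py nedbat/coveragepy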
|
nedbat/coveragepy
|
ci/download_gha_artifacts.py
|
Python
|
apache-2.0
| 2,137
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import multiprocessing
import android.adb.commands
from swift_build_support.swift_build_support import arguments
from swift_build_support.swift_build_support import host
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
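# The wrapper boils down to this pattern (a minimal standalone sketch added for
# illustration, not part of the build-script itself):
#
#   def fill_defaults(ns):
#       if ns.mode is None:
#           ns.mode = "debug"
#
#   demo = _ApplyDefaultsArgumentParser(apply_defaults=fill_defaults)
#   demo.add_argument("--mode")
#   args = demo.parse_args([])   # parse_args() goes through parse_known_args()
#   assert args.mode == "debug"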
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if (args.cmark_build_variant is not None):
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = "Debug"
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = "Ninja"
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError("error: --ios-all is unavailable in open-source "
"Swift.\nUse --ios to skip iOS device tests.")
if args.tvos_all:
raise ValueError("error: --tvos-all is unavailable in open-source "
"Swift.\nUse --tvos to skip tvOS device tests.")
if args.watchos_all:
raise ValueError("error: --watchos-all is unavailable in open-source "
"Swift.\nUse --watchos to skip watchOS device tests.")
# Propagate global --skip-build
if args.skip_build:
args.build_linux = False
args.build_freebsd = False
args.build_cygwin = False
args.build_osx = False
args.build_ios = False
args.build_tvos = False
args.build_watchos = False
args.build_android = False
args.build_benchmarks = False
args.build_external_benchmarks = False
args.build_lldb = False
args.build_llbuild = False
args.build_swiftpm = False
args.build_xctest = False
args.build_foundation = False
args.build_libdispatch = False
args.build_libicu = False
args.build_playgroundlogger = False
args.build_playgroundsupport = False
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# If no tests were specified, skip the Swift stdlib tests on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
# tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
parser.add_argument(
"-n", "--dry-run",
help="print the commands that would be executed, but do not execute "
"them",
action="store_true",
default=False)
parser.add_argument(
"--no-legacy-impl", dest="legacy_impl",
help="avoid legacy implementation",
action="store_false",
default=True)
targets_group = parser.add_argument_group(
title="Host and cross-compilation targets")
targets_group.add_argument(
"--host-target",
help="The host target. LLVM, Clang, and Swift will be built for this "
"target. The built LLVM and Clang will be used to compile Swift "
"for the cross-compilation targets.",
default=StdlibDeploymentTarget.host_target().name)
targets_group.add_argument(
"--cross-compile-hosts",
help="A space separated list of targets to cross-compile host Swift "
"tools for. Can be used multiple times.",
action=arguments.action.concat, type=arguments.type.shell_split,
default=[])
targets_group.add_argument(
"--stdlib-deployment-targets",
help="list of targets to compile or cross-compile the Swift standard "
"library for. %(default)s by default.",
action=arguments.action.concat, type=arguments.type.shell_split,
default=None)
targets_group.add_argument(
"--build-stdlib-deployment-targets",
help="A space-separated list that filters which of the configured "
"targets to build the Swift standard library for, or 'all'.",
type=arguments.type.shell_split, default=["all"])
projects_group = parser.add_argument_group(
title="Options to select projects")
projects_group.add_argument(
"-l", "--lldb",
help="build LLDB",
action="store_true",
dest="build_lldb")
projects_group.add_argument(
"-b", "--llbuild",
help="build llbuild",
action="store_true",
dest="build_llbuild")
projects_group.add_argument(
"-p", "--swiftpm",
help="build swiftpm",
action="store_true",
dest="build_swiftpm")
projects_group.add_argument(
"--xctest",
help="build xctest",
action=arguments.action.enable,
dest="build_xctest")
projects_group.add_argument(
"--foundation",
help="build foundation",
action=arguments.action.enable,
dest="build_foundation")
projects_group.add_argument(
"--libdispatch",
help="build libdispatch",
action=arguments.action.enable,
dest="build_libdispatch")
projects_group.add_argument(
"--libicu",
help="build libicu",
action=arguments.action.enable,
dest="build_libicu")
projects_group.add_argument(
"--playgroundlogger",
help="build playgroundlogger",
action="store_true",
dest="build_playgroundlogger")
projects_group.add_argument(
"--playgroundsupport",
help="build PlaygroundSupport",
action="store_true",
dest="build_playgroundsupport")
projects_group.add_argument(
"--build-ninja",
help="build the Ninja tool",
action=arguments.action.enable)
extra_actions_group = parser.add_argument_group(
title="Extra actions to perform before or in addition to building")
extra_actions_group.add_argument(
"-c", "--clean",
help="do a clean build",
action="store_true")
extra_actions_group.add_argument(
"--export-compile-commands",
help="generate compilation databases in addition to building",
action=arguments.action.enable)
extra_actions_group.add_argument(
"--symbols-package",
metavar="PATH",
help="if provided, an archive of the symbols directory will be "
"generated at this path")
build_variant_group = parser.add_mutually_exclusive_group(required=False)
build_variant_group.add_argument(
"-d", "--debug",
help="build the Debug variant of everything (LLVM, Clang, Swift host "
"tools, target Swift standard libraries, LLDB (if enabled) "
"(default)",
action="store_const",
const="Debug",
dest="build_variant")
build_variant_group.add_argument(
"-r", "--release-debuginfo",
help="build the RelWithDebInfo variant of everything (default is "
"Debug)",
action="store_const",
const="RelWithDebInfo",
dest="build_variant")
build_variant_group.add_argument(
"-R", "--release",
help="build the Release variant of everything (default is Debug)",
action="store_const",
const="Release",
dest="build_variant")
build_variant_override_group = parser.add_argument_group(
title="Override build variant for a specific project")
build_variant_override_group.add_argument(
"--debug-llvm",
help="build the Debug variant of LLVM",
action="store_const",
const="Debug",
dest="llvm_build_variant")
build_variant_override_group.add_argument(
"--debug-swift",
help="build the Debug variant of Swift host tools",
action="store_const",
const="Debug",
dest="swift_build_variant")
build_variant_override_group.add_argument(
"--debug-swift-stdlib",
help="build the Debug variant of the Swift standard library and SDK "
"overlay",
action="store_const",
const="Debug",
dest="swift_stdlib_build_variant")
build_variant_override_group.add_argument(
"--debug-lldb",
help="build the Debug variant of LLDB",
action="store_const",
const="Debug",
dest="lldb_build_variant")
build_variant_override_group.add_argument(
"--debug-cmark",
help="build the Debug variant of CommonMark",
action="store_const",
const="Debug",
dest="cmark_build_variant")
build_variant_override_group.add_argument(
"--debug-foundation",
help="build the Debug variant of Foundation",
action="store_const",
const="Debug",
dest="foundation_build_variant")
build_variant_override_group.add_argument(
"--debug-libdispatch",
help="build the Debug variant of libdispatch",
action="store_const",
const="Debug",
dest="libdispatch_build_variant")
build_variant_override_group.add_argument(
"--debug-libicu",
help="build the Debug variant of libicu",
action="store_const",
const="Debug",
dest="libicu_build_variant")
assertions_group = parser.add_mutually_exclusive_group(required=False)
assertions_group.add_argument(
"--assertions",
help="enable assertions in all projects",
action="store_const",
const=True,
dest="assertions")
assertions_group.add_argument(
"--no-assertions",
help="disable assertions in all projects",
action="store_const",
const=False,
dest="assertions")
assertions_override_group = parser.add_argument_group(
title="Control assertions in a specific project")
assertions_override_group.add_argument(
"--cmark-assertions",
help="enable assertions in CommonMark",
action="store_const",
const=True,
dest="cmark_assertions")
assertions_override_group.add_argument(
"--llvm-assertions",
help="enable assertions in LLVM",
action="store_const",
const=True,
dest="llvm_assertions")
assertions_override_group.add_argument(
"--no-llvm-assertions",
help="disable assertions in LLVM",
action="store_const",
const=False,
dest="llvm_assertions")
assertions_override_group.add_argument(
"--swift-assertions",
help="enable assertions in Swift",
action="store_const",
const=True,
dest="swift_assertions")
assertions_override_group.add_argument(
"--no-swift-assertions",
help="disable assertions in Swift",
action="store_const",
const=False,
dest="swift_assertions")
assertions_override_group.add_argument(
"--swift-stdlib-assertions",
help="enable assertions in the Swift standard library",
action="store_const",
const=True,
dest="swift_stdlib_assertions")
assertions_override_group.add_argument(
"--no-swift-stdlib-assertions",
help="disable assertions in the Swift standard library",
action="store_const",
const=False,
dest="swift_stdlib_assertions")
assertions_override_group.add_argument(
"--lldb-assertions",
help="enable assertions in LLDB",
action="store_const",
const=True,
dest="lldb_assertions")
assertions_override_group.add_argument(
"--no-lldb-assertions",
help="disable assertions in LLDB",
action="store_const",
const=False,
dest="lldb_assertions")
# FIXME: This should be one option using choices=[...]
cmake_generator_group = parser.add_argument_group(
title="Select the CMake generator")
cmake_generator_group.add_argument(
"-x", "--xcode",
help="use CMake's Xcode generator (default is Ninja)",
action="store_const",
const="Xcode",
dest="cmake_generator")
cmake_generator_group.add_argument(
"-m", "--make",
help="use CMake's Makefile generator (default is Ninja)",
action="store_const",
const="Unix Makefiles",
dest="cmake_generator")
cmake_generator_group.add_argument(
"-e", "--eclipse",
help="use CMake's Eclipse generator (default is Ninja)",
action="store_const",
const="Eclipse CDT4 - Ninja",
dest="cmake_generator")
run_tests_group = parser.add_argument_group(
title="Run tests")
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
run_tests_group.add_argument(
"-t",
help="test Swift after building",
action="store_const",
const=True,
dest="test")
run_tests_group.add_argument(
"--test",
help="test Swift after building",
action=arguments.action.enable)
run_tests_group.add_argument(
"-T",
help="run the validation test suite (implies --test)",
action="store_const",
const=True,
dest="validation_test")
run_tests_group.add_argument(
"--validation-test",
help="run the validation test suite (implies --test)",
action=arguments.action.enable)
run_tests_group.add_argument(
"--test-paths",
help="run tests located in specific directories and/or files \
(implies --test and/or --validation-test)",
action=arguments.action.concat, type=arguments.type.shell_split,
default=[])
run_tests_group.add_argument(
"-o",
help="run the test suite in optimized mode too (implies --test)",
action="store_const",
const=True,
dest="test_optimized")
run_tests_group.add_argument(
"--test-optimized",
help="run the test suite in optimized mode too (implies --test)",
action=arguments.action.enable)
run_tests_group.add_argument(
"-s",
help="run the test suite in optimize for size mode too \
(implies --test)",
action="store_const",
const=True,
dest="test_optimize_for_size")
run_tests_group.add_argument(
"--test-optimize-for-size",
help="run the test suite in optimize for size mode too \
(implies --test)",
action=arguments.action.enable)
run_tests_group.add_argument(
"--long-test",
help="run the long test suite",
action=arguments.action.enable)
run_tests_group.add_argument(
"--host-test",
help="run executable tests on host devices (such as iOS or tvOS)",
action=arguments.action.enable)
run_tests_group.add_argument(
"-B", "--benchmark",
help="run the Swift Benchmark Suite after building",
action="store_true")
run_tests_group.add_argument(
"--benchmark-num-o-iterations",
help="if the Swift Benchmark Suite is run after building, run N \
iterations with -O",
metavar='N', type=int, default=3)
run_tests_group.add_argument(
"--benchmark-num-onone-iterations",
help="if the Swift Benchmark Suite is run after building, run N \
iterations with -Onone", metavar='N', type=int, default=3)
run_tests_group.add_argument(
"--skip-test-osx",
dest='test_osx',
action=arguments.action.disable,
help="skip testing Swift stdlibs for Mac OS X")
run_tests_group.add_argument(
"--skip-test-linux",
dest='test_linux',
action=arguments.action.disable,
help="skip testing Swift stdlibs for Linux")
run_tests_group.add_argument(
"--skip-test-freebsd",
dest='test_freebsd',
action=arguments.action.disable,
help="skip testing Swift stdlibs for FreeBSD")
run_tests_group.add_argument(
"--skip-test-cygwin",
dest='test_cygwin',
action=arguments.action.disable,
help="skip testing Swift stdlibs for Cygwin")
parser.add_argument(
"--build-runtime-with-host-compiler",
help="Use the host compiler, not the self-built one to compile the "
"Swift runtime",
action=arguments.action.enable)
run_build_group = parser.add_argument_group(
title="Run build")
run_build_group.add_argument(
"--build-swift-dynamic-stdlib",
help="build dynamic variants of the Swift standard library",
action=arguments.action.enable,
default=True)
run_build_group.add_argument(
"--build-swift-static-stdlib",
help="build static variants of the Swift standard library",
action=arguments.action.enable)
run_build_group.add_argument(
"--build-swift-dynamic-sdk-overlay",
help="build dynamic variants of the Swift SDK overlay",
action=arguments.action.enable,
default=True)
run_build_group.add_argument(
"--build-swift-static-sdk-overlay",
help="build static variants of the Swift SDK overlay",
action=arguments.action.enable)
run_build_group.add_argument(
"--build-swift-stdlib-unittest-extra",
help="Build optional StdlibUnittest components",
action=arguments.action.enable)
run_build_group.add_argument(
"-S", "--skip-build",
help="generate build directory only without building",
action="store_true")
run_build_group.add_argument(
"--skip-build-linux",
dest='build_linux',
action=arguments.action.disable,
help="skip building Swift stdlibs for Linux")
run_build_group.add_argument(
"--skip-build-freebsd",
dest='build_freebsd',
action=arguments.action.disable,
help="skip building Swift stdlibs for FreeBSD")
run_build_group.add_argument(
"--skip-build-cygwin",
dest='build_cygwin',
action=arguments.action.disable,
help="skip building Swift stdlibs for Cygwin")
run_build_group.add_argument(
"--skip-build-osx",
dest='build_osx',
action=arguments.action.disable,
help="skip building Swift stdlibs for MacOSX")
run_build_group.add_argument(
"--skip-build-ios",
dest='build_ios',
action=arguments.action.disable,
help="skip building Swift stdlibs for iOS")
run_build_group.add_argument(
"--skip-build-ios-device",
dest='build_ios_device',
action=arguments.action.disable,
help="skip building Swift stdlibs for iOS devices "
"(i.e. build simulators only)")
run_build_group.add_argument(
"--skip-build-ios-simulator",
dest='build_ios_simulator',
action=arguments.action.disable,
help="skip building Swift stdlibs for iOS simulator "
"(i.e. build devices only)")
run_build_group.add_argument(
"--skip-build-tvos",
dest='build_tvos',
action=arguments.action.disable,
help="skip building Swift stdlibs for tvOS")
run_build_group.add_argument(
"--skip-build-tvos-device",
dest='build_tvos_device',
action=arguments.action.disable,
help="skip building Swift stdlibs for tvOS devices "
"(i.e. build simulators only)")
run_build_group.add_argument(
"--skip-build-tvos-simulator",
dest='build_tvos_simulator',
action=arguments.action.disable,
help="skip building Swift stdlibs for tvOS simulator "
"(i.e. build devices only)")
run_build_group.add_argument(
"--skip-build-watchos",
dest='build_watchos',
action=arguments.action.disable,
help="skip building Swift stdlibs for watchOS")
run_build_group.add_argument(
"--skip-build-watchos-device",
dest='build_watchos_device',
action=arguments.action.disable,
help="skip building Swift stdlibs for watchOS devices "
"(i.e. build simulators only)")
run_build_group.add_argument(
"--skip-build-watchos-simulator",
dest='build_watchos_simulator',
action=arguments.action.disable,
help="skip building Swift stdlibs for watchOS simulator "
"(i.e. build devices only)")
run_build_group.add_argument(
"--skip-build-android",
dest='build_android',
action=arguments.action.disable,
help="skip building Swift stdlibs for Android")
run_build_group.add_argument(
"--skip-build-benchmarks",
dest='build_benchmarks',
action=arguments.action.disable,
help="skip building Swift Benchmark Suite")
run_build_group.add_argument(
"--build-external-benchmarks",
dest='build_external_benchmarks',
action=arguments.action.enable,
help="skip building Swift Benchmark Suite")
skip_test_group = parser.add_argument_group(
title="Skip testing specified targets")
skip_test_group.add_argument(
"--skip-test-ios",
dest='test_ios',
action=arguments.action.disable,
help="skip testing all iOS targets. Equivalent to specifying both "
"--skip-test-ios-simulator and --skip-test-ios-host")
skip_test_group.add_argument(
"--skip-test-ios-simulator",
dest='test_ios_simulator',
action=arguments.action.disable,
help="skip testing iOS simulator targets")
skip_test_group.add_argument(
"--skip-test-ios-32bit-simulator",
dest='test_ios_32bit_simulator',
action=arguments.action.disable,
help="skip testing iOS 32 bit simulator targets")
skip_test_group.add_argument(
"--skip-test-ios-host",
dest='test_ios_host',
action=arguments.action.disable,
help="skip testing iOS device targets on the host machine (the phone "
"itself)")
skip_test_group.add_argument(
"--skip-test-tvos",
dest='test_tvos',
action=arguments.action.disable,
help="skip testing all tvOS targets. Equivalent to specifying both "
"--skip-test-tvos-simulator and --skip-test-tvos-host")
skip_test_group.add_argument(
"--skip-test-tvos-simulator",
dest='test_tvos_simulator',
action=arguments.action.disable,
help="skip testing tvOS simulator targets")
skip_test_group.add_argument(
"--skip-test-tvos-host",
dest='test_tvos_host',
action=arguments.action.disable,
help="skip testing tvOS device targets on the host machine (the TV "
"itself)")
skip_test_group.add_argument(
"--skip-test-watchos",
dest='test_watchos',
action=arguments.action.disable,
help="skip testing all tvOS targets. Equivalent to specifying both "
"--skip-test-watchos-simulator and --skip-test-watchos-host")
skip_test_group.add_argument(
"--skip-test-watchos-simulator",
dest='test_watchos_simulator',
action=arguments.action.disable,
help="skip testing watchOS simulator targets")
skip_test_group.add_argument(
"--skip-test-watchos-host",
dest='test_watchos_host',
action=arguments.action.disable,
help="skip testing watchOS device targets on the host machine (the "
"watch itself)")
skip_test_group.add_argument(
"--skip-test-android-host",
dest='test_android_host',
action=arguments.action.disable,
help="skip testing Android device targets on the host machine (the "
"phone itself)")
parser.add_argument(
"-i", "--ios",
help="also build for iOS, but disallow tests that require an iOS "
"device",
action="store_true")
parser.add_argument(
"-I", "--ios-all",
help="also build for iOS, and allow all iOS tests",
action="store_true",
dest="ios_all")
parser.add_argument(
"--skip-ios",
help="set to skip everything iOS-related",
dest="ios",
action="store_false")
parser.add_argument(
"--tvos",
help="also build for tvOS, but disallow tests that require a tvos "
"device",
action=arguments.action.enable)
parser.add_argument(
"--tvos-all",
help="also build for tvOS, and allow all tvOS tests",
action=arguments.action.enable,
dest="tvos_all")
parser.add_argument(
"--skip-tvos",
help="set to skip everything tvOS-related",
dest="tvos",
action="store_false")
parser.add_argument(
"--watchos",
help="also build for watchOS, but disallow tests that require an "
"watchOS device",
action=arguments.action.enable)
parser.add_argument(
"--watchos-all",
help="also build for Apple watchOS, and allow all Apple watchOS tests",
action=arguments.action.enable,
dest="watchos_all")
parser.add_argument(
"--skip-watchos",
help="set to skip everything watchOS-related",
dest="watchos",
action="store_false")
parser.add_argument(
"--android",
help="also build for Android",
action=arguments.action.enable)
parser.add_argument(
"--swift-analyze-code-coverage",
help="enable code coverage analysis in Swift (false, not-merged, "
"merged).",
choices=["false", "not-merged", "merged"],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
dest="swift_analyze_code_coverage")
parser.add_argument(
"--build-subdir",
help="name of the directory under $SWIFT_BUILD_ROOT where the build "
"products will be placed",
metavar="PATH")
parser.add_argument(
"--install-prefix",
help="The installation prefix. This is where built Swift products "
"(like bin, lib, and include) will be installed.",
metavar="PATH",
default=targets.install_prefix())
parser.add_argument(
"--install-symroot",
help="the path to install debug symbols into",
metavar="PATH")
parser.add_argument(
"-j", "--jobs",
help="the number of parallel build jobs to use",
type=int,
dest="build_jobs",
default=multiprocessing.cpu_count())
parser.add_argument(
"--darwin-xcrun-toolchain",
help="the name of the toolchain to use on Darwin",
default=defaults.DARWIN_XCRUN_TOOLCHAIN)
parser.add_argument(
"--cmake",
help="the path to a CMake executable that will be used to build "
"Swift",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--show-sdks",
help="print installed Xcode and SDK versions",
action=arguments.action.enable)
parser.add_argument(
"--extra-swift-args",
help="Pass through extra flags to swift in the form of a cmake list "
"'module_regexp;flag'. Can be called multiple times to add "
"multiple such module_regexp flag pairs. All semicolons in flags "
"must be escaped with a '\\'",
action="append", dest="extra_swift_args", default=[])
llvm_group = parser.add_argument_group(
title="Build settings specific for LLVM")
llvm_group.add_argument(
'--llvm-targets-to-build',
help='LLVM target generators to build',
default="X86;ARM;AArch64;PowerPC;SystemZ;Mips")
android_group = parser.add_argument_group(
title="Build settings for Android")
android_group.add_argument(
"--android-ndk",
help="An absolute path to the NDK that will be used as a libc "
"implementation for Android builds",
metavar="PATH")
android_group.add_argument(
"--android-api-level",
help="The Android API level to target when building for Android. "
"Currently only 21 or above is supported",
default="21")
android_group.add_argument(
"--android-ndk-gcc-version",
help="The GCC version to use when building for Android. Currently "
"only 4.9 is supported. %(default)s is also the default value. "
"This option may be used when experimenting with versions "
"of the Android NDK not officially supported by Swift",
choices=["4.8", "4.9"],
default="4.9")
android_group.add_argument(
"--android-icu-uc",
help="Path to a directory containing libicuuc.so",
metavar="PATH")
android_group.add_argument(
"--android-icu-uc-include",
help="Path to a directory containing headers for libicuuc",
metavar="PATH")
android_group.add_argument(
"--android-icu-i18n",
help="Path to a directory containing libicui18n.so",
metavar="PATH")
android_group.add_argument(
"--android-icu-i18n-include",
help="Path to a directory containing headers libicui18n",
metavar="PATH")
android_group.add_argument(
"--android-deploy-device-path",
help="Path on an Android device to which built Swift stdlib products "
"will be deployed. If running host tests, specify the '{}' "
"directory.".format(android.adb.commands.DEVICE_TEMP_DIR),
default=android.adb.commands.DEVICE_TEMP_DIR,
metavar="PATH")
parser.add_argument(
"--host-cc",
help="the absolute path to CC, the 'clang' compiler for the host "
"platform. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--host-cxx",
help="the absolute path to CXX, the 'clang++' compiler for the host "
"platform. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--host-lipo",
help="the absolute path to lipo. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--host-libtool",
help="the absolute path to libtool. Default is auto detected.",
type=arguments.type.executable,
metavar="PATH")
parser.add_argument(
"--distcc",
help="use distcc in pump mode",
action=arguments.action.enable)
parser.add_argument(
"--enable-asan",
help="enable Address Sanitizer",
action=arguments.action.enable)
parser.add_argument(
"--enable-ubsan",
help="enable Undefined Behavior Sanitizer",
action=arguments.action.enable)
parser.add_argument(
"--enable-tsan",
help="enable Thread Sanitizer for swift tools",
action=arguments.action.enable)
parser.add_argument(
"--enable-tsan-runtime",
help="enable Thread Sanitizer on the swift runtime")
parser.add_argument(
"--enable-lsan",
help="enable Leak Sanitizer for swift tools",
action=arguments.action.enable)
parser.add_argument(
"--compiler-vendor",
choices=["none", "apple"],
default=defaults.COMPILER_VENDOR,
help="Compiler vendor name")
parser.add_argument(
"--clang-compiler-version",
help="string that indicates a compiler version for Clang",
type=arguments.type.clang_compiler_version,
metavar="MAJOR.MINOR.PATCH")
parser.add_argument(
"--clang-user-visible-version",
help="User-visible version of the embedded Clang and LLVM compilers",
type=arguments.type.clang_compiler_version,
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar="MAJOR.MINOR.PATCH")
parser.add_argument(
"--swift-compiler-version",
help="string that indicates a compiler version for Swift",
type=arguments.type.swift_compiler_version,
metavar="MAJOR.MINOR")
parser.add_argument(
"--swift-user-visible-version",
help="User-visible version of the embedded Swift compiler",
type=arguments.type.swift_compiler_version,
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar="MAJOR.MINOR")
parser.add_argument(
"--darwin-deployment-version-osx",
help="minimum deployment target version for OS X",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX)
parser.add_argument(
"--darwin-deployment-version-ios",
help="minimum deployment target version for iOS",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS)
parser.add_argument(
"--darwin-deployment-version-tvos",
help="minimum deployment target version for tvOS",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS)
parser.add_argument(
"--darwin-deployment-version-watchos",
help="minimum deployment target version for watchOS",
metavar="MAJOR.MINOR",
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS)
parser.add_argument(
"--extra-cmake-options",
help="Pass through extra options to CMake in the form of comma "
"separated options '-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp'. Can be "
"called multiple times to add multiple such options.",
action=arguments.action.concat,
type=arguments.type.shell_split,
default=[])
parser.add_argument(
"--build-args",
help="arguments to the build tool. This would be prepended to the "
"default argument that is '-j8' when CMake generator is "
"\"Ninja\".",
type=arguments.type.shell_split,
default=[])
parser.add_argument(
"--verbose-build",
help="print the commands executed during the build",
action=arguments.action.enable)
parser.add_argument(
"--lto",
help="use lto optimization on llvm/swift tools. This does not "
"imply using lto on the swift standard library or runtime. "
"Options: thin, full. If no optional arg is provided, full is "
"chosen by default",
metavar="LTO_TYPE",
nargs='?',
choices=['thin', 'full'],
default=None,
const='full',
dest='lto_type')
parser.add_argument(
"--clang-profile-instr-use",
help="profile file to use for clang PGO",
metavar="PATH")
default_max_lto_link_job_counts = host.max_lto_link_job_counts()
parser.add_argument(
"--llvm-max-parallel-lto-link-jobs",
help="the maximum number of parallel link jobs to use when compiling "
"llvm",
metavar="COUNT",
default=default_max_lto_link_job_counts['llvm'])
parser.add_argument(
"--swift-tools-max-parallel-lto-link-jobs",
help="the maximum number of parallel link jobs to use when compiling "
"swift tools.",
metavar="COUNT",
default=default_max_lto_link_job_counts['swift'])
parser.add_argument("--enable-sil-ownership",
help="Enable the SIL ownership model",
action='store_true')
parser.add_argument("--force-optimized-typechecker",
help="Force the type checker to be built with "
"optimization",
action='store_true')
parser.add_argument(
# Explicitly unavailable options here.
"--build-jobs",
"--common-cmake-options",
"--only-execute",
"--skip-test-optimize-for-size",
"--skip-test-optimized",
action=arguments.action.unavailable)
parser.add_argument(
"--lit-args",
help="lit args to use when testing",
metavar="LITARGS",
default="-sv")
parser.add_argument(
"--coverage-db",
help="coverage database to use when prioritizing testing",
metavar="PATH")
return parser
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects; the LLDB checkout
is only needed if you intend to use the -l, -L, --lldb, or --debug-lldb
options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install" \\
install_symroot="/tmp/symroot" \\
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \\
--test \\
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`.
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process and giving you
controls for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
|
OscarSwanros/swift
|
utils/build_swift/driver_arguments.py
|
Python
|
apache-2.0
| 47,708
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import json
import logging
from typing import Iterator, Tuple
import yaml
from werkzeug.utils import secure_filename
from superset.commands.export import ExportModelsCommand
from superset.connectors.sqla.models import SqlaTable
from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.datasets.dao import DatasetDAO
from superset.utils.dict_import_export import EXPORT_VERSION
logger = logging.getLogger(__name__)
JSON_KEYS = {"params", "template_params", "extra"}
class ExportDatasetsCommand(ExportModelsCommand):
dao = DatasetDAO
not_found = DatasetNotFoundError
@staticmethod
def _export(model: SqlaTable) -> Iterator[Tuple[str, str]]:
database_slug = secure_filename(model.database.database_name)
dataset_slug = secure_filename(model.table_name)
file_name = f"datasets/{database_slug}/{dataset_slug}.yaml"
payload = model.export_to_dict(
recursive=True,
include_parent_ref=False,
include_defaults=True,
export_uuids=True,
)
# TODO (betodealmeida): move this logic to export_to_dict once this
# becomes the default export endpoint
for key in JSON_KEYS:
if payload.get(key):
try:
payload[key] = json.loads(payload[key])
except json.decoder.JSONDecodeError:
logger.info("Unable to decode `%s` field: %s", key, payload[key])
for key in ("metrics", "columns"):
for attributes in payload.get(key, []):
if attributes.get("extra"):
try:
attributes["extra"] = json.loads(attributes["extra"])
except json.decoder.JSONDecodeError:
logger.info(
"Unable to decode `extra` field: %s", attributes["extra"]
)
payload["version"] = EXPORT_VERSION
payload["database_uuid"] = str(model.database.uuid)
file_content = yaml.safe_dump(payload, sort_keys=False)
yield file_name, file_content
# include database as well
file_name = f"databases/{database_slug}.yaml"
payload = model.database.export_to_dict(
recursive=False,
include_parent_ref=False,
include_defaults=True,
export_uuids=True,
)
# TODO (betodealmeida): move this logic to export_to_dict once this
# becomes the default export endpoint
if payload.get("extra"):
try:
payload["extra"] = json.loads(payload["extra"])
except json.decoder.JSONDecodeError:
logger.info("Unable to decode `extra` field: %s", payload["extra"])
payload["version"] = EXPORT_VERSION
file_content = yaml.safe_dump(payload, sort_keys=False)
yield file_name, file_content
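# Illustrative sketch (not part of the Superset module): why the JSON-encoded
# columns are decoded before dumping. With a hypothetical payload, decoding
# "extra" lets YAML emit a nested mapping instead of an escaped JSON string.
if __name__ == "__main__":
    _payload = {"table_name": "example", "extra": '{"warning_markdown": "beta"}'}
    try:
        _payload["extra"] = json.loads(_payload["extra"])
    except json.decoder.JSONDecodeError:
        pass
    print(yaml.safe_dump(_payload, sort_keys=False))
    # prints:
    #   table_name: example
    #   extra:
    #     warning_markdown: beta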
|
apache/incubator-superset
|
superset/datasets/commands/export.py
|
Python
|
apache-2.0
| 3,734
|
"""
Unit tests for variant objects. This is used for all tests
that can be performed in isolation from input data.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import unittest
import ga4gh.protocol as protocol
import ga4gh.datarepo as datarepo
import ga4gh.datamodel.variants as variants
import ga4gh.datamodel.datasets as datasets
import tests.paths as paths
class TestHtslibVariantAnnotationSet(unittest.TestCase):
"""
    Unit tests for HtslibVariantAnnotationSet.
"""
def _createVariantAnnotationSet(self, vcfDir):
"""
Creates a VariantAnnotationSet from the specified directory of
VCF files.
"""
self._variantSetName = "testVariantSet"
self._repo = datarepo.SqlDataRepository(paths.testDataRepo)
self._repo.open(datarepo.MODE_READ)
self._dataset = datasets.Dataset("testDs")
self._variantSet = variants.HtslibVariantSet(
self._dataset, self._variantSetName)
self._variantSet.populateFromDirectory(vcfDir)
self._variantAnnotationSet = variants.HtslibVariantAnnotationSet(
self._variantSet, "testVAs")
self._variantAnnotationSet.setOntology(
self._repo.getOntologyByName(paths.ontologyName))
def setUp(self):
vcfDir = "tests/data/datasets/dataset1/variants/WASH7P_annotation"
self._createVariantAnnotationSet(vcfDir)
def testConvertLocation(self):
loc = protocol.AlleleLocation()
loc.start = 150
pos = "151/305"
testLoc = self._variantAnnotationSet.convertLocation(pos)
self.assertEqual(testLoc, loc)
def testThousandGenomesAnnotation(self):
vcfDir = "tests/data/datasets/dataset1/variants/1kg.3.annotations"
self._createVariantAnnotationSet(vcfDir)
self.assertTrue(self._variantSet.isAnnotated())
def testConvertLocationHgvsC(self):
loc = protocol.AlleleLocation()
loc.start = 430
loc.reference_sequence = "T"
loc.alternate_sequence = "A"
hgvsC = "NM_001005484.1:c.431T>A"
testLoc = self._variantAnnotationSet.convertLocationHgvsC(hgvsC)
self.assertEqual(testLoc, loc)
def testConvertLocationHgvsP(self):
loc = protocol.AlleleLocation()
loc.start = 143
loc.alternate_sequence = "Asn"
loc.reference_sequence = "Ile"
hgvsP = "NM_001005484.1:p.Ile144Asn"
testLoc = self._variantAnnotationSet.convertLocationHgvsP(hgvsP)
self.assertEqual(testLoc, loc)
def testAddLocations(self):
effect = protocol.TranscriptEffect()
effect.hgvs_annotation.protein = "NM_001005484.1:p.Ile144Asn"
effect.hgvs_annotation.transcript = "NM_001005484.1:c.431T>A"
effect.protein_location.alternate_sequence = "Asn"
effect.protein_location.reference_sequence = "Ile"
effect.protein_location.start = 143
effect.cds_location.alternate_sequence = "A"
effect.cds_location.reference_sequence = "T"
effect.cds_location.start = 430
effect.cdna_location.start = 430
protPos = "144/305"
cdnaPos = "431/918"
testEffect = self._variantAnnotationSet.addLocations(
effect, protPos, cdnaPos)
self.assertEqual(testEffect, effect)
def testHashVariantAnnotation(self):
annotation = protocol.VariantAnnotation()
variant = protocol.Variant()
expected = hashlib.md5('\t()\t[]\t').hexdigest()
hashed = self._variantAnnotationSet.hashVariantAnnotation(
variant, annotation)
self.assertEqual(hashed, expected)
def testGetTranscriptEffectId(self):
effect = protocol.TranscriptEffect()
expected = hashlib.md5("\t\t[]\t").hexdigest()
hashed = self._variantAnnotationSet.getTranscriptEffectId(effect)
self.assertEqual(hashed, expected)
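# Illustrative sketch (not part of the test module): the convertLocation
# expectations above assume VEP-style "position/length" strings are 1-based,
# while AlleleLocation.start is 0-based, so the conversion subtracts one from
# the leading integer. _demo_convert is a hypothetical helper.
if __name__ == "__main__":
    def _demo_convert(pos):
        return int(pos.split("/")[0]) - 1
    assert _demo_convert("151/305") == 150  # matches testConvertLocation
    assert _demo_convert("144/305") == 143  # matches testAddLocations protPos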
|
macieksmuga/server
|
tests/unit/test_variant_annotations.py
|
Python
|
apache-2.0
| 3,956
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import os
import yaml
class FilteringUtils(object):
def __init__(self):
self.main_folder = os.path.dirname(os.path.dirname(
os.path.split(__file__)[0]))
def load_file(self, file_name):
file_path = os.path.join(self.main_folder, file_name.lstrip('/'))
with open(file_path, "r") as f:
filter_dict = yaml.load(f)
return [filter_dict, file_path]
def filter_vms(self, src_data_list):
loaded_data = self.load_file('configs/filter.yaml')
filter_dict = loaded_data[0]
popped_vm_list = []
if 'instances' not in filter_dict:
return [src_data_list, []]
for vm in src_data_list[:]:
if vm.id not in filter_dict['instances']['id']:
popped_vm_list.append(vm)
index = src_data_list.index(vm)
src_data_list.pop(index)
return [src_data_list, popped_vm_list]
def filter_images(self, src_data_list):
loaded_data = self.load_file('configs/filter.yaml')
filter_dict = loaded_data[0]
popped_img_list = []
default_img = 'Cirros 0.3.0 x86_64'
if 'images' not in filter_dict:
return [src_data_list, []]
for img in src_data_list[:]:
if img.id not in filter_dict['images']['images_list']:
if img.name != default_img:
popped_img_list.append(img)
index = src_data_list.index(img)
src_data_list.pop(index)
return [src_data_list, popped_img_list]
def filter_tenants(self, src_data_list):
loaded_data = self.load_file('configs/filter.yaml')
filter_dict = loaded_data[0]
popped_tenant_list = []
if 'tenants' not in filter_dict:
return [src_data_list, []]
for tenant in src_data_list:
if tenant.id not in filter_dict['tenants']['tenant_id']:
popped_tenant_list.append(tenant)
index = src_data_list.index(tenant)
src_data_list.pop(index)
return [src_data_list, popped_tenant_list]
@staticmethod
def get_vm_fip(vm):
for net in vm.addresses:
for addr in vm.addresses[net]:
if addr['OS-EXT-IPS:type'] == 'floating':
return addr['addr']
raise RuntimeError('VM with name {} and id {} doesnt have fip'.format(
vm.name, vm.id))
|
japaniel/CloudFerry
|
devlab/tests/filtering_utils.py
|
Python
|
apache-2.0
| 3,024
|
"""Support for Tellstick sensors."""
from __future__ import annotations
from collections import namedtuple
import logging
from tellcore import telldus
import tellcore.constants as tellcore_constants
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
)
from homeassistant.const import (
CONF_ID,
CONF_NAME,
CONF_PROTOCOL,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
DatatypeDescription = namedtuple(
"DatatypeDescription", ["name", "unit", "device_class"]
)
CONF_DATATYPE_MASK = "datatype_mask"
CONF_ONLY_NAMED = "only_named"
CONF_TEMPERATURE_SCALE = "temperature_scale"
CONF_MODEL = "model"
DEFAULT_DATATYPE_MASK = 127
DEFAULT_TEMPERATURE_SCALE = TEMP_CELSIUS
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_TEMPERATURE_SCALE, default=DEFAULT_TEMPERATURE_SCALE
): cv.string,
vol.Optional(
CONF_DATATYPE_MASK, default=DEFAULT_DATATYPE_MASK
): cv.positive_int,
vol.Optional(CONF_ONLY_NAMED, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_PROTOCOL): cv.string,
vol.Optional(CONF_MODEL): cv.string,
}
)
],
),
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Tellstick sensors."""
sensor_value_descriptions = {
tellcore_constants.TELLSTICK_TEMPERATURE: DatatypeDescription(
"temperature",
config.get(CONF_TEMPERATURE_SCALE),
SensorDeviceClass.TEMPERATURE,
),
tellcore_constants.TELLSTICK_HUMIDITY: DatatypeDescription(
"humidity",
PERCENTAGE,
SensorDeviceClass.HUMIDITY,
),
tellcore_constants.TELLSTICK_RAINRATE: DatatypeDescription(
"rain rate", "", None
),
tellcore_constants.TELLSTICK_RAINTOTAL: DatatypeDescription(
"rain total", "", None
),
tellcore_constants.TELLSTICK_WINDDIRECTION: DatatypeDescription(
"wind direction", "", None
),
tellcore_constants.TELLSTICK_WINDAVERAGE: DatatypeDescription(
"wind average", "", None
),
tellcore_constants.TELLSTICK_WINDGUST: DatatypeDescription(
"wind gust", "", None
),
}
try:
tellcore_lib = telldus.TelldusCore()
except OSError:
_LOGGER.exception("Could not initialize Tellstick")
return
sensors = []
datatype_mask = config.get(CONF_DATATYPE_MASK)
if config[CONF_ONLY_NAMED]:
named_sensors = {}
for named_sensor in config[CONF_ONLY_NAMED]:
name = named_sensor[CONF_NAME]
proto = named_sensor.get(CONF_PROTOCOL)
model = named_sensor.get(CONF_MODEL)
id_ = named_sensor[CONF_ID]
if proto is not None:
if model is not None:
named_sensors[f"{proto}{model}{id_}"] = name
else:
named_sensors[f"{proto}{id_}"] = name
else:
named_sensors[id_] = name
for tellcore_sensor in tellcore_lib.sensors():
if not config[CONF_ONLY_NAMED]:
sensor_name = str(tellcore_sensor.id)
else:
proto_id = f"{tellcore_sensor.protocol}{tellcore_sensor.id}"
proto_model_id = "{}{}{}".format(
tellcore_sensor.protocol, tellcore_sensor.model, tellcore_sensor.id
)
if tellcore_sensor.id in named_sensors:
sensor_name = named_sensors[tellcore_sensor.id]
elif proto_id in named_sensors:
sensor_name = named_sensors[proto_id]
elif proto_model_id in named_sensors:
sensor_name = named_sensors[proto_model_id]
else:
continue
for datatype, sensor_info in sensor_value_descriptions.items():
if datatype & datatype_mask and tellcore_sensor.has_value(datatype):
sensors.append(
TellstickSensor(sensor_name, tellcore_sensor, datatype, sensor_info)
)
add_entities(sensors)
class TellstickSensor(SensorEntity):
"""Representation of a Tellstick sensor."""
def __init__(self, name, tellcore_sensor, datatype, sensor_info):
"""Initialize the sensor."""
self._datatype = datatype
self._tellcore_sensor = tellcore_sensor
self._attr_native_unit_of_measurement = sensor_info.unit or None
self._attr_name = f"{name} {sensor_info.name}"
def update(self):
"""Update tellstick sensor."""
self._attr_native_value = self._tellcore_sensor.value(self._datatype).value
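# Illustrative sketch (not part of the integration): CONF_DATATYPE_MASK is a
# bitmask over the tellcore datatype constants, so a mask that keeps only
# temperature and humidity readings could be built as below; the default mask
# of 127 keeps all seven datatypes listed in sensor_value_descriptions.
if __name__ == "__main__":
    _example_mask = (
        tellcore_constants.TELLSTICK_TEMPERATURE
        | tellcore_constants.TELLSTICK_HUMIDITY
    )
    print(_example_mask)  # 3, assuming the usual constant values 1 and 2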
|
mezz64/home-assistant
|
homeassistant/components/tellstick/sensor.py
|
Python
|
apache-2.0
| 5,393
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
def resnext_50_32x4d(): return nn.Sequential( # Sequential,
nn.Conv2d(3,64,(7, 7),(2, 2),(3, 3),1,1,bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3),(2, 2),(1, 1)),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(64,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
nn.Sequential( # Sequential,
nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,128,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(2, 2),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
nn.Sequential( # Sequential,
nn.Conv2d(256,512,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(2, 2),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
nn.Sequential( # Sequential,
nn.Conv2d(512,1024,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(2, 2),(1, 1),1,32,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
nn.Sequential( # Sequential,
nn.Conv2d(1024,2048,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,32,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.AdaptiveAvgPool2d(1),
Lambda(lambda x: x.view(x.size(0),-1)), # View,
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2048,1000)), # Linear,
)
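# Illustrative sketch (not part of the converted-weights file): the factory
# above returns a plain nn.Sequential, so a quick smoke test is a forward pass
# on a dummy ImageNet-sized batch (recent PyTorch accepts plain tensors; very
# old versions would need the input wrapped in Variable).
if __name__ == '__main__':
    model = resnext_50_32x4d()
    model.eval()
    out = model(torch.randn(1, 3, 224, 224))
    print(out.size())  # expected: torch.Size([1, 1000])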
|
jmhsi/justin_tinker
|
data_science/courses/temp/weights/resnext_50_32x4d.py
|
Python
|
apache-2.0
| 10,738
|
#!/usr/bin/env python
import os,sys,json
from datetime import datetime
from filecmp import cmp
from subprocess import check_call
from boto.s3.key import Key
from boto.s3.connection import S3Connection
def ENV(x):
'''Promote an environment variable for global use returning its value'''
retval = os.environ.get(x, '')
globals()[x] = retval
return retval
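# Example (hypothetical values): with BACKUP_BUCKET=my-bucket exported in the
# environment, ENV('BACKUP_BUCKET') returns 'my-bucket' and also binds the
# module-level name BACKUP_BUCKET, which is why the S3 code below can refer to
# BACKUP_BUCKET, BACKUP_AWS_ID and BACKUP_AWS_SECRET as plain globals.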
ROOT_PATH = '/data/backup/docker-ci'
TODAY = str(datetime.today())[:10]
BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY)
BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH)
ENV('BACKUP_BUCKET')
ENV('BACKUP_AWS_ID')
ENV('BACKUP_AWS_SECRET')
'''Create full master buildbot backup, avoiding duplicates'''
# Ensure backup path exist
if not os.path.exists(ROOT_PATH):
os.makedirs(ROOT_PATH)
# Make actual backups
check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave'
' . 1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True)
# remove previous dump if it is the same as the latest
if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and
os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE):
os.unlink(os.path._resolve_link(BACKUP_LINK))
# Recreate backup link pointing to latest backup
try:
os.unlink(BACKUP_LINK)
except OSError:
pass
os.symlink(BACKUP_FILE, BACKUP_LINK)
# Make backup on S3
bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET)
k = Key(bucket)
k.key = BACKUP_FILE
k.set_contents_from_filename(BACKUP_FILE)
bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:])
|
proppy/docker
|
hack/infrastructure/docker-ci/tool/backup.py
|
Python
|
apache-2.0
| 1,554
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Backup code."""
import copy
import os
from unittest import mock
import uuid
import ddt
from eventlet import tpool
from os_brick.initiator.connectors import fake as fake_connectors
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_service import loopingcall
from oslo_utils import importutils
from oslo_utils import timeutils
import cinder
from cinder.backup import api
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder.tests import fake_driver
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import utils
from cinder.volume import rpcapi as volume_rpcapi
CONF = cfg.CONF
class FakeBackupException(Exception):
pass
class BaseBackupTest(test.TestCase):
def setUp(self):
super(BaseBackupTest, self).setUp()
self.backup_mgr = importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.backup_mgr.is_initialized = True
self.ctxt = context.get_admin_context()
paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot',
'cinder.volume.rpcapi.VolumeAPI.delete_volume',
'cinder.volume.rpcapi.VolumeAPI.detach_volume',
'cinder.volume.rpcapi.VolumeAPI.'
'secure_file_operations_enabled']
self.volume_patches = {}
self.volume_mocks = {}
for path in paths:
name = path.split('.')[-1]
self.volume_patches[name] = mock.patch(path)
self.volume_mocks[name] = self.volume_patches[name].start()
self.addCleanup(self.volume_patches[name].stop)
def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()),
restore_volume_id=None,
display_name='test_backup',
display_description='this is a test backup',
container='volumebackups',
status=fields.BackupStatus.CREATING,
size=1,
object_count=0,
project_id=str(uuid.uuid4()),
service=None,
temp_volume_id=None,
temp_snapshot_id=None,
snapshot_id=None,
metadata=None,
parent_id=None,
encryption_key_id=None):
"""Create a backup entry in the DB.
        Return the backup object.
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['restore_volume_id'] = restore_volume_id
kwargs['user_id'] = str(uuid.uuid4())
kwargs['project_id'] = project_id
kwargs['host'] = 'testhost'
kwargs['availability_zone'] = '1'
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['container'] = container
kwargs['status'] = status
kwargs['fail_reason'] = ''
kwargs['service'] = service or CONF.backup_driver
kwargs['snapshot_id'] = snapshot_id
kwargs['parent_id'] = parent_id
kwargs['size'] = size
kwargs['object_count'] = object_count
kwargs['temp_volume_id'] = temp_volume_id
kwargs['temp_snapshot_id'] = temp_snapshot_id
kwargs['metadata'] = metadata or {}
kwargs['encryption_key_id'] = encryption_key_id
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume',
status='backing-up',
previous_status='available',
size=1,
host='testhost',
encryption_key_id=None,
project_id=None):
"""Create a volume entry in the DB.
Return the entry ID
"""
vol = {}
vol['size'] = size
vol['host'] = host
vol['user_id'] = fake.USER_ID
vol['project_id'] = project_id or fake.PROJECT_ID
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
vol['availability_zone'] = '1'
vol['previous_status'] = previous_status
vol['encryption_key_id'] = encryption_key_id
vol['volume_type_id'] = fake.VOLUME_TYPE_ID
volume = objects.Volume(context=self.ctxt, **vol)
volume.create()
return volume.id
def _create_snapshot_db_entry(self, display_name='test_snapshot',
display_description='test snapshot',
status=fields.SnapshotStatus.AVAILABLE,
size=1,
volume_id=str(uuid.uuid4()),
provider_location=None):
"""Create a snapshot entry in the DB.
        Return the snapshot object.
"""
kwargs = {}
kwargs['size'] = size
kwargs['user_id'] = fake.USER_ID
kwargs['project_id'] = fake.PROJECT_ID
kwargs['status'] = status
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['volume_id'] = volume_id
kwargs['cgsnapshot_id'] = None
kwargs['volume_size'] = size
kwargs['metadata'] = {}
kwargs['provider_location'] = provider_location
kwargs['volume_type_id'] = fake.VOLUME_TYPE_ID
snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs)
snapshot_obj.create()
return snapshot_obj
def _create_volume_attach(self, volume_id):
values = {'volume_id': volume_id,
'attach_status': fields.VolumeAttachStatus.ATTACHED, }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
'/dev/vd0')
def _create_exported_record_entry(self, vol_size=1, exported_id=None):
"""Create backup metadata export entry."""
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
if exported_id is not None:
backup.id = exported_id
export = self.backup_mgr.export_record(self.ctxt, backup)
return export
def _create_export_record_db_entry(self,
volume_id=str(uuid.uuid4()),
status=fields.BackupStatus.CREATING,
project_id=str(uuid.uuid4()),
backup_id=None):
"""Create a backup entry in the DB.
        Return the backup import object.
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['user_id'] = fake.USER_ID
kwargs['project_id'] = project_id
kwargs['status'] = status
if backup_id:
kwargs['id'] = backup_id
backup = objects.BackupImport(context=self.ctxt, **kwargs)
backup.create()
return backup
@ddt.ddt
class BackupTestCase(BaseBackupTest):
"""Test Case for backups."""
@mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'set_initialized')
@mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'do_setup')
@mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'check_for_setup_error')
@mock.patch.object(cinder.db.sqlalchemy.api, '_volume_type_get_by_name',
v2_fakes.fake_volume_type_get)
@mock.patch('cinder.context.get_admin_context')
def test_init_host(self, mock_get_admin_context, mock_check, mock_setup,
mock_set_initialized):
"""Test stuck volumes and backups.
Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called
"""
def get_admin_context():
return self.ctxt
self.override_config('backup_service_inithost_offload', False)
self.override_config('periodic_interval', 0)
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
vol3_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
vol4_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
temp_vol_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
vol5_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
temp_snap = self._create_snapshot_db_entry()
temp_snap.status = fields.SnapshotStatus.AVAILABLE
temp_snap.save()
backup1 = self._create_backup_db_entry(
status=fields.BackupStatus.CREATING, volume_id=vol1_id)
backup2 = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING,
restore_volume_id=vol2_id)
backup3 = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol3_id)
self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
volume_id=vol4_id,
temp_volume_id=temp_vol_id)
self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
volume_id=vol5_id,
temp_snapshot_id=temp_snap.id)
mock_get_admin_context.side_effect = get_admin_context
self.volume = importutils.import_object(CONF.volume_manager)
self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id)
self.assertEqual('available', vol1['status'])
vol2 = db.volume_get(self.ctxt, vol2_id)
self.assertEqual('error_restoring', vol2['status'])
vol3 = db.volume_get(self.ctxt, vol3_id)
self.assertEqual('available', vol3['status'])
vol4 = db.volume_get(self.ctxt, vol4_id)
self.assertEqual('available', vol4['status'])
vol5 = db.volume_get(self.ctxt, vol5_id)
self.assertEqual('available', vol5['status'])
backup1 = db.backup_get(self.ctxt, backup1.id)
self.assertEqual(fields.BackupStatus.ERROR, backup1['status'])
backup2 = db.backup_get(self.ctxt, backup2.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status'])
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup3.id)
temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id)
self.volume_mocks['delete_volume'].assert_called_once_with(
self.ctxt, temp_vol)
self.assertTrue(self.volume_mocks['detach_volume'].called)
@mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
@mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool')
def test_init_host_with_service_inithost_offload(self,
mock_add_threadpool,
mock_get_all_by_host):
vol1_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
backup1 = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol1_id)
vol2_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
backup2 = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol2_id)
mock_get_all_by_host.return_value = [backup1, backup2]
self.backup_mgr.init_host()
calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
mock_add_threadpool.assert_has_calls(calls, any_order=True)
# 3 calls because 1 is always made to handle encryption key migration.
self.assertEqual(3, mock_add_threadpool.call_count)
@mock.patch('cinder.keymgr.migration.migrate_fixed_key')
@mock.patch('cinder.objects.BackupList.get_all_by_host')
@mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool')
def test_init_host_key_migration(self,
mock_add_threadpool,
mock_get_all_by_host,
mock_migrate_fixed_key):
self.backup_mgr.init_host()
mock_add_threadpool.assert_called_once_with(
mock_migrate_fixed_key,
backups=mock_get_all_by_host())
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
@ddt.data(123456, 654321)
def test_setup_backup_backend_uses_new_config(
self, new_cfg_value, mock_FILC):
# previously used CONF.periodic_interval; see Bug #1828748
new_cfg_name = 'backup_driver_init_check_interval'
self.addCleanup(CONF.clear_override, new_cfg_name)
CONF.set_override(new_cfg_name, new_cfg_value)
mock_init_loop = mock.MagicMock()
mock_init_loop.start.side_effect = loopingcall.LoopingCallDone()
mock_FILC.return_value = mock_init_loop
self.backup_mgr.setup_backup_backend(self.ctxt)
mock_init_loop.start.assert_called_once_with(interval=new_cfg_value)
@mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
@mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3',
'cinder-volume': '1.7'})
def test_reset(self, get_min_obj, get_min_rpc):
old_version = objects.base.OBJ_VERSIONS.versions[-2]
with mock.patch('cinder.rpc.LAST_OBJ_VERSIONS',
{'cinder-volume': old_version,
'cinder-scheduler': old_version,
'cinder-backup': old_version}):
backup_mgr = manager.BackupManager()
backup_rpcapi = backup_mgr.backup_rpcapi
volume_rpcapi = backup_mgr.volume_rpcapi
self.assertEqual('1.3', backup_rpcapi.client.version_cap)
self.assertEqual(old_version,
backup_rpcapi.client.serializer._base.version_cap)
self.assertEqual('1.7', volume_rpcapi.client.version_cap)
self.assertEqual(old_version,
volume_rpcapi.client.serializer._base.version_cap)
get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current()
backup_mgr.reset()
backup_rpcapi = backup_mgr.backup_rpcapi
volume_rpcapi = backup_mgr.volume_rpcapi
self.assertEqual(get_min_rpc.return_value,
backup_rpcapi.client.version_cap)
self.assertEqual(get_min_obj.return_value,
backup_rpcapi.client.serializer._base.version_cap)
self.assertIsNone(backup_rpcapi.client.serializer._base.manifest)
self.assertEqual(get_min_rpc.return_value,
volume_rpcapi.client.version_cap)
self.assertEqual(get_min_obj.return_value,
volume_rpcapi.client.serializer._base.version_cap)
self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)
@ddt.data(True, False)
def test_is_working(self, initialized):
self.backup_mgr.is_initialized = initialized
self.assertEqual(initialized, self.backup_mgr.is_working())
def test_cleanup_incomplete_backup_operations_with_exceptions(self):
"""Test cleanup resilience in the face of exceptions."""
fake_backup_list = [{'id': fake.BACKUP_ID},
{'id': fake.BACKUP2_ID},
{'id': fake.BACKUP3_ID}]
mock_backup_get_by_host = self.mock_object(
objects.BackupList, 'get_all_by_host')
mock_backup_get_by_host.return_value = fake_backup_list
mock_backup_cleanup = self.mock_object(
self.backup_mgr, '_cleanup_one_backup')
mock_backup_cleanup.side_effect = [Exception]
mock_temp_cleanup = self.mock_object(
self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup')
mock_temp_cleanup.side_effect = [Exception]
self.assertIsNone(
self.backup_mgr._cleanup_incomplete_backup_operations(
self.ctxt))
self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count)
self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count)
@mock.patch('cinder.objects.BackupList')
@mock.patch.object(manager.BackupManager, '_cleanup_one_backup')
@mock.patch.object(manager.BackupManager,
'_cleanup_temp_volumes_snapshots_for_one_backup')
def test_cleanup_non_primary_process(self, temp_cleanup_mock,
backup_cleanup_mock, backup_ovo_mock):
"""Test cleanup doesn't run on non primary processes."""
self.backup_mgr._process_number = 2
self.backup_mgr._cleanup_incomplete_backup_operations(self.ctxt)
backup_ovo_mock.get_all_by_host.assert_not_called()
backup_cleanup_mock.assert_not_called()
temp_cleanup_mock.assert_not_called()
def test_cleanup_one_backing_up_volume(self):
"""Test cleanup_one_volume for volume status 'backing-up'."""
volume_id = self._create_volume_db_entry(status='backing-up',
previous_status='available')
volume = db.volume_get(self.ctxt, volume_id)
self.backup_mgr._cleanup_one_volume(self.ctxt, volume)
volume = db.volume_get(self.ctxt, volume_id)
self.assertEqual('available', volume['status'])
def test_cleanup_one_restoring_backup_volume(self):
"""Test cleanup_one_volume for volume status 'restoring-backup'."""
volume_id = self._create_volume_db_entry(status='restoring-backup')
volume = db.volume_get(self.ctxt, volume_id)
self.backup_mgr._cleanup_one_volume(self.ctxt, volume)
volume = db.volume_get(self.ctxt, volume_id)
self.assertEqual('error_restoring', volume['status'])
def test_cleanup_one_creating_backup(self):
"""Test cleanup_one_backup for volume status 'creating'."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', })
backup = self._create_backup_db_entry(
status=fields.BackupStatus.CREATING,
volume_id=vol1_id)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertEqual(fields.BackupStatus.ERROR, backup.status)
volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
self.assertEqual('available', volume.status)
def test_cleanup_one_restoring_backup(self):
"""Test cleanup_one_backup for volume status 'restoring'."""
vol1_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', })
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING,
restore_volume_id=vol1_id)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
self.assertEqual('error_restoring', volume.status)
def test_cleanup_one_deleting_backup(self):
"""Test cleanup_one_backup for backup status 'deleting'."""
self.override_config('backup_service_inithost_offload', False)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup.id)
def test_cleanup_one_deleting_encrypted_backup(self):
"""Test cleanup of backup status 'deleting' (encrypted)."""
self.override_config('backup_service_inithost_offload', False)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING,
encryption_key_id=fake.ENCRYPTION_KEY_ID)
self.backup_mgr._cleanup_one_backup(self.ctxt, backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertIsNotNone(backup)
self.assertEqual(fields.BackupStatus.ERROR_DELETING,
backup.status)
def test_detach_all_attachments_handles_exceptions(self):
"""Test detach_all_attachments with exceptions."""
mock_log = self.mock_object(manager, 'LOG')
self.volume_mocks['detach_volume'].side_effect = [Exception]
fake_attachments = [
{
'id': fake.ATTACHMENT_ID,
'attached_host': 'testhost',
'instance_uuid': None,
},
{
'id': fake.ATTACHMENT2_ID,
'attached_host': 'testhost',
'instance_uuid': None,
}
]
fake_volume = {
'id': fake.VOLUME3_ID,
'volume_attachment': fake_attachments
}
self.backup_mgr._detach_all_attachments(self.ctxt,
fake_volume)
self.assertEqual(len(fake_attachments), mock_log.exception.call_count)
@ddt.data(KeyError, exception.VolumeNotFound)
def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found(
self, err):
"""Ensure we handle missing volume for a backup."""
mock_volume_get = self.mock_object(db, 'volume_get')
mock_volume_get.side_effect = [err]
backup = self._create_backup_db_entry(
status=fields.BackupStatus.CREATING)
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
def test_cleanup_temp_snapshot_for_one_backup_not_found(self):
"""Ensure we handle missing temp snapshot for a backup."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup = self._create_backup_db_entry(
status=fields.BackupStatus.ERROR,
volume_id=vol1_id,
temp_snapshot_id=fake.SNAPSHOT_ID)
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
self.assertFalse(self.volume_mocks['delete_snapshot'].called)
self.assertIsNone(backup.temp_snapshot_id)
backup.destroy()
db.volume_destroy(self.ctxt, vol1_id)
def test_cleanup_temp_volume_for_one_backup_not_found(self):
"""Ensure we handle missing temp volume for a backup."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
volume_id=vol1_id,
temp_volume_id=fake.VOLUME4_ID)
self.assertIsNone(
self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
self.ctxt,
backup))
self.assertFalse(self.volume_mocks['delete_volume'].called)
self.assertIsNone(backup.temp_volume_id)
backup.destroy()
db.volume_destroy(self.ctxt, vol1_id)
def test_create_backup_with_bad_volume_status(self):
"""Test creating a backup from a volume with a bad status."""
vol_id = self._create_volume_db_entry(status='restoring', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup,
self.ctxt,
backup)
def test_create_backup_with_bad_backup_status(self):
"""Test creating a backup with a backup with a bad status."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.create_backup,
self.ctxt,
backup)
def test_create_backup_with_error(self):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
mock_run_backup = self.mock_object(self.backup_mgr, '_start_backup')
mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4()))
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('error_backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
self.assertTrue(mock_run_backup.called)
@mock.patch('cinder.backup.manager.BackupManager._start_backup')
def test_create_backup_aborted(self, start_backup_mock):
"""Test error handling when abort occurs during backup creation."""
def my_start_backup(*args, **kwargs):
backup.destroy()
with backup.as_read_deleted():
original_refresh()
start_backup_mock.side_effect = my_start_backup
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
original_refresh = backup.refresh
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.backup_mgr.create_backup(self.ctxt, backup)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.backup_mgr._finish_backup(self.ctxt, backup, vol, {})
self.assertTrue(start_backup_mock.called)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('available', vol.status)
self.assertEqual('backing-up', vol['previous_status'])
# Make sure we didn't set the backup to available after it was deleted
with backup.as_read_deleted():
backup.refresh()
self.assertEqual(fields.BackupStatus.DELETED, backup.status)
@mock.patch('cinder.backup.manager.BackupManager._start_backup',
side_effect=FakeBackupException(str(uuid.uuid4())))
def test_create_backup_with_snapshot_error(self, mock_start_backup):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
snapshot = self._create_snapshot_db_entry(status='backing-up',
volume_id=vol_id)
backup = self._create_backup_db_entry(volume_id=vol_id,
snapshot_id=snapshot.id)
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
backup)
snapshot.refresh()
self.assertEqual('available', snapshot.status)
backup.refresh()
self.assertEqual(fields.BackupStatus.ERROR, backup.status)
self.assertTrue(mock_start_backup.called)
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open', wraps=open)
@mock.patch.object(os.path, 'isdir', return_value=False)
def test_create_backup(self, mock_isdir, mock_open, mock_temporary_chown,
mock_get_backup_device, mock_get_conn):
"""Test normal backup creation."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
backup_device_dict = {'backup_device': vol, 'secure_enabled': False,
'is_snapshot': False, }
mock_backup_device = (
objects.BackupDeviceInfo.from_primitive(backup_device_dict,
self.ctxt,
['admin_metadata',
'metadata']))
attach_info = {'device': {'path': '/dev/null'}}
mock_detach_device = self.mock_object(self.backup_mgr,
'_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_get_conn.return_value = properties
self.backup_mgr.create_backup(self.ctxt, backup)
self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device)
mock_temporary_chown.assert_called_once_with('/dev/null')
mock_attach_device.assert_called_once_with(self.ctxt, vol,
properties, False)
mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
mock_get_conn.assert_called_once_with()
mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
vol, properties, False,
force=True,
ignore_errors=True)
mock_open.assert_called_once_with('/dev/null', 'rb')
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertEqual(vol_size, backup['size'])
self.assertIsNone(backup.encryption_key_id)
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_create_backup_set_parent_id_to_none(self, mock_isdir, mock_open,
mock_chown,
mock_backup_device,
mock_brick):
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id,
parent_id='mock')
with mock.patch.object(self.backup_mgr, 'service') as \
mock_service:
mock_service.return_value.backup.return_value = (
{'parent_id': None})
with mock.patch.object(self.backup_mgr, '_detach_device'):
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_brick.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
mock_brick.return_value = properties
self.backup_mgr.create_backup(self.ctxt, backup)
self.backup_mgr.continue_backup(self.ctxt, backup,
mock_backup_device)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertEqual(vol_size, backup.size)
self.assertIsNone(backup.parent_id)
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_create_backup_set_parent_id(self, mock_isdir, mock_open,
mock_chown, mock_backup_device,
mock_brick):
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
parent_backup = self._create_backup_db_entry(size=vol_size)
with mock.patch.object(self.backup_mgr, 'service') as \
mock_service:
mock_service.return_value.backup.return_value = (
{'parent_id': parent_backup.id})
with mock.patch.object(self.backup_mgr, '_detach_device'):
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_brick.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
mock_brick.return_value = properties
self.backup_mgr.create_backup(self.ctxt, backup)
self.backup_mgr.continue_backup(self.ctxt, backup,
mock_backup_device)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertEqual(vol_size, backup.size)
self.assertEqual(parent_backup.id, backup.parent_id)
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_create_backup_fail_with_excep(self, mock_isdir, mock_open,
mock_chown, mock_backup_device,
mock_brick):
vol_id = self._create_volume_db_entry()
backup = self._create_backup_db_entry(volume_id=vol_id)
# These are set in create_backup, but we are calling
# continue_backup
self.ctxt.message_resource_id = backup.id
self.ctxt.message_resource_type = message_field.Resource.VOLUME_BACKUP
self.ctxt.message_action = message_field.Action.BACKUP_CREATE
with mock.patch.object(self.backup_mgr, 'service') as \
mock_service:
mock_service.return_value.backup.side_effect = (
FakeBackupException('fake'))
with mock.patch.object(self.backup_mgr, '_detach_device'):
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
properties = {}
mock_brick.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
mock_brick.return_value = properties
self.assertRaises(FakeBackupException,
self.backup_mgr.continue_backup,
self.ctxt, backup, mock_backup_device)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol.status)
self.assertEqual('error_backing-up', vol.previous_status)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup.status)
@mock.patch('cinder.backup.manager.BackupManager._finish_backup')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=True)
def test_run_backup_with_dir_device_path(self, mock_isdir,
mock_open,
mock_chown,
mock_backup_device,
mock_brick,
mock_finish):
backup_service = mock.Mock()
backup_service.backup = mock.Mock(
return_value=mock.sentinel.backup_update)
self.backup_mgr.service = lambda x: backup_service
vol_id = self._create_volume_db_entry()
backup = self._create_backup_db_entry(volume_id=vol_id)
volume = objects.Volume.get_by_id(self.ctxt, vol_id)
# device_path is represented by a directory
device_path = '/fake/disk/path/'
attach_info = {'device': {'path': device_path}}
self.backup_mgr._attach_device = mock.Mock(
return_value=attach_info)
self.backup_mgr._detach_device = mock.Mock()
self.backup_mgr.continue_backup(self.ctxt, backup,
mock_backup_device)
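        # With os.path.isdir patched to return True the device path is
        # treated as a directory, so no chown/open of a device file is
        # expected and the raw path goes straight to the backup driver.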
mock_chown.assert_not_called()
mock_open.assert_not_called()
backup_service.backup.assert_called_once_with(
backup, device_path)
        mock_finish.assert_called_once_with(self.ctxt, backup, volume,
                                            mock.sentinel.backup_update)
@mock.patch('cinder.backup.manager.BackupManager._start_backup')
@ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'),
(fields.SnapshotStatus.BACKING_UP, 'in-use'),
(fields.SnapshotStatus.AVAILABLE, 'available'),
(fields.SnapshotStatus.AVAILABLE, 'in-use'))
@ddt.unpack
def test_create_backup_with_snapshot(self, snapshot_status, volume_status,
mock_start_backup):
vol_id = self._create_volume_db_entry(status=volume_status)
snapshot = self._create_snapshot_db_entry(volume_id=vol_id,
status=snapshot_status)
backup = self._create_backup_db_entry(volume_id=vol_id,
snapshot_id=snapshot.id)
if snapshot_status == fields.SnapshotStatus.BACKING_UP:
self.backup_mgr.create_backup(self.ctxt, backup)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
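            # _finish_backup is expected to put the volume and snapshot back
            # into the statuses they had before the backup started.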
self.backup_mgr._finish_backup(self.ctxt, backup, vol, {})
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual(volume_status, vol.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
else:
self.assertRaises(exception.InvalidSnapshot,
self.backup_mgr.create_backup, self.ctxt, backup)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.remove_export_snapshot')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open')
@mock.patch.object(os.path, 'isdir', return_value=False)
def test_create_backup_with_temp_snapshot(self, mock_isdir,
mock_open,
mock_temporary_chown,
mock_get_backup_device,
mock_get_conn,
mock_remove_export_snapshot):
"""Test backup in-use volume using temp snapshot."""
self.override_config('backup_use_same_host', True)
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
backup = self._create_backup_db_entry(volume_id=vol_id)
snap = self._create_snapshot_db_entry(volume_id=vol_id)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
mock_backup_device = (
objects.BackupDeviceInfo.from_primitive({
'backup_device': snap, 'secure_enabled': False,
'is_snapshot': True, },
self.ctxt, expected_attrs=['metadata']))
attach_info = {
'device': {'path': '/dev/null'},
'conn': {'data': {}},
'connector': fake_connectors.FakeConnector(None)}
mock_terminate_connection_snapshot = self.mock_object(
volume_rpcapi.VolumeAPI,
'terminate_connection_snapshot')
mock_initialize_connection_snapshot = self.mock_object(
volume_rpcapi.VolumeAPI,
'initialize_connection_snapshot')
mock_connect_device = self.mock_object(
manager.BackupManager,
'_connect_device')
mock_connect_device.return_value = attach_info
properties = {}
mock_get_conn.return_value = properties
mock_open.return_value = open('/dev/null', 'rb')
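        # backup_use_same_host is enabled, so the temporary snapshot is
        # attached locally via initialize/terminate_connection_snapshot and
        # its export is removed once the transfer completes.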
self.backup_mgr.create_backup(self.ctxt, backup)
self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device)
mock_temporary_chown.assert_called_once_with('/dev/null')
mock_initialize_connection_snapshot.assert_called_once_with(
self.ctxt, snap, properties)
mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
mock_get_conn.assert_called_once_with()
mock_terminate_connection_snapshot.assert_called_once_with(
self.ctxt, snap, properties, force=True)
mock_remove_export_snapshot.assert_called_once_with(
self.ctxt, mock.ANY, sync=True)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('backing-up', vol['previous_status'])
backup = objects.Backup.get_by_id(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertEqual(vol_size, backup.size)
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_snapshot')
def test_create_temp_snapshot(self, mock_create_snapshot):
volume_manager = importutils.import_object(CONF.volume_manager)
volume_manager.driver.set_initialized()
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
mock_create_snapshot.return_value = {'provider_id':
'fake_provider_id'}
temp_snap = volume_manager.driver._create_temp_snapshot(
self.ctxt, vol)
self.assertEqual('available', temp_snap['status'])
self.assertEqual('fake_provider_id', temp_snap['provider_id'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
'create_cloned_volume')
def test_create_temp_cloned_volume(self, mock_create_cloned_volume):
volume_manager = importutils.import_object(CONF.volume_manager)
volume_manager.driver.set_initialized()
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
mock_create_cloned_volume.return_value = {'provider_id':
'fake_provider_id'}
temp_vol = volume_manager.driver._create_temp_cloned_volume(
self.ctxt, vol)
self.assertEqual('available', temp_vol['status'])
self.assertEqual('fake_provider_id', temp_vol['provider_id'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
'create_volume_from_snapshot')
def test_create_temp_volume_from_snapshot(self, mock_create_vol_from_snap):
volume_manager = importutils.import_object(CONF.volume_manager)
volume_manager.driver.set_initialized()
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size,
previous_status='in-use')
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
snap = self._create_snapshot_db_entry(volume_id=vol_id)
mock_create_vol_from_snap.return_value = {'provider_id':
'fake_provider_id'}
temp_vol = volume_manager.driver._create_temp_volume_from_snapshot(
self.ctxt, vol, snap)
self.assertEqual('available', temp_vol['status'])
self.assertEqual('fake_provider_id', temp_vol['provider_id'])
@mock.patch('cinder.volume.volume_utils.notify_about_backup_usage')
def test_create_backup_with_notify(self, notify):
"""Test normal backup creation with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.mock_object(self.backup_mgr, '_start_backup')
self.backup_mgr.create_backup(self.ctxt, backup)
self.assertEqual(1, notify.call_count)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.volume.volume_utils.clone_encryption_key')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
def test_create_backup_encrypted_volume(self,
mock_connector_properties,
mock_clone_encryption_key,
mock_get_backup_device):
"""Test backup of encrypted volume.
Test whether the volume's encryption key ID is cloned and
saved in the backup.
"""
vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
mock_clone_encryption_key.return_value = fake.UUID2
self.backup_mgr.create_backup(self.ctxt, backup)
mock_clone_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID1)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fake.UUID2, backup.encryption_key_id)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
@mock.patch('cinder.volume.volume_utils.clone_encryption_key')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
def test_create_backup_encrypted_volume_again(self,
mock_connector_properties,
mock_clone_encryption_key,
mock_get_backup_device):
"""Test backup of encrypted volume.
Test when the backup already has a clone of the volume's encryption
key ID.
"""
vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(volume_id=vol_id,
encryption_key_id=fake.UUID2)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
self.backup_mgr.create_backup(self.ctxt, backup)
mock_clone_encryption_key.assert_not_called()
def test_restore_backup_with_bad_volume_status(self):
"""Test error handling.
Test error handling when restoring a backup to a volume
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
backup = db.backup_get(self.ctxt, backup.id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
def test_restore_backup_with_bad_backup_status(self):
"""Test error handling.
        Test error handling when restoring from a backup that has a bad
        status.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_restore_backup_with_driver_error(self):
"""Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
mock_run_restore = self.mock_object(
self.backup_mgr,
'_run_restore')
mock_run_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertTrue(mock_run_restore.called)
def test_restore_backup_with_driver_cancellation(self):
"""Test error handling when a restore is cancelled."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
mock_run_restore = self.mock_object(
self.backup_mgr,
'_run_restore')
mock_run_restore.side_effect = exception.BackupRestoreCancel(
vol_id=vol_id, back_id=backup.id)
# We shouldn't raise an exception on the call, it's OK to cancel
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('error', vol.status)
backup.refresh()
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertTrue(mock_run_restore.called)
def test_restore_backup_with_creating_volume(self):
"""Test restore backup with a creating volume."""
vol_id = self._create_volume_db_entry(
status=fields.VolumeStatus.CREATING,
size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
mock_run_restore = self.mock_object(
self.backup_mgr,
'_run_restore')
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual(fields.VolumeStatus.AVAILABLE, vol.status)
self.assertIsNotNone(vol.launched_at)
backup.refresh()
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertTrue(mock_run_restore.called)
def test_restore_backup_canceled_with_creating_volume(self):
"""Test restore backup with a creating volume."""
vol_id = self._create_volume_db_entry(
status=fields.VolumeStatus.CREATING,
size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
mock_run_restore = self.mock_object(
self.backup_mgr,
'_run_restore')
mock_run_restore.side_effect = exception.BackupRestoreCancel(
vol_id=vol_id, back_id=backup.id)
# We shouldn't raise an exception on the call, it's OK to cancel
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual(fields.VolumeStatus.ERROR, vol.status)
backup.refresh()
self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
self.assertTrue(mock_run_restore.called)
def test_restore_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a restore of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open', wraps=open)
@mock.patch.object(os.path, 'isdir', return_value=False)
@ddt.data({'os_name': 'nt', 'exp_open_mode': 'rb+'},
{'os_name': 'posix', 'exp_open_mode': 'wb'})
@ddt.unpack
def test_restore_backup(self, mock_isdir, mock_open,
mock_temporary_chown, mock_get_conn,
os_name, exp_open_mode):
"""Test normal backup restoration."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
properties = {}
mock_get_conn.return_value = properties
mock_secure_enabled = (
self.volume_mocks['secure_file_operations_enabled'])
mock_secure_enabled.return_value = False
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
attach_info = {'device': {'path': '/dev/null'}}
mock_detach_device = self.mock_object(self.backup_mgr,
'_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = attach_info
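        # The ddt-supplied os_name determines the expected open mode: 'rb+'
        # on Windows ('nt') versus 'wb' on POSIX, as asserted below.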
with mock.patch('os.name', os_name):
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
mock_open.assert_called_once_with('/dev/null', exp_open_mode)
mock_temporary_chown.assert_called_once_with('/dev/null')
mock_get_conn.assert_called_once_with()
vol.status = 'available'
vol.obj_reset_changes()
mock_secure_enabled.assert_called_once_with(self.ctxt, vol)
mock_attach_device.assert_called_once_with(self.ctxt, vol,
properties)
mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
vol, properties, force=True)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertNotEqual(backup.id, vol.metadata.get('src_backup_id'))
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('builtins.open', wraps=open)
@mock.patch.object(os.path, 'isdir', return_value=False)
@ddt.data({'os_name': 'nt', 'exp_open_mode': 'rb+'},
{'os_name': 'posix', 'exp_open_mode': 'wb'})
@ddt.unpack
def test_restore_backup_new_volume(self,
mock_isdir,
mock_open,
mock_temporary_chown,
mock_get_conn,
os_name,
exp_open_mode):
"""Test normal backup restoration."""
vol_size = 1
vol_id = self._create_volume_db_entry(
status='restoring-backup', size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
vol2_id = self._create_volume_db_entry(
status='restoring-backup', size=vol_size)
backup2 = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol2_id)
vol2 = objects.Volume.get_by_id(self.ctxt, vol2_id)
properties = {}
mock_get_conn.return_value = properties
mock_secure_enabled = (
self.volume_mocks['secure_file_operations_enabled'])
mock_secure_enabled.return_value = False
new_vol_id = self._create_volume_db_entry(
status='restoring-backup', size=vol_size)
vol = objects.Volume.get_by_id(self.ctxt, new_vol_id)
attach_info = {'device': {'path': '/dev/null'}}
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device.return_value = attach_info
with mock.patch('os.name', os_name):
self.backup_mgr.restore_backup(self.ctxt, backup, new_vol_id)
backup.status = "restoring"
db.backup_update(self.ctxt, backup.id, {"status": "restoring"})
vol.status = 'available'
vol.obj_reset_changes()
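        # Restoring the same backup into vol2 should stamp backup.id into
        # vol2's src_backup_id metadata; the later restore from backup2 is
        # expected to overwrite it.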
with mock.patch('os.name', os_name):
self.backup_mgr.restore_backup(self.ctxt, backup, vol2_id)
vol2.refresh()
old_src_backup_id = vol2.metadata["src_backup_id"]
self.assertEqual(backup.id, old_src_backup_id)
vol2.status = 'restoring-backup'
db.volume_update(self.ctxt, vol2.id, {"status": "restoring-backup"})
vol2.obj_reset_changes()
with mock.patch('os.name', os_name):
self.backup_mgr.restore_backup(self.ctxt, backup2, vol2_id)
vol2.status = 'available'
vol2.obj_reset_changes()
vol.refresh()
vol2.refresh()
self.assertEqual('available', vol.status)
backup.refresh()
self.assertEqual(backup.id, vol.metadata["src_backup_id"])
self.assertNotEqual(old_src_backup_id, vol2.metadata["src_backup_id"])
self.assertEqual(backup2.id, vol2.metadata["src_backup_id"])
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
@mock.patch('cinder.volume.volume_utils.notify_about_backup_usage')
def test_restore_backup_with_notify(self, notify):
"""Test normal backup restoration with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
self.backup_mgr._run_restore = mock.Mock()
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
self.assertEqual(2, notify.call_count)
@mock.patch('cinder.volume.volume_utils.clone_encryption_key')
@mock.patch('cinder.volume.volume_utils.delete_encryption_key')
@mock.patch(
'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
def test_restore_backup_encrypted_volume(self,
mock_connector_properties,
mock_backup_driver_restore,
mock_delete_encryption_key,
mock_clone_encryption_key):
"""Test restore of encrypted volume.
Test restoring a volume from its own backup. In this situation,
the volume's encryption key ID shouldn't change.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.RESTORING,
encryption_key_id=fake.UUID2)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
volume = db.volume_get(self.ctxt, vol_id)
self.assertEqual(fake.UUID1, volume.encryption_key_id)
mock_clone_encryption_key.assert_not_called()
mock_delete_encryption_key.assert_not_called()
@mock.patch('cinder.volume.volume_utils.clone_encryption_key')
@mock.patch('cinder.volume.volume_utils.delete_encryption_key')
@mock.patch(
'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
def test_restore_backup_new_encrypted_volume(self,
mock_connector_properties,
mock_backup_driver_restore,
mock_delete_encryption_key,
mock_clone_encryption_key):
"""Test restore of encrypted volume.
        Test handling of encryption key IDs when restoring to another
encrypted volume, i.e. a volume whose key ID is different from
the volume originally backed up.
- The volume's prior encryption key ID is deleted.
- The volume is assigned a fresh clone of the backup's encryption
key ID.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.RESTORING,
encryption_key_id=fake.UUID2)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
mock_clone_encryption_key.return_value = fake.UUID3
# Mimic the driver's side effect where it updates the volume's
# metadata. For backups of encrypted volumes, this will essentially
# overwrite the volume's encryption key ID prior to the restore.
def restore_side_effect(backup, volume_id, volume_file):
db.volume_update(self.ctxt,
volume_id,
{'encryption_key_id': fake.UUID4})
mock_backup_driver_restore.side_effect = restore_side_effect
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
# Volume's original encryption key ID should be deleted
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID1)
# Backup's encryption key ID should have been cloned
mock_clone_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID2)
# Volume should have the cloned backup key ID
volume = db.volume_get(self.ctxt, vol_id)
self.assertEqual(fake.UUID3, volume.encryption_key_id)
# Backup's key ID should not have changed
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fake.UUID2, backup.encryption_key_id)
@mock.patch('cinder.volume.volume_utils.clone_encryption_key')
@mock.patch('cinder.volume.volume_utils.delete_encryption_key')
@mock.patch(
'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
@mock.patch('cinder.volume.volume_utils.brick_get_connector_properties')
def test_restore_backup_glean_key_id(self,
mock_connector_properties,
mock_backup_driver_restore,
mock_delete_encryption_key,
mock_clone_encryption_key):
"""Test restore of encrypted volume.
Test restoring a backup that was created prior to when the encryption
key ID is saved in the backup DB. The backup encryption key ID is
gleaned from the restored volume.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.RESTORING)
self.mock_object(self.backup_mgr, '_detach_device')
mock_attach_device = self.mock_object(self.backup_mgr,
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
mock_clone_encryption_key.return_value = fake.UUID3
# Mimic the driver's side effect where it updates the volume's
# metadata. For backups of encrypted volumes, this will essentially
# overwrite the volume's encryption key ID prior to the restore.
def restore_side_effect(backup, volume_id, volume_file):
db.volume_update(self.ctxt,
volume_id,
{'encryption_key_id': fake.UUID4})
mock_backup_driver_restore.side_effect = restore_side_effect
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
# Volume's original encryption key ID should be deleted
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID1)
# Backup's encryption key ID should have been cloned from
# the value restored from the metadata.
mock_clone_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID4)
# Volume should have the cloned backup key ID
volume = db.volume_get(self.ctxt, vol_id)
self.assertEqual(fake.UUID3, volume.encryption_key_id)
# Backup's key ID should have been gleaned from value restored
# from the backup's metadata
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fake.UUID4, backup.encryption_key_id)
def test_delete_backup_with_bad_backup_status(self):
"""Test error handling.
        Test error handling when deleting a backup that has a bad status.
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_delete_backup_with_error(self):
"""Test error handling when an error occurs during backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING,
display_name='fail_on_delete', volume_id=vol_id)
self.assertRaises(IOError,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_delete_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_delete_backup_with_no_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
with no service defined for that backup, relates to bug #1162908
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id)
backup.service = None
backup.save()
self.backup_mgr.delete_backup(self.ctxt, backup)
def test_delete_backup(self):
"""Test normal backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id,
service='cinder.tests.unit.backup.fake_service.FakeBackupService')
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup.id)
ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertTrue(backup.deleted)
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual(fields.BackupStatus.DELETED, backup.status)
@mock.patch('cinder.volume.volume_utils.delete_encryption_key')
def test_delete_backup_of_encrypted_volume(self,
mock_delete_encryption_key):
"""Test deletion of backup of encrypted volume"""
vol_id = self._create_volume_db_entry(
encryption_key_id=fake.UUID1)
backup = self._create_backup_db_entry(
volume_id=vol_id,
status=fields.BackupStatus.DELETING,
encryption_key_id=fake.UUID2)
self.backup_mgr.delete_backup(self.ctxt, backup)
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
mock.ANY,
fake.UUID2)
ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertTrue(backup.deleted)
self.assertIsNone(backup.encryption_key_id)
@mock.patch('cinder.volume.volume_utils.notify_about_backup_usage')
def test_delete_backup_with_notify(self, notify):
"""Test normal backup deletion with notifications."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.DELETING, volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
def test_list_backup(self):
project_id = fake.PROJECT_ID
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(0, len(backups))
self._create_backup_db_entry()
b2 = self._create_backup_db_entry(project_id=project_id)
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(1, len(backups))
self.assertEqual(b2.id, backups[0].id)
def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups.
Test deleted backups don't show up in backup_get_all_by_project.
Unless context.read_deleted is 'yes'.
"""
project_id = fake.PROJECT2_ID
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry(project_id=project_id)
backup = self._create_backup_db_entry(project_id=project_id)
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_project(self.ctxt, project_id)
self.assertEqual(1, len(backups))
self.assertEqual(backup_keep.id, backups[0].id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id)
self.assertEqual(2, len(backups))
def test_backup_get_all_by_host_with_deleted(self):
"""Test deleted backups.
        Test deleted backups don't show up in backup_get_all_by_host
        unless context.read_deleted is 'yes'.
"""
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry()
backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(1, len(backups))
self.assertEqual(backup_keep.id, backups[0].id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
self.assertEqual(2, len(backups))
def test_export_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an export of a backup
record with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
def test_export_record_with_bad_backup_status(self):
"""Test error handling.
        Test error handling when exporting a backup record for a backup
        that has a bad status.
"""
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
def test_export_record(self):
"""Test normal backup record export."""
service = 'cinder.tests.unit.backup.fake_service.FakeBackupService'
vol_size = 1
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id,
service=service)
export = self.backup_mgr.export_record(self.ctxt, backup)
self.assertEqual(service, export['backup_service'])
self.assertIn('backup_url', export)
def test_import_record_with_verify_not_implemented(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver does not support verify.
"""
vol_size = 1
backup_id = fake.BACKUP4_ID
export = self._create_exported_record_entry(vol_size=vol_size,
exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
self.assertEqual(vol_size, backup['size'])
def test_import_record_with_wrong_id(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver does not support verify.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
def test_import_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record with a different service to that used to create the backup.
"""
export = self._create_exported_record_entry()
export['backup_service'] = 'cinder.tests.unit.backup.bad_service'
imported_record = self._create_export_record_db_entry()
# Test the case where the additional hosts list is empty
backup_hosts = []
self.assertRaises(exception.ServiceNotFound,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
# Test that the import backup keeps calling other hosts to find a
# suitable host for the backup service
backup_hosts = ['fake1', 'fake2']
backup_hosts_expect = list(backup_hosts)
BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
with mock.patch(BackupAPI_import) as _mock_backup_import:
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
next_host = backup_hosts_expect.pop()
_mock_backup_import.assert_called_once_with(
self.ctxt,
next_host,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts_expect)
def test_import_record_with_invalid_backup(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
export = self._create_exported_record_entry()
backup_driver = self.backup_mgr.service(self.ctxt)
_mock_record_import_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'import_record'))
imported_record = self._create_export_record_db_entry()
backup_hosts = []
with mock.patch(_mock_record_import_class) as _mock_record_import:
_mock_record_import.side_effect = FakeBackupException('fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_import.called)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
def test_not_supported_driver_to_force_delete(self):
"""Test force delete check method for not supported drivers."""
self.override_config('backup_driver',
'cinder.backup.drivers.ceph.CephBackupDriver')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertFalse(result)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_init_backup_repo_path', return_value=None)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'check_for_setup_error', return_value=None)
def test_check_support_to_force_delete(self, mock_check_configuration,
mock_init_backup_repo_path):
"""Test force delete check method for supported drivers."""
self.override_config('backup_driver',
'cinder.backup.drivers.nfs.NFSBackupDriver')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertTrue(result)
def test_backup_has_dependent_backups(self):
"""Test backup has dependent backups.
Test the query of has_dependent_backups in backup object is correct.
"""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertFalse(backup.has_dependent_backups)
def test_default_tpool_size(self):
"""Test we can set custom tpool size."""
tpool._nthreads = 20
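        # Seed a non-default value so we can verify manager initialization
        # resets the native thread pool to its default size of 60.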
self.assertListEqual([], tpool._threads)
self.backup_mgr = importutils.import_object(CONF.backup_manager)
self.assertEqual(60, tpool._nthreads)
self.assertListEqual([], tpool._threads)
def test_tpool_size(self):
"""Test we can set custom tpool size."""
self.assertNotEqual(100, tpool._nthreads)
self.assertListEqual([], tpool._threads)
self.override_config('backup_native_threads_pool_size', 100)
self.backup_mgr = importutils.import_object(CONF.backup_manager)
self.assertEqual(100, tpool._nthreads)
self.assertListEqual([], tpool._threads)
@mock.patch('cinder.backup.manager.BackupManager._run_restore')
def test_backup_max_operations_restore(self, mock_restore):
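        # Each restore runs inside the manager's _semaphore (sized from the
        # backup_max_operations option), so we only check that it is entered
        # and exited exactly once around the restore call.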
mock_sem = self.mock_object(self.backup_mgr, '_semaphore')
vol_id = self._create_volume_db_entry(
status=fields.VolumeStatus.RESTORING_BACKUP)
backup = self._create_backup_db_entry(
volume_id=vol_id, status=fields.BackupStatus.RESTORING)
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
self.assertEqual(1, mock_sem.__enter__.call_count)
self.assertEqual(1, mock_restore.call_count)
self.assertEqual(1, mock_sem.__exit__.call_count)
@mock.patch('cinder.backup.manager.BackupManager._start_backup')
def test_backup_max_operations_backup(self, mock_backup):
mock_sem = self.mock_object(self.backup_mgr, '_semaphore')
vol_id = self._create_volume_db_entry(
status=fields.VolumeStatus.BACKING_UP)
backup = self._create_backup_db_entry(
volume_id=vol_id, status=fields.BackupStatus.CREATING)
self.backup_mgr.create_backup(self.ctxt, backup)
self.assertEqual(1, mock_sem.__enter__.call_count)
self.assertEqual(1, mock_backup.call_count)
self.assertEqual(1, mock_sem.__exit__.call_count)
@ddt.ddt
class BackupAPITestCase(BaseBackupTest):
def setUp(self):
super(BackupAPITestCase, self).setUp()
self.api = api.API()
def test_get_all_wrong_all_tenants_value(self):
self.assertRaises(exception.InvalidParameterValue,
self.api.get_all, self.ctxt, {'all_tenants': 'bad'})
@mock.patch.object(objects, 'BackupList')
def test_get_all_no_all_tenants_value(self, mock_backuplist):
result = self.api.get_all(self.ctxt, {'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(False, 'false', '0', 0, 'no')
def test_get_all_false_value_all_tenants(
self, false_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': false_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(True, 'true', '1', 1, 'yes')
def test_get_all_true_value_all_tenants(
self, true_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': true_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all_by_project.called)
self.assertEqual(mock_backuplist.get_all.return_value,
result)
mock_backuplist.get_all.assert_called_once_with(
self.ctxt, {'key': 'value'}, None, None, None, None, None)
@mock.patch.object(objects, 'BackupList')
def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist):
ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4())
result = self.api.get_all(ctxt, {'all_tenants': '1',
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None,
None)
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch.object(db, 'backup_create',
side_effect=db_exc.DBError())
def test_create_when_failed_to_create_backup_object(
self, mock_create,
mock_get_service):
# Create volume in admin context
volume_id = utils.create_volume(self.ctxt)['id']
# Will try to backup from a different context
new_context = copy.copy(self.ctxt)
new_context.user_id = fake.USER3_ID
new_context.project_id = fake.USER3_ID
        # The failure mode guarded against here is a "NotImplementedError:
        # Cannot load 'id' in the base class" being raised. More
        # specifically, in the try clause, if backup.create() fails with a
        # DB exception, backup.id is never assigned. However, the except
        # clause invokes backup.destroy() for cleanup, which internally
        # tries to access backup.id.
self.assertRaises(db_exc.DBError, self.api.create,
context=new_context,
name="test_backup",
description="test backup description",
volume_id=volume_id,
container='volumebackups')
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch.object(objects.Backup, '__init__',
side_effect=exception.InvalidInput(
reason='Failed to new'))
def test_create_when_failed_to_new_backup_object(self, mock_new,
mock_get_service):
volume_id = utils.create_volume(self.ctxt)['id']
        # The failure mode guarded against here is an "UnboundLocalError:
        # local variable 'backup' referenced before assignment". More
        # specifically, in the try clause backup = objects.Backup(...)
        # raises an exception, so 'backup' is never assigned, yet the
        # except clause references 'backup' to invoke cleanup methods.
self.assertRaises(exception.InvalidInput, self.api.create,
context=self.ctxt,
name="test_backup",
description="test backup description",
volume_id=volume_id,
container='volumebackups')
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
def test_create_backup_from_snapshot_with_volume_in_use(
self, mock_create, mock_get_service):
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
volume_id = self._create_volume_db_entry(status='in-use')
snapshot = self._create_snapshot_db_entry(volume_id=volume_id)
backup = self.api.create(self.ctxt, None, None, volume_id, None,
snapshot_id=snapshot.id)
self.assertEqual(fields.BackupStatus.CREATING, backup.status)
volume = objects.Volume.get_by_id(self.ctxt, volume_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status)
self.assertEqual('in-use', volume.status)
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@ddt.data(True, False)
def test_create_backup_resource_status(self, is_snapshot, mock_create,
mock_get_service):
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
volume_id = self._create_volume_db_entry(status='available')
snapshot = self._create_snapshot_db_entry(volume_id=volume_id)
if is_snapshot:
self.api.create(self.ctxt, None, None, volume_id, None,
snapshot_id=snapshot.id)
volume = objects.Volume.get_by_id(self.ctxt, volume_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual('backing-up', snapshot.status)
self.assertEqual('available', volume.status)
else:
self.api.create(self.ctxt, None, None, volume_id, None)
volume = objects.Volume.get_by_id(self.ctxt, volume_id)
snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
self.assertEqual('available', snapshot.status)
self.assertEqual('backing-up', volume.status)
@mock.patch('cinder.backup.api.API._get_available_backup_service_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup')
def test_restore_volume(self,
mock_rpcapi_restore,
mock_get_backup_host):
volume_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(size=1,
status='available')
mock_get_backup_host.return_value = 'testhost'
self.api.restore(self.ctxt, backup.id, volume_id)
backup = objects.Backup.get_by_id(self.ctxt, backup.id)
self.assertEqual(volume_id, backup.restore_volume_id)
@mock.patch.object(objects.Backup, 'decode_record')
@mock.patch.object(quota.QUOTAS, 'commit')
@mock.patch.object(quota.QUOTAS, 'rollback')
@mock.patch.object(quota.QUOTAS, 'reserve')
def test__get_import_backup_invalid_backup(
self, mock_reserve, mock_rollback, mock_commit, mock_decode):
backup = self._create_backup_db_entry(size=1,
status='available')
mock_decode.return_value = {'id': backup.id,
'project_id': backup.project_id,
'user_id': backup.user_id,
'volume_id': backup.volume_id,
'size': 1}
mock_reserve.return_value = 'fake_reservation'
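        # The decoded record points at a backup id that already exists, so
        # _get_import_backup should raise InvalidBackup and roll back the
        # quota reservation it took out.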
self.assertRaises(exception.InvalidBackup,
self.api._get_import_backup,
self.ctxt, 'fake_backup_url')
mock_reserve.assert_called_with(
self.ctxt, backups=1, backup_gigabytes=1)
mock_rollback.assert_called_with(self.ctxt, "fake_reservation")
@mock.patch('cinder.objects.BackupList.get_all_by_volume')
@mock.patch.object(quota.QUOTAS, 'rollback')
@mock.patch.object(quota.QUOTAS, 'reserve')
def test_create_backup_failed_with_empty_backup_objects(
self, mock_reserve, mock_rollback, mock_get_backups):
backups = mock.Mock()
backups.objects = []
mock_get_backups.return_value = backups
is_incremental = True
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
mock_reserve.return_value = 'fake_reservation'
volume_id = self._create_volume_db_entry(status='available',
host='testhost#rbd',
size=1,
project_id="vol_proj_id")
self.assertRaises(exception.InvalidBackup,
self.api.create,
self.ctxt,
None, None,
volume_id, None,
incremental=is_incremental)
mock_rollback.assert_called_with(self.ctxt, "fake_reservation")
mock_get_backups.assert_called_once_with(
self.ctxt, volume_id, 'vol_proj_id',
filters={'project_id': 'fake_project'})
@mock.patch('cinder.db.backup_get_all_by_volume',
return_value=[v2_fakes.fake_backup('fake-1')])
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch.object(quota.QUOTAS, 'rollback')
@mock.patch.object(quota.QUOTAS, 'reserve')
def test_create_backup_failed_with_backup_status_not_available(
self, mock_reserve, mock_rollback, mock_get_service,
            mock_create, mock_get_backups):
is_incremental = True
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
mock_reserve.return_value = 'fake_reservation'
volume_id = self._create_volume_db_entry(status='available',
host='testhost#rbd',
size=1)
self.assertRaises(exception.InvalidBackup,
self.api.create,
self.ctxt,
None, None,
volume_id, None,
incremental=is_incremental)
mock_rollback.assert_called_with(self.ctxt, "fake_reservation")
|
mahak/cinder
|
cinder/tests/unit/backup/test_backup.py
|
Python
|
apache-2.0
| 101,556
|
from base import Base as BaseTestCase
from roletester.actions.swift import swift_container_create
from roletester.actions.swift import swift_container_delete
from roletester.actions.swift import swift_container_add_metadata
from roletester.actions.swift import swift_object_put
from roletester.actions.swift import swift_object_delete
from roletester.actions.swift import swift_object_get
from roletester.exc import SwiftClientException
from roletester.scenario import ScenarioFactory as Factory
from roletester.utils import randomname
from roletester.exc import SwiftForbidden
from roletester.log import logging
logger = logging.getLogger("roletester.swift")
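# Each factory below fixes an ordered sequence of Swift actions; the index
# constants let individual tests override the acting client or the expected
# exceptions for a single step via the factory's set() method.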
class SampleFactory(Factory):
_ACTIONS = [
swift_container_create,
swift_container_add_metadata,
swift_object_put,
swift_object_get,
swift_object_delete,
swift_container_delete
]
SWIFT_CONTAINER_CREATE = 0
SWIFT_CONTAINER_ADD_METADATA = 1
SWIFT_OBJECT_PUT = 2
SWIFT_OBJECT_GET = 3
SWIFT_OBJECT_DELETE = 4
SWIFT_CONTAINER_DELETE = 5
class SwiftCreateFactory(Factory):
_ACTIONS = [
swift_container_create,
swift_container_add_metadata,
swift_object_put,
swift_object_get,
]
SWIFT_CONTAINER_CREATE = 0
SWIFT_CONTAINER_ADD_METADATA = 1
SWIFT_OBJECT_PUT = 2
SWIFT_OBJECT_GET = 3
class SwiftContainerFactory(Factory):
_ACTIONS = [
swift_container_create,
]
SWIFT_CONTAINER_CREATE = 0
class SwiftObjectCreateFactory(Factory):
_ACTIONS = [
swift_container_create,
swift_object_put,
]
SWIFT_CONTAINER_CREATE = 0
SWIFT_OBJECT_PUT = 1
class TestSample(BaseTestCase):
project = randomname()
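    # A single randomly named project shared by every role scenario in this
    # class.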
def test_cloud_admin_all(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SampleFactory(cloud_admin) \
.produce() \
.run(context=self.context)
def test_cloud_admin_create_all(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
SwiftCreateFactory(cloud_admin) \
.produce() \
.run(context=self.context)
def test_bu_admin_all(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
SampleFactory(bu_admin) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'bu-admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-admin'
)
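        # Container and object are created by the Default-domain user; the
        # user from Domain2 is expected to be denied every follow-on
        # operation. The same pattern repeats for the other roles below.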
SampleFactory(bu_admin) \
.set(SampleFactory.SWIFT_CONTAINER_CREATE,
clients=creator) \
.set(SampleFactory.SWIFT_CONTAINER_ADD_METADATA,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_PUT,
clients=creator) \
.set(SampleFactory.SWIFT_OBJECT_GET,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_DELETE,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_CONTAINER_DELETE,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_all(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser'
)
SampleFactory(bu_admin) \
.produce() \
.run(context=self.context)
def test_bu_poweruser_different_domain(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'bu-poweruser'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-poweruser'
)
SampleFactory(bu_admin) \
.set(SampleFactory.SWIFT_CONTAINER_CREATE,
clients=creator) \
.set(SampleFactory.SWIFT_CONTAINER_ADD_METADATA,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_PUT,
clients=creator) \
.set(SampleFactory.SWIFT_OBJECT_GET,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_DELETE,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_CONTAINER_DELETE,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_cloud_support_all(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cloud-support'
)
SampleFactory(bu_admin) \
.produce() \
.run(context=self.context)
def test_cloud_support_different_domain(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-support'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'cloud-support'
)
SampleFactory(bu_admin) \
.set(SampleFactory.SWIFT_CONTAINER_CREATE,
clients=creator) \
.set(SampleFactory.SWIFT_CONTAINER_ADD_METADATA,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_PUT,
clients=creator) \
.set(SampleFactory.SWIFT_OBJECT_GET,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_DELETE,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_CONTAINER_DELETE,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_bu_brt_all(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
SampleFactory(bu_admin) \
.produce() \
.run(context=self.context)
def test_bu_brt_different_domain(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'bu-brt'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'bu-brt'
)
SampleFactory(bu_admin) \
.set(SampleFactory.SWIFT_CONTAINER_CREATE,
clients=creator) \
.set(SampleFactory.SWIFT_CONTAINER_ADD_METADATA,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_PUT,
clients=creator) \
.set(SampleFactory.SWIFT_OBJECT_GET,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_DELETE,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_CONTAINER_DELETE,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_cirt_create(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
SwiftContainerFactory(bu_admin) \
.set(SwiftContainerFactory.SWIFT_CONTAINER_CREATE, expected_exceptions=[SwiftForbidden]) \
.produce() \
.run(context=self.context)
def test_cirt_add_metadata(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
SwiftCreateFactory(creator) \
.set(SwiftCreateFactory.SWIFT_CONTAINER_ADD_METADATA, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_cirt_add_object(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
SwiftObjectCreateFactory(creator) \
.set(SwiftObjectCreateFactory.SWIFT_OBJECT_PUT, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_cirt_get_object(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
SampleFactory(creator) \
.set(SampleFactory.SWIFT_OBJECT_GET, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_cirt_delete_container(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'cirt'
)
SampleFactory(creator) \
.set(SampleFactory.SWIFT_CONTAINER_DELETE, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_DELETE, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_bu_user_create(self):
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
SwiftContainerFactory(bu_admin) \
.set(SwiftContainerFactory.SWIFT_CONTAINER_CREATE, expected_exceptions=[SwiftForbidden]) \
.produce() \
.run(context=self.context)
def test_bu_user_add_metadata(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
SwiftCreateFactory(creator) \
.set(SwiftCreateFactory.SWIFT_CONTAINER_ADD_METADATA, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_bu_user_add_object(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
SwiftObjectCreateFactory(creator) \
.set(SwiftObjectCreateFactory.SWIFT_OBJECT_PUT, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_bu_user_get_object(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
SampleFactory(creator) \
.set(SampleFactory.SWIFT_OBJECT_GET, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
def test_bu_user_delete_container(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'cloud-admin'
)
bu_admin = self.km.find_user_credentials(
'Default', self.project, 'bu-user'
)
SampleFactory(creator) \
.set(SampleFactory.SWIFT_CONTAINER_DELETE, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.set(SampleFactory.SWIFT_OBJECT_DELETE, clients=bu_admin,
expected_exceptions=[SwiftClientException]) \
.produce() \
.run(context=self.context)
|
chalupaul/roletester
|
roletester/roletests/swift_tests.py
|
Python
|
apache-2.0
| 12,214
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import random
import time
from oslo.config import cfg
import six
from neutron_lbaas.openstack.common._i18n import _, _LE, _LI
from neutron_lbaas.openstack.common import log as logging
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
def list_opts():
"""Entry point for oslo.config-generator."""
return [(None, copy.deepcopy(periodic_opts))]
class InvalidPeriodicTaskArg(Exception):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
1. Without arguments '@periodic_task', this will be run on the default
interval of 60 seconds.
2. With arguments:
@periodic_task(spacing=N [, run_immediately=[True|False]])
           this will be run approximately every N seconds. If this number is
negative the periodic task will be disabled. If the run_immediately
argument is provided and has a value of 'True', the first run of the
           task will be shortly after the task scheduler starts. If
run_immediately is omitted or set to 'False', the first time the
task runs will be approximately N seconds after the task scheduler
starts.
"""
def decorator(f):
# Test for old style invocation
if 'ticks_between_runs' in kwargs:
raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
# Control if run at all
f._periodic_task = True
f._periodic_external_ok = kwargs.pop('external_process_ok', False)
if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
f._periodic_enabled = False
else:
f._periodic_enabled = kwargs.pop('enabled', True)
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
f._periodic_immediate = kwargs.pop('run_immediately', False)
if f._periodic_immediate:
f._periodic_last_run = None
else:
f._periodic_last_run = time.time()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
    # and without parentheses.
#
    # In the 'with-parentheses' case (with kwargs present), this function needs
# to return a decorator function since the interpreter will invoke it like:
#
# periodic_task(*args, **kwargs)(f)
#
    # In the 'without-parentheses' case, the original function will be passed
# in as the first argument, like:
#
# periodic_task(f)
if kwargs:
return decorator
else:
return decorator(args[0])
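# A minimal usage sketch of the decorator above. The manager class and task
# names are hypothetical, not part of this module; tasks defined this way are
# later driven by PeriodicTasks.run_periodic_tasks().
#
#     class ExampleManager(PeriodicTasks):
#
#         @periodic_task
#         def report_state(self, context):
#             pass  # runs roughly every DEFAULT_INTERVAL (60) seconds
#
#         @periodic_task(spacing=30, run_immediately=True)
#         def sync_resources(self, context):
#             pass  # first run shortly after start-up, then ~every 30 seconds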
class _PeriodicTasksMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
        # class, so go ahead and initialize it. If the attribute is present,
# then we're a subclass so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic_tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
if task._periodic_spacing < 0:
LOG.info(_LI('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
continue
if not task._periodic_enabled:
LOG.info(_LI('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
continue
# A periodic spacing of zero indicates that this task should
# be run on the default interval to avoid running too
# frequently.
if task._periodic_spacing == 0:
task._periodic_spacing = DEFAULT_INTERVAL
cls._periodic_tasks.append((name, task))
cls._periodic_spacing[name] = task._periodic_spacing
def _nearest_boundary(last_run, spacing):
"""Find nearest boundary which is in the past, which is a multiple of the
spacing with the last run as an offset.
Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
31, 38...
0% to 5% of the spacing value will be added to this value to ensure tasks
do not synchronize. This jitter is rounded to the nearest second, this
means that spacings smaller than 20 seconds will not have jitter.
"""
current_time = time.time()
if last_run is None:
return current_time
delta = current_time - last_run
offset = delta % spacing
# Add up to 5% jitter
jitter = int(spacing * (random.random() / 20))
return current_time - offset + jitter
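# Worked example (numbers chosen for illustration): with last_run=100,
# spacing=60 and time.time()=250, delta is 150 and offset is 150 % 60 = 30,
# so the function returns roughly 220 (the past boundary 100 + 2 * 60) plus a
# jitter of int(60 * random.random() / 20), i.e. 0, 1 or 2 seconds.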
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
def __init__(self):
super(PeriodicTasks, self).__init__()
self._periodic_last_run = {}
for name, task in self._periodic_tasks:
self._periodic_last_run[name] = task._periodic_last_run
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
spacing = self._periodic_spacing[task_name]
last_run = self._periodic_last_run[task_name]
# Check if due, if not skip
idle_for = min(idle_for, spacing)
if last_run is not None:
delta = last_run + spacing - time.time()
if delta > 0:
idle_for = min(idle_for, delta)
continue
LOG.debug("Running periodic task %(full_task_name)s",
{"full_task_name": full_task_name})
self._periodic_last_run[task_name] = _nearest_boundary(
last_run, spacing)
try:
task(self, context)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
{"full_task_name": full_task_name, "e": e})
time.sleep(0)
return idle_for
|
citrix-openstack-build/neutron-lbaas
|
neutron_lbaas/openstack/common/periodic_task.py
|
Python
|
apache-2.0
| 7,750
|
"""Provides device triggers for sensors."""
import voluptuous as vol
import homeassistant.components.automation.numeric_state as numeric_state_automation
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ABOVE,
CONF_BELOW,
CONF_ENTITY_ID,
CONF_FOR,
CONF_TYPE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
)
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers import config_validation as cv
from . import DOMAIN
# mypy: allow-untyped-defs, no-check-untyped-defs
DEVICE_CLASS_NONE = "none"
CONF_BATTERY_LEVEL = "battery_level"
CONF_HUMIDITY = "humidity"
CONF_ILLUMINANCE = "illuminance"
CONF_POWER = "power"
CONF_PRESSURE = "pressure"
CONF_SIGNAL_STRENGTH = "signal_strength"
CONF_TEMPERATURE = "temperature"
CONF_TIMESTAMP = "timestamp"
CONF_VALUE = "value"
ENTITY_TRIGGERS = {
DEVICE_CLASS_BATTERY: [{CONF_TYPE: CONF_BATTERY_LEVEL}],
DEVICE_CLASS_HUMIDITY: [{CONF_TYPE: CONF_HUMIDITY}],
DEVICE_CLASS_ILLUMINANCE: [{CONF_TYPE: CONF_ILLUMINANCE}],
DEVICE_CLASS_POWER: [{CONF_TYPE: CONF_POWER}],
DEVICE_CLASS_PRESSURE: [{CONF_TYPE: CONF_PRESSURE}],
DEVICE_CLASS_SIGNAL_STRENGTH: [{CONF_TYPE: CONF_SIGNAL_STRENGTH}],
DEVICE_CLASS_TEMPERATURE: [{CONF_TYPE: CONF_TEMPERATURE}],
DEVICE_CLASS_TIMESTAMP: [{CONF_TYPE: CONF_TIMESTAMP}],
DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_VALUE}],
}
TRIGGER_SCHEMA = vol.All(
TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(
[
CONF_BATTERY_LEVEL,
CONF_HUMIDITY,
CONF_ILLUMINANCE,
CONF_POWER,
CONF_PRESSURE,
CONF_SIGNAL_STRENGTH,
CONF_TEMPERATURE,
CONF_TIMESTAMP,
CONF_VALUE,
]
),
vol.Optional(CONF_BELOW): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_ABOVE): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
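# Illustrative trigger configuration accepted by TRIGGER_SCHEMA (a sketch
# only: the device_id and entity_id values are made up, and the platform,
# domain and device_id keys are assumed to come from TRIGGER_BASE_SCHEMA):
#
#     {
#         "platform": "device",
#         "domain": "sensor",
#         "device_id": "abc123",
#         "entity_id": "sensor.kitchen_temperature",
#         "type": "temperature",
#         "above": 21.5,
#     }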
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
numeric_state_config = {
numeric_state_automation.CONF_PLATFORM: "numeric_state",
numeric_state_automation.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
}
if CONF_ABOVE in config:
numeric_state_config[numeric_state_automation.CONF_ABOVE] = config[CONF_ABOVE]
if CONF_BELOW in config:
numeric_state_config[numeric_state_automation.CONF_BELOW] = config[CONF_BELOW]
if CONF_FOR in config:
numeric_state_config[CONF_FOR] = config[CONF_FOR]
numeric_state_config = numeric_state_automation.TRIGGER_SCHEMA(numeric_state_config)
return await numeric_state_automation.async_attach_trigger(
hass, numeric_state_config, action, automation_info, platform_type="device"
)
async def async_get_triggers(hass, device_id):
"""List device triggers."""
triggers = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == DOMAIN
]
for entry in entries:
device_class = DEVICE_CLASS_NONE
state = hass.states.get(entry.entity_id)
unit_of_measurement = (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if state else None
)
if not state or not unit_of_measurement:
continue
if ATTR_DEVICE_CLASS in state.attributes:
device_class = state.attributes[ATTR_DEVICE_CLASS]
templates = ENTITY_TRIGGERS.get(
device_class, ENTITY_TRIGGERS[DEVICE_CLASS_NONE]
)
triggers.extend(
(
{
**automation,
"platform": "device",
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
}
for automation in templates
)
)
return triggers
async def async_get_trigger_capabilities(hass, config):
"""List trigger capabilities."""
state = hass.states.get(config[CONF_ENTITY_ID])
unit_of_measurement = (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if state else None
)
if not state or not unit_of_measurement:
raise InvalidDeviceAutomationConfig
return {
"extra_fields": vol.Schema(
{
vol.Optional(
CONF_ABOVE, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(
CONF_BELOW, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
}
|
joopert/home-assistant
|
homeassistant/components/sensor/device_trigger.py
|
Python
|
apache-2.0
| 5,504
|
import collections
import toml
from dcos import util
from dcos.errors import DCOSException
def load_from_path(path, mutable=False):
"""Loads a TOML file from the path
:param path: Path to the TOML file
:type path: str
:param mutable: True if the returned Toml object should be mutable
:type mutable: boolean
:returns: Map for the configuration file
:rtype: Toml | MutableToml
"""
util.ensure_file_exists(path)
with util.open_file(path, 'r') as config_file:
try:
toml_obj = toml.loads(config_file.read())
except Exception as e:
raise DCOSException(
'Error parsing config file at [{}]: {}'.format(path, e))
return (MutableToml if mutable else Toml)(toml_obj)
def save(toml_config):
"""
:param toml_config: TOML configuration object
:type toml_config: MutableToml or Toml
"""
serial = toml.dumps(toml_config._dictionary)
path = util.get_config_path()
with util.open_file(path, 'w') as config_file:
config_file.write(serial)
def _get_path(config, path):
"""
:param config: Dict with the configuration values
:type config: dict
:param path: Path to the value. E.g. 'path.to.value'
:type path: str
:returns: Value stored at the given path
:rtype: double, int, str, list or dict
"""
for section in path.split('.'):
config = config[section]
return config
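# For illustration (hypothetical data):
#     _get_path({'core': {'email': 'user@example.com'}}, 'core.email')
# returns 'user@example.com'; a missing segment raises KeyError.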
def _iterator(parent, dictionary):
"""
:param parent: Path to the value parameter
:type parent: str
:param dictionary: Value of the key
:type dictionary: collection.Mapping
:returns: An iterator of tuples for each property and value
:rtype: iterator of (str, any) where any can be str, int, double, list
"""
for key, value in dictionary.items():
new_key = key
if parent is not None:
new_key = "{}.{}".format(parent, key)
if not isinstance(value, collections.Mapping):
yield (new_key, value)
else:
for x in _iterator(new_key, value):
yield x
class Toml(collections.Mapping):
"""Class for getting value from TOML.
:param dictionary: configuration dictionary
:type dictionary: dict
"""
def __init__(self, dictionary):
self._dictionary = dictionary
def __getitem__(self, path):
"""
:param path: Path to the value. E.g. 'path.to.value'
:type path: str
:returns: Value stored at the given path
:rtype: double, int, str, list or dict
"""
config = _get_path(self._dictionary, path)
if isinstance(config, collections.Mapping):
return Toml(config)
else:
return config
def __iter__(self):
"""
:returns: Dictionary iterator
:rtype: iterator
"""
return iter(self._dictionary)
def property_items(self):
"""Iterator for full-path keys and values
        :returns: Iterator for full-path keys and values
:rtype: iterator of tuples
"""
return _iterator(None, self._dictionary)
def __len__(self):
"""
:returns: The length of the dictionary
:rtype: int
"""
return len(self._dictionary)
class MutableToml(collections.MutableMapping):
"""Class for managing CLI configuration through TOML.
:param dictionary: configuration dictionary
:type dictionary: dict
"""
def __init__(self, dictionary):
self._dictionary = dictionary
def __getitem__(self, path):
"""
:param path: Path to the value. E.g. 'path.to.value'
:type path: str
:returns: Value stored at the given path
:rtype: double, int, str, list or dict
"""
config = _get_path(self._dictionary, path)
if isinstance(config, collections.MutableMapping):
return MutableToml(config)
else:
return config
def __iter__(self):
"""
:returns: Dictionary iterator
:rtype: iterator
"""
return iter(self._dictionary)
def property_items(self):
"""Iterator for full-path keys and values
        :returns: Iterator for full-path keys and values
:rtype: iterator of tuples
"""
return _iterator(None, self._dictionary)
def __len__(self):
"""
:returns: The length of the dictionary
:rtype: int
"""
return len(self._dictionary)
def __setitem__(self, path, value):
"""
:param path: Path to set
:type path: str
:param value: Value to store
:type value: double, int, str, list or dict
"""
config = self._dictionary
sections = path.split('.')
for section in sections[:-1]:
config = config.setdefault(section, {})
config[sections[-1]] = value
def __delitem__(self, path):
"""
:param path: Path to delete
:type path: str
"""
config = self._dictionary
sections = path.split('.')
for section in sections[:-1]:
config = config[section]
del config[sections[-1]]
|
Yhgenomics/dcos-cli
|
dcos/config.py
|
Python
|
apache-2.0
| 5,246
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova common internal object model"""
import collections
import contextlib
import copy
import datetime
import functools
import traceback
import netaddr
from oslo import messaging
from oslo.utils import timeutils
import six
from nova import context
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.objects import fields
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils
from nova import utils
LOG = logging.getLogger('object')
class NotSpecifiedSentinel:
pass
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_' + name
def make_class_properties(cls):
# NOTE(danms/comstud): Inherit fields from super classes.
# mro() returns the current class first and returns 'object' last, so
# those can be skipped. Also be careful to not overwrite any fields
# that already exist. And make sure each cls has its own copy of
# fields and that it is not sharing the dict with a super class.
cls.fields = dict(cls.fields)
for supercls in cls.mro()[1:-1]:
if not hasattr(supercls, 'fields'):
continue
for name, field in supercls.fields.items():
if name not in cls.fields:
cls.fields[name] = field
for name, field in cls.fields.iteritems():
if not isinstance(field, fields.Field):
raise exception.ObjectFieldInvalid(
field=name, objname=cls.obj_name())
def getter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, field=field):
attrname = get_attrname(name)
field_value = field.coerce(self, name, value)
if field.read_only and hasattr(self, attrname):
                # Note(yjiang5): _from_db_object() may iterate every field
                # and write; no exception should be raised in that situation.
if getattr(self, attrname) != field_value:
raise exception.ReadOnlyFieldError(field=name)
else:
return
self._changed_fields.add(name)
try:
return setattr(self, attrname, field_value)
except Exception:
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr})
raise
setattr(cls, name, property(getter, setter))
class NovaObjectMetaclass(type):
"""Metaclass that allows tracking of object classes."""
# NOTE(danms): This is what controls whether object operations are
# remoted. If this is not None, use it to remote things over RPC.
indirection_api = None
def __init__(cls, names, bases, dict_):
if not hasattr(cls, '_obj_classes'):
# This means this is a base class using the metaclass. I.e.,
# the 'NovaObject' class.
cls._obj_classes = collections.defaultdict(list)
return
def _vers_tuple(obj):
return tuple([int(x) for x in obj.VERSION.split(".")])
# Add the subclass to NovaObject._obj_classes. If the
# same version already exists, replace it. Otherwise,
# keep the list with newest version first.
make_class_properties(cls)
obj_name = cls.obj_name()
for i, obj in enumerate(cls._obj_classes[obj_name]):
if cls.VERSION == obj.VERSION:
cls._obj_classes[obj_name][i] = cls
# Update nova.objects with this newer class.
setattr(objects, obj_name, cls)
break
if _vers_tuple(cls) > _vers_tuple(obj):
# Insert before.
cls._obj_classes[obj_name].insert(i, cls)
if i == 0:
# Later version than we've seen before. Update
# nova.objects.
setattr(objects, obj_name, cls)
break
else:
cls._obj_classes[obj_name].append(cls)
# Either this is the first time we've seen the object or it's
            # an older version than anything we've seen. Update nova.objects
# only if it's the first time we've seen this object name.
if not hasattr(objects, obj_name):
setattr(objects, obj_name, cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call rather
# than directly calling the implementation in the object. The object
# implementation on the remote end will then perform the requested
# action and the result will be returned here.
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if NovaObject.indirection_api:
result = NovaObject.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, NovaObject):
result._context = context
return result
# NOTE(danms): Make this discoverable
wrapper.remotable = True
wrapper.original_fn = fn
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
ctxt = self._context
try:
if isinstance(args[0], (context.RequestContext)):
ctxt = args[0]
args = args[1:]
except IndexError:
pass
if ctxt is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
# Force this to be set if it wasn't before.
self._context = ctxt
if NovaObject.indirection_api:
updates, result = NovaObject.indirection_api.object_action(
ctxt, self, fn.__name__, args, kwargs)
for key, value in updates.iteritems():
if key in self.fields:
field = self.fields[key]
# NOTE(ndipanov): Since NovaObjectSerializer will have
# deserialized any object fields into objects already,
# we do not try to deserialize them again here.
if isinstance(value, NovaObject):
self[key] = value
else:
self[key] = field.from_primitive(self, key, value)
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, ctxt, *args, **kwargs)
wrapper.remotable = True
wrapper.original_fn = fn
return wrapper
@six.add_metaclass(NovaObjectMetaclass)
class NovaObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
#
# This version is allowed to have three parts, X.Y.Z, where the .Z element
# is reserved for stable branch backports. The .Z is ignored for the
# purposes of triggering a backport, which means anything changed under
# a .Z must be additive and non-destructive such that a node that knows
# about X.Y can consider X.Y.Z equivalent.
VERSION = '1.0'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': fields.IntegerField(),
# 'bar': fields.StringField(),
# }
fields = {}
obj_extra_fields = []
# Table of sub-object versioning information
#
# This contains a list of version mappings, by the field name of
# the subobject. The mappings must be in order of oldest to
# newest, and are tuples of (my_version, subobject_version). A
# request to backport this object to $my_version will cause the
# subobject to be backported to $subobject_version.
#
# obj_relationships = {
# 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
# 'subobject2': [('1.2', '1.0')],
# }
#
# In the above example:
#
# - If we are asked to backport our object to version 1.3,
# subobject1 will be backported to version 1.1, since it was
# bumped to version 1.2 when our version was 1.4.
# - If we are asked to backport our object to version 1.5,
# no changes will be made to subobject1 or subobject2, since
# they have not changed since version 1.4.
# - If we are asked to backlevel our object to version 1.1, we
# will remove both subobject1 and subobject2 from the primitive,
# since they were not added until version 1.2.
obj_relationships = {}
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
return '%s(%s)' % (
self.obj_name(),
','.join(['%s=%s' % (name,
(self.obj_attr_is_set(name) and
field.stringify(getattr(self, name)) or
'<?>'))
for name, field in sorted(self.fields.items())]))
@classmethod
def obj_name(cls):
"""Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in cls._obj_classes:
LOG.error(_LE('Unable to instantiate unregistered object type '
'%(objtype)s'), dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
# NOTE(comstud): If there's not an exact match, return the highest
# compatible version. The objects stored in the class are sorted
# such that highest version is first, so only set compatible_match
# once below.
compatible_match = None
for objclass in cls._obj_classes[objname]:
if objclass.VERSION == objver:
return objclass
if (not compatible_match and
versionutils.is_compatible(objver, objclass.VERSION)):
compatible_match = objclass
if compatible_match:
return compatible_match
# As mentioned above, latest version is always first in the list.
latest_ver = cls._obj_classes[objname][0].VERSION
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = primitive['nova_object.data']
changes = primitive.get('nova_object.changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
if primitive['nova_object.namespace'] != 'nova':
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (primitive['nova_object.namespace'],
primitive['nova_object.name']))
objname = primitive['nova_object.name']
objver = primitive['nova_object.version']
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def _obj_make_obj_compatible(self, primitive, target_version, field):
"""Backlevel a sub-object based on our versioning rules.
This is responsible for backporting objects contained within
this object's primitive according to a set of rules we
maintain about version dependencies between objects. This
requires that the obj_relationships table in this object is
correct and up-to-date.
:param:primitive: The primitive version of this object
:param:target_version: The version string requested for this object
:param:field: The name of the field in this object containing the
sub-object to be backported
"""
def _do_backport(to_version):
obj = getattr(self, field)
if not obj:
return
if isinstance(obj, NovaObject):
obj.obj_make_compatible(
primitive[field]['nova_object.data'],
to_version)
primitive[field]['nova_object.version'] = to_version
elif isinstance(obj, list):
for i, element in enumerate(obj):
element.obj_make_compatible(
primitive[field][i]['nova_object.data'],
to_version)
primitive[field][i]['nova_object.version'] = to_version
target_version = utils.convert_version_to_tuple(target_version)
for index, versions in enumerate(self.obj_relationships[field]):
my_version, child_version = versions
my_version = utils.convert_version_to_tuple(my_version)
if target_version < my_version:
if index == 0:
# We're backporting to a version from before this
# subobject was added: delete it from the primitive.
del primitive[field]
else:
# We're in the gap between index-1 and index, so
# backport to the older version
last_child_version = \
self.obj_relationships[field][index - 1][1]
_do_backport(last_child_version)
return
elif target_version == my_version:
# This is the first mapping that satisfies the
# target_version request: backport the object.
_do_backport(child_version)
return
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc. In
general:
- If a new version of an object adds a field, this routine
should remove it for older versions.
- If a new version changed or restricted the format of a field, this
should convert it back to something a client knowing only of the
older version will tolerate.
- If an object that this object depends on is bumped, then this
object should also take a version bump. Then, this routine should
backlevel the dependent object (by calling its obj_make_compatible())
if the requested version of this object is older than the version
where the new dependent object was added.
:param:primitive: The result of self.obj_to_primitive()
:param:target_version: The version string requested by the recipient
of the object
:raises: nova.exception.UnsupportedObjectError if conversion
is not possible for some reason
"""
for key, field in self.fields.items():
if not isinstance(field, (fields.ObjectField,
fields.ListOfObjectsField)):
continue
if not self.obj_attr_is_set(key):
continue
if key not in self.obj_relationships:
# NOTE(danms): This is really a coding error and shouldn't
# happen unless we miss something
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='No rule for %s' % key)
self._obj_make_obj_compatible(primitive, target_version, key)
def obj_to_primitive(self, target_version=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
if target_version:
self.obj_make_compatible(primitive, target_version)
obj = {'nova_object.name': self.obj_name(),
'nova_object.namespace': 'nova',
'nova_object.version': target_version or self.VERSION,
'nova_object.data': primitive}
if self.obj_what_changed():
obj['nova_object.changes'] = list(self.obj_what_changed())
return obj
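    # For illustration only (hypothetical object and field names): an object
    # named "MyObject" at version '1.0' with a single changed field foo=1
    # serializes to roughly:
    #
    #     {'nova_object.name': 'MyObject',
    #      'nova_object.namespace': 'nova',
    #      'nova_object.version': '1.0',
    #      'nova_object.data': {'foo': 1},
    #      'nova_object.changes': ['foo']}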
def obj_set_defaults(self, *attrs):
if not attrs:
attrs = [name for name, field in self.fields.items()
if field.default != fields.UnspecifiedDefault]
for attr in attrs:
default = self.fields[attr].default
if default is fields.UnspecifiedDefault:
raise exception.ObjectActionError(
action='set_defaults',
reason='No default set for field %s' % attr)
setattr(self, attr, default)
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should use self._conductor, and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError('Cannot save anything in the base class')
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set(self._changed_fields)
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(getattr(self, field), NovaObject) and
getattr(self, field).obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = getattr(self, key)
return changes
def obj_reset_changes(self, fields=None):
"""Reset the list of fields that have been changed.
Note that this is NOT "revert to previous values"
"""
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, get_attrname(attrname))
@property
def obj_fields(self):
return self.fields.keys() + self.obj_extra_fields
class NovaObjectDictCompat(object):
"""Mix-in to provide dictionary key access compat
If an object needs to support attribute access using
dictionary items instead of object attributes, inherit
from this class. This should only be used as a temporary
measure until all callers are converted to use modern
attribute access.
NOTE(berrange) This class will eventually be deleted.
"""
# dictish syntactic sugar
def iteritems(self):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name, getattr(self, name)
items = lambda self: list(self.iteritems())
def __getitem__(self, name):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
return getattr(self, name)
def __setitem__(self, name, value):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
setattr(self, name, value)
def __contains__(self, name):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
try:
return self.obj_attr_is_set(name)
except AttributeError:
return False
def get(self, key, value=NotSpecifiedSentinel):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
if key not in self.obj_fields:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__, key))
if value != NotSpecifiedSentinel and not self.obj_attr_is_set(key):
return value
else:
return getattr(self, key)
def update(self, updates):
"""For backwards-compatibility with dict-base objects.
NOTE(danms): May be removed in the future.
"""
for key, value in updates.items():
setattr(self, key, value)
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for all persistent objects.
"""
fields = {
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'deleted': fields.BooleanField(default=False),
}
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
class ObjectListBase(object):
"""Mixin class for lists of objects.
This mixin class can be added as a base class for an object that
is implementing a list of objects. It adds a single field of 'objects',
which is the list store, and behaves like a list itself. It supports
serialization of the list of objects automatically.
"""
fields = {
'objects': fields.ListOfObjectsField('NovaObject'),
}
# This is a dictionary of my_version:child_version mappings so that
# we can support backleveling our contents based on the version
# requested of the list object.
child_versions = {}
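    # For example (versions are illustrative):
    #
    #     child_versions = {'1.1': '1.0', '1.2': '1.1'}
    #
    # means a request to serialize this list at version 1.1 backports each
    # contained object to 1.0, while a request for 1.2 backports them to 1.1.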
def __init__(self, *args, **kwargs):
super(ObjectListBase, self).__init__(*args, **kwargs)
if 'objects' not in kwargs:
self.objects = []
self._changed_fields.discard('objects')
def __iter__(self):
"""List iterator interface."""
return iter(self.objects)
def __len__(self):
"""List length."""
return len(self.objects)
def __getitem__(self, index):
"""List index access."""
if isinstance(index, slice):
new_obj = self.__class__()
new_obj.objects = self.objects[index]
# NOTE(danms): We must be mixed in with a NovaObject!
new_obj.obj_reset_changes()
new_obj._context = self._context
return new_obj
return self.objects[index]
def __contains__(self, value):
"""List membership test."""
return value in self.objects
def count(self, value):
"""List count of value occurrences."""
return self.objects.count(value)
def index(self, value):
"""List index of value."""
return self.objects.index(value)
def sort(self, cmp=None, key=None, reverse=False):
self.objects.sort(cmp=cmp, key=key, reverse=reverse)
def obj_make_compatible(self, primitive, target_version):
primitives = primitive['objects']
child_target_version = self.child_versions.get(target_version, '1.0')
for index, item in enumerate(self.objects):
self.objects[index].obj_make_compatible(
primitives[index]['nova_object.data'],
child_target_version)
primitives[index]['nova_object.version'] = child_target_version
def obj_what_changed(self):
changes = set(self._changed_fields)
for child in self.objects:
if child.obj_what_changed():
changes.add('objects')
return changes
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except exception.IncompatibleObjectVersion as e:
objver = objprim['nova_object.version']
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim['nova_object.version'] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
objinst = self.conductor.object_backport(context, objprim,
e.kwargs['supported'])
return objinst
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms): A set can't have an unhashable value inside, such as
# a dict. Convert sets to tuples, which is fine, since we can't
# send them over RPC anyway.
if iterable == set:
iterable = tuple
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def serialize_args(fn):
"""Decorator that will do the arguments serialization before remoting."""
def wrapper(obj, *args, **kwargs):
for kw in kwargs:
value_arg = kwargs.get(kw)
if kw == 'exc_val' and value_arg:
kwargs[kw] = str(value_arg)
elif kw == 'exc_tb' and (
not isinstance(value_arg, six.string_types) and value_arg):
kwargs[kw] = ''.join(traceback.format_tb(value_arg))
elif isinstance(value_arg, datetime.datetime):
kwargs[kw] = timeutils.isotime(value_arg)
if hasattr(fn, '__call__'):
return fn(obj, *args, **kwargs)
# NOTE(danms): We wrap a descriptor, so use that protocol
return fn.__get__(None, obj)(*args, **kwargs)
# NOTE(danms): Make this discoverable
wrapper.remotable = getattr(fn, 'remotable', False)
wrapper.original_fn = fn
return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
else classmethod(wrapper))
|
Metaswitch/calico-nova
|
nova/objects/base.py
|
Python
|
apache-2.0
| 34,663
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import logging
from operator import attrgetter
import sys
from django import forms
from django import template
from django.conf import settings
from django.contrib import messages
from django.core import urlresolvers
from django.utils import http
from django.utils.datastructures import SortedDict
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from horizon import exceptions
from .actions import FilterAction, LinkAction
LOG = logging.getLogger(__name__)
STRING_SEPARATOR = "__"
class Column(object):
""" A class which represents a single column in a :class:`.DataTable`.
.. attribute:: transform
A string or callable. If ``transform`` is a string, it should be the
name of the attribute on the underlying data class which
should be displayed in this column. If it is a callable, it
will be passed the current row's data at render-time and should
return the contents of the cell. Required.
.. attribute:: verbose_name
The name for this column which should be used for display purposes.
Defaults to the value of ``transform`` with the first letter
of each word capitalized.
.. attribute:: sortable
Boolean to determine whether this column should be sortable or not.
Defaults to False.
.. attribute:: hidden
Boolean to determine whether or not this column should be displayed
when rendering the table. Default: ``False``.
.. attribute:: link
A string or callable which returns a URL which will be wrapped around
this column's text as a link.
.. attribute:: status
Boolean designating whether or not this column represents a status
(i.e. "enabled/disabled", "up/down", "active/inactive").
Default: ``False``.
.. attribute:: status_choices
A tuple of tuples representing the possible data values for the
status column and their associated boolean equivalent. Positive
states should equate to ``True``, negative states should equate
to ``False``, and indeterminate states should be ``None``.
Values are compared in a case-insensitive manner.
Example (these are also the default values)::
            status_choices = (
                ('enabled', True),
                ('true', True),
                ('up', True),
                ('active', True),
                ('on', True),
                ('none', None),
                ('unknown', None),
                ('', None),
                ('disabled', False),
                ('down', False),
                ('false', False),
                ('inactive', False),
                ('off', False),
            )
.. attribute:: empty_value
A string to be used for cells which have no data. Defaults to an
empty string.
.. attribute:: filters
A list of functions (often template filters) to be applied to the
value of the data for this column prior to output. This is effectively
a shortcut for writing a custom ``transform`` function in simple cases.
"""
# Used to retain order when instantiating columns on a table
creation_counter = 0
# Used for special auto-generated columns
auto = None
transform = None
name = None
verbose_name = None
status_choices = (
('enabled', True),
('true', True),
('up', True),
('active', True),
('on', True),
('none', None),
('unknown', None),
('', None),
('disabled', False),
('down', False),
('false', False),
('inactive', False),
('off', False),
)
def __init__(self, transform, verbose_name=None, sortable=False,
link=None, hidden=False, attrs=None, status=False,
status_choices=None, empty_value=None, filters=None):
if callable(transform):
self.transform = transform
self.name = transform.__name__
else:
self.transform = unicode(transform)
self.name = self.transform
self.sortable = sortable
# Empty string is a valid value for verbose_name
if verbose_name is None:
verbose_name = self.transform.title()
else:
verbose_name = verbose_name
self.verbose_name = unicode(verbose_name)
self.link = link
self.hidden = hidden
self.status = status
self.empty_value = empty_value or ''
self.filters = filters or []
if status_choices:
self.status_choices = status_choices
self.creation_counter = Column.creation_counter
Column.creation_counter += 1
self.attrs = {"classes": []}
self.attrs.update(attrs or {})
# Make sure we have a mutable list.
self.attrs['classes'] = list(self.attrs['classes'])
if self.sortable:
self.attrs['classes'].append("sortable")
if self.hidden:
self.attrs['classes'].append("hide")
def __unicode__(self):
return self.verbose_name
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
def get_data(self, datum):
"""
Returns the appropriate data for this column from the given input.
The return value will be either the attribute specified for this column
        or the return value of the :attr:`~horizon.tables.Column.transform`
method for this column.
"""
datum_id = self.table.get_object_id(datum)
if datum_id in self.table._data_cache[self]:
return self.table._data_cache[self][datum_id]
# Callable transformations
if callable(self.transform):
data = self.transform(datum)
# Basic object lookups
elif hasattr(datum, self.transform):
data = getattr(datum, self.transform, None)
# Dict lookups
elif isinstance(datum, collections.Iterable) and \
self.transform in datum:
data = datum.get(self.transform)
else:
if settings.DEBUG:
messages.error(self.table._meta.request,
_("The attribute %(attr)s doesn't exist on "
"%(obj)s.") % {'attr': self.transform,
'obj': datum})
data = None
for filter_func in self.filters:
data = filter_func(data)
self.table._data_cache[self][datum_id] = data
return self.table._data_cache[self][datum_id]
def get_classes(self):
""" Returns a flattened string of the column's CSS classes. """
return " ".join(self.attrs['classes'])
def get_link_url(self, datum):
""" Returns the final value for the column's ``link`` property.
If ``link`` is a callable, it will be passed the current data object
and should return a URL. Otherwise ``get_link_url`` will attempt to
call ``reverse`` on ``link`` with the object's id as a parameter.
Failing that, it will simply return the value of ``link``.
"""
obj_id = self.table.get_object_id(datum)
if callable(self.link):
return self.link(datum)
try:
return urlresolvers.reverse(self.link, args=(obj_id,))
except urlresolvers.NoReverseMatch:
return self.link
class Row(object):
""" Represents a row in the table.
When iterated, the ``Row`` instance will yield each of its cells.
.. attribute:: table
The table which this row belongs to.
.. attribute:: datum
The data object which this row represents.
.. attribute:: id
A string uniquely representing this row composed of the table name
and the row data object's identifier.
.. attribute:: cells
The cells belonging to this row stored in a ``SortedDict`` object.
This attribute is populated during instantiation.
.. attribute:: status
Boolean value representing the status of this row according
to the value of the table's ``status_column`` value if it is set.
.. attribute:: status_class
Returns a css class for the status of the row based on ``status``.
"""
def __init__(self, table, datum):
self.table = table
self.datum = datum
id_vals = {"table": self.table.name,
"sep": STRING_SEPARATOR,
"id": table.get_object_id(datum)}
self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals
# Compile all the cells on instantiation
cells = []
for column in table.columns.values():
if column.auto == "multi_select":
widget = forms.CheckboxInput(check_test=False)
# Convert value to string to avoid accidental type conversion
data = widget.render('object_ids',
str(table.get_object_id(datum)))
table._data_cache[column][table.get_object_id(datum)] = data
elif column.auto == "actions":
data = table.render_row_actions(datum)
table._data_cache[column][table.get_object_id(datum)] = data
else:
data = column.get_data(datum)
cell = Cell(datum, data, column, self)
cells.append((column.name or column.auto, cell))
self.cells = SortedDict(cells)
@property
def status(self):
column_name = self.table._meta.status_column
if column_name:
return self.cells[column_name].status
@property
def status_class(self):
column_name = self.table._meta.status_column
if column_name:
return self.cells[column_name].get_status_class(self.status)
else:
return ''
def get_cells(self):
""" Returns the bound cells for this row in order. """
return self.cells.values()
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.id)
def __iter__(self):
return iter(self.cells.values())
class Cell(object):
""" Represents a single cell in the table. """
def __init__(self, datum, data, column, row, attrs=None):
self.datum = datum
self.data = data
self.column = column
self.row = row
self.attrs = {'classes': []}
self.attrs.update(attrs or {})
def __repr__(self):
return '<%s: %s, %s>' % (self.__class__.__name__,
self.column.name,
self.row.id)
@property
def value(self):
"""
Returns a formatted version of the data for final output.
This takes into consideration the
        :attr:`~horizon.tables.Column.link` and
:attr:`~horizon.tables.Column.empty_value`
attributes.
"""
try:
data = self.column.get_data(self.datum) or self.column.empty_value
except Exception as ex:
data = None
exc_info = sys.exc_info()
raise template.TemplateSyntaxError, exc_info[1], exc_info[2]
if self.column.link:
url = self.column.get_link_url(self.datum)
# Escape the data inside while allowing our HTML to render
data = mark_safe('<a href="%s">%s</a>' % (url, escape(data)))
return data
@property
def status(self):
""" Gets the status for the column based on the cell's data. """
        # Deal with status column mechanics based on this cell's data
if hasattr(self, '_status'):
return self._status
self._status = None
if self.column.status or \
self.column.table._meta.status_column == self.column.name:
status_matches = [status[1] for status in
self.column.status_choices if
str(self.data).lower() == status[0]]
try:
self._status = status_matches[0]
except IndexError:
LOG.exception('The value "%s" of the data in the status '
'column didn\'t match any value in '
'status_choices' % str(self.data).lower())
return self._status
def get_status_class(self, status):
""" Returns a css class name determined by the status value. """
if status is True:
return "status_up"
elif status is False:
return "status_down"
else:
return "status_unknown"
def get_classes(self):
""" Returns a flattened string of the cell's CSS classes. """
union = set(self.attrs['classes']) | set(self.column.attrs['classes'])
if self.column.status:
union.add(self.get_status_class(self.status))
return " ".join(union)
class DataTableOptions(object):
""" Contains options for :class:`.DataTable` objects.
.. attribute:: name
A short name or slug for the table.
.. attribute:: verbose_name
A more verbose name for the table meant for display purposes.
.. attribute:: columns
A list of column objects or column names. Controls ordering/display
of the columns in the table.
.. attribute:: table_actions
A list of action classes derived from the :class:`.Action` class.
These actions will handle tasks such as bulk deletion, etc. for
multiple objects at once.
.. attribute:: row_actions
A list similar to ``table_actions`` except tailored to appear for
each row. These actions act on a single object at a time.
.. attribute:: actions_column
Boolean value to control rendering of an additional column containing
the various actions for each row. Defaults to ``True`` if any actions
are specified in the ``row_actions`` option.
.. attribute:: multi_select
Boolean value to control rendering of an extra column with checkboxes
for selecting multiple objects in the table. Defaults to ``True`` if
any actions are specified in the ``table_actions`` option.
.. attribute:: filter
Boolean value to control the display of the "filter" search box
in the table actions. By default it checks whether or not an instance
of :class:`.FilterAction` is in :attr:`.table_actions`.
.. attribute:: template
String containing the template which should be used to render the
table. Defaults to ``"horizon/common/_data_table.html"``.
.. attribute:: context_var_name
The name of the context variable which will contain the table when
it is rendered. Defaults to ``"table"``.
.. attribute:: status_column
The name of a column on this table which represents the "state"
        of the data object being represented. The column must already be
designated as a status column by passing the ``status=True``
parameter to the column.
If ``status_column`` is set, when the rows are rendered the value
of this column will be used to add an extra class to the row in
the form of ``"status_up"`` or ``"status_down"`` for that row's
data.
This is useful for displaying the enabled/disabled status of a
service, for example.
"""
def __init__(self, options):
self.name = getattr(options, 'name', self.__class__.__name__)
verbose_name = getattr(options, 'verbose_name', None) \
or self.name.title()
self.verbose_name = unicode(verbose_name)
self.columns = getattr(options, 'columns', None)
self.status_column = getattr(options, 'status_column', None)
self.table_actions = getattr(options, 'table_actions', [])
self.row_actions = getattr(options, 'row_actions', [])
# Set self.filter if we have any FilterActions
filter_actions = [action for action in self.table_actions if
issubclass(action, FilterAction)]
if len(filter_actions) > 1:
            raise NotImplementedError("Multiple filter actions are not "
                                      "currently supported.")
self.filter = getattr(options, 'filter', len(filter_actions) > 0)
if len(filter_actions) == 1:
self._filter_action = filter_actions.pop()
else:
self._filter_action = None
self.template = 'horizon/common/_data_table.html'
self.row_actions_template = \
'horizon/common/_data_table_row_actions.html'
self.table_actions_template = \
'horizon/common/_data_table_table_actions.html'
self.context_var_name = unicode(getattr(options,
                                                'context_var_name',
'table'))
self.actions_column = getattr(options,
'actions_column',
len(self.row_actions) > 0)
self.multi_select = getattr(options,
'multi_select',
len(self.table_actions) > 0)
# Set runtime table defaults; not configurable.
self.has_more_data = False
class DataTableMetaclass(type):
""" Metaclass to add options to DataTable class and collect columns. """
def __new__(mcs, name, bases, attrs):
# Process options from Meta
attrs["_meta"] = opts = DataTableOptions(attrs.get("Meta", None))
# Gather columns; this prevents the column from being an attribute
# on the DataTable class and avoids naming conflicts.
columns = [(column_name, attrs.pop(column_name)) for \
column_name, obj in attrs.items() \
if isinstance(obj, Column)]
# add a name attribute to each column
for column_name, column in columns:
column.name = column_name
columns.sort(key=lambda x: x[1].creation_counter)
# Iterate in reverse to preserve final order
for base in bases[::-1]:
if hasattr(base, 'base_columns'):
columns = base.base_columns.items() + columns
attrs['base_columns'] = SortedDict(columns)
if opts.columns:
# Remove any columns that weren't declared if we're being explicit
# NOTE: we're iterating a COPY of the list here!
for column_data in columns[:]:
if column_data[0] not in opts.columns:
columns.pop(columns.index(column_data))
# Re-order based on declared columns
columns.sort(key=lambda x: attrs['_meta'].columns.index(x[0]))
# Add in our auto-generated columns
if opts.multi_select:
multi_select = Column("multi_select",
verbose_name="",
attrs={'classes': ('multi_select_column',)})
multi_select.auto = "multi_select"
columns.insert(0, ("multi_select", multi_select))
if opts.actions_column:
actions_column = Column("actions",
attrs={'classes': ('actions_column',)})
actions_column.auto = "actions"
columns.append(("actions", actions_column))
attrs['columns'] = SortedDict(columns)
# Gather and register actions for later access since we only want
# to instantiate them once.
# (list() call gives deterministic sort order, which sets don't have.)
actions = list(set(opts.row_actions) | set(opts.table_actions))
actions.sort(key=attrgetter('name'))
actions_dict = SortedDict([(action.name, action()) \
for action in actions])
attrs['base_actions'] = actions_dict
if opts._filter_action:
# Replace our filter action with the instantiated version
opts._filter_action = actions_dict[opts._filter_action.name]
# Create our new class!
return type.__new__(mcs, name, bases, attrs)
class DataTable(object):
""" A class which defines a table with all data and associated actions.
.. attribute:: name
String. Read-only access to the name specified in the
table's Meta options.
.. attribute:: multi_select
Boolean. Read-only access to whether or not this table
should display a column for multi-select checkboxes.
.. attribute:: data
Read-only access to the data this table represents.
.. attribute:: filtered_data
Read-only access to the data this table represents, filtered by
the :meth:`~horizon.tables.FilterAction.filter` method of the table's
:class:`~horizon.tables.FilterAction` class (if one is provided)
using the current request's query parameters.
"""
__metaclass__ = DataTableMetaclass
def __init__(self, request, data, **kwargs):
self._meta.request = request
self._meta.data = data
self._populate_data_cache()
self.kwargs = kwargs
for column in self.columns.values():
column.table = self
# Associate these actions with this table
for action in self.base_actions.values():
action.table = self
if self._meta._filter_action:
param_name = self._meta._filter_action.get_param_name()
q = self._meta.request.POST.get(param_name, '')
self._meta._filter_action.filter_string = q
def __unicode__(self):
return self._meta.verbose_name
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
@property
def name(self):
return self._meta.name
@property
def data(self):
return self._meta.data
@property
def multi_select(self):
return self._meta.multi_select
@property
def filtered_data(self):
if not hasattr(self, '_filtered_data'):
self._filtered_data = self.data
if self._meta.filter and self._meta._filter_action:
action = self._meta._filter_action
self._filtered_data = action.filter(self,
self.data,
action.filter_string)
return self._filtered_data
def _populate_data_cache(self):
self._data_cache = {}
# Set up hash tables to store data points for each column
for column in self.get_columns():
self._data_cache[column] = {}
def _filter_action(self, action, request, datum=None):
try:
# Catch user errors in permission functions here
return action.allowed(request, datum)
except Exception:
LOG.exception("Error while checking action permissions.")
return None
def render(self):
""" Renders the table using the template from the table options. """
table_template = template.loader.get_template(self._meta.template)
extra_context = {self._meta.context_var_name: self}
context = template.RequestContext(self._meta.request, extra_context)
return table_template.render(context)
def get_absolute_url(self):
""" Returns the canonical URL for this table.
This is used for the POST action attribute on the form element
wrapping the table. In many cases it is also useful for redirecting
after a successful action on the table.
For convenience it defaults to the value of
``request.get_full_path()``, e.g. the path at which the table
was requested.
"""
return self._meta.request.get_full_path()
def get_empty_message(self):
""" Returns the message to be displayed when there is no data. """
return _("No items to display.")
def get_object_by_id(self, lookup):
"""
Returns the data object from the table's dataset which matches
the ``lookup`` parameter specified. An error will be raised if
the match is not a single data object.
Uses :meth:`~horizon.tables.DataTable.get_object_id` internally.
"""
matches = [datum for datum in self.data if
self.get_object_id(datum) == lookup]
if len(matches) > 1:
raise ValueError("Multiple matches were returned for that id: %s."
% matches)
if not matches:
raise exceptions.Http302(self.get_absolute_url(),
_('No match returned for the id "%s".')
% lookup)
return matches[0]
def get_table_actions(self):
""" Returns a list of the action instances for this table. """
bound_actions = [self.base_actions[action.name] for
action in self._meta.table_actions]
return [action for action in bound_actions if
self._filter_action(action, self._meta.request)]
def get_row_actions(self, datum):
""" Returns a list of the action instances for a specific row. """
bound_actions = []
for action in self._meta.row_actions:
# Copy to allow modifying properties per row
bound_action = copy.copy(self.base_actions[action.name])
# Remove disallowed actions.
if not self._filter_action(bound_action,
self._meta.request,
datum):
continue
# Hook for modifying actions based on data. No-op by default.
bound_action.update(self._meta.request, datum)
# Pre-create the URL for this link with appropriate parameters
if isinstance(bound_action, LinkAction):
bound_action.bound_url = bound_action.get_link_url(datum)
bound_actions.append(bound_action)
return bound_actions
def render_table_actions(self):
""" Renders the actions specified in ``Meta.table_actions``. """
template_path = self._meta.table_actions_template
table_actions_template = template.loader.get_template(template_path)
bound_actions = self.get_table_actions()
extra_context = {"table_actions": bound_actions}
if self._meta.filter:
extra_context["filter"] = self._meta._filter_action
context = template.RequestContext(self._meta.request, extra_context)
return table_actions_template.render(context)
def render_row_actions(self, datum):
"""
Renders the actions specified in ``Meta.row_actions`` using the
current row data. """
template_path = self._meta.row_actions_template
row_actions_template = template.loader.get_template(template_path)
bound_actions = self.get_row_actions(datum)
extra_context = {"row_actions": bound_actions,
"row_id": self.get_object_id(datum)}
context = template.RequestContext(self._meta.request, extra_context)
return row_actions_template.render(context)
def parse_action(self, action_string):
"""
Parses the ``action`` parameter (a string) sent back with the
POST data. By default this parses a string formatted as
``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns
each of the pieces. The ``row_id`` is optional.
"""
if action_string:
bits = action_string.split(STRING_SEPARATOR)
bits.reverse()
table = bits.pop()
action = bits.pop()
try:
object_id = bits.pop()
except IndexError:
object_id = None
return table, action, object_id
def take_action(self, action_name, obj_id=None, obj_ids=None):
"""
Locates the appropriate action and routes the object
data to it. The action should return an HTTP redirect
if successful, or a value which evaluates to ``False``
if unsuccessful.
"""
# See if we have a list of ids
obj_ids = obj_ids or self._meta.request.POST.getlist('object_ids')
action = self.base_actions.get(action_name, None)
if action and (not action.requires_input or obj_id or obj_ids):
if obj_id:
obj_id = self.sanitize_id(obj_id)
if obj_ids:
obj_ids = [self.sanitize_id(i) for i in obj_ids]
# Single handling is easy
if not action.handles_multiple:
response = action.single(self, self._meta.request, obj_id)
# Otherwise figure out what to pass along
else:
if obj_id and not obj_ids:
obj_ids = [obj_id]
response = action.multiple(self, self._meta.request, obj_ids)
return response
elif action and action.requires_input and not (obj_id or obj_ids):
messages.info(self._meta.request,
_("Please select a row before taking that action."))
return None
def maybe_handle(self):
""" Determine whether the request should be handled by this table. """
request = self._meta.request
if request.method == "POST":
action_string = request.POST.get('action', None)
if action_string:
table_id, action, obj_id = self.parse_action(action_string)
if table_id == self.name and action:
return self.take_action(action, obj_id)
return None
def sanitize_id(self, obj_id):
""" Override to modify an incoming obj_id to match existing
API data types or modify the format.
"""
return obj_id
def get_object_id(self, datum):
""" Returns the identifier for the object this row will represent.
By default this returns an ``id`` attribute on the given object,
but this can be overridden to return other values.
"""
return datum.id
def get_object_display(self, datum):
""" Returns a display name that identifies this object.
By default, this returns a ``name`` attribute from the given object,
        but this can be overridden to return other values.
"""
return datum.name
def has_more_data(self):
"""
Returns a boolean value indicating whether there is more data
available to this table from the source (generally an API).
The method is largely meant for internal use, but if you want to
override it to provide custom behavior you can do so at your own risk.
"""
return self._meta.has_more_data
def get_marker(self):
"""
Returns the identifier for the last object in the current data set
for APIs that use marker/limit-based paging.
"""
return http.urlquote_plus(self.get_object_id(self.data[-1]))
def get_columns(self):
""" Returns this table's columns including auto-generated ones."""
return self.columns.values()
def get_rows(self):
""" Return the row data for this table broken out by columns. """
rows = []
try:
for datum in self.filtered_data:
rows.append(Row(self, datum))
except Exception, e:
# Exceptions can be swallowed at the template level here,
# re-raising as a TemplateSyntaxError makes them visible.
LOG.exception("Error while rendering table rows.")
exc_info = sys.exc_info()
raise template.TemplateSyntaxError, exc_info[1], exc_info[2]
return rows
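# Illustrative sketch: a minimal DataTable subclass wiring together the pieces
# defined above (Column declarations, the Meta options documented in
# DataTableOptions, and the maybe_handle()/render() entry points). The table
# and column names here are hypothetical.
class ExampleInstancesTable(DataTable):
    name = Column('name', verbose_name="Name")
    status = Column('status', status=True)
    class Meta:
        name = "example_instances"
        verbose_name = "Example Instances"
        status_column = "status"
# In a view one might then write (sketch only; ``request`` and ``data`` come
# from the surrounding view code):
#     table = ExampleInstancesTable(request, data)
#     handled = table.maybe_handle()  # routes a POSTed "table__action__row_id"
#     if handled:                     # action string back to take_action()
#         return handled
#     html = table.render()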
|
rcbops/horizon-buildpackage
|
horizon/tables/base.py
|
Python
|
apache-2.0
| 32,799
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import threading
import unittest
from pants.base.worker_pool import Work, WorkerPool
from pants.base.workunit import WorkUnit
from pants.util.contextutil import temporary_dir
class FakeRunTracker:
def register_thread(self, one):
pass
def keyboard_interrupt_raiser():
raise KeyboardInterrupt()
class WorkerPoolTest(unittest.TestCase):
def test_keyboard_interrupts_propagated(self):
condition = threading.Condition()
condition.acquire()
with self.assertRaises(KeyboardInterrupt):
with temporary_dir() as rundir:
pool = WorkerPool(WorkUnit(rundir, None, "work"), FakeRunTracker(), 1, "test")
try:
pool.submit_async_work(Work(keyboard_interrupt_raiser, [()]))
condition.wait(2)
finally:
pool.abort()
|
tdyas/pants
|
tests/python/pants_test/base/test_worker_pool.py
|
Python
|
apache-2.0
| 999
|
from collections import defaultdict
from typing import Any, Dict, Set
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.util as util
class FormatMemo:
"""
Class used to manage memoization of intermediate ibis expression format
results in ExprFormatter.
Parameters
----------
get_text_repr: bool
Defaults to ``False``. Determines whether or not the memoization
should use proper alias names. Using the same alias names for
        equivalent expressions is better for memoization/recursion, but it
        does not accurately display aliases in the representation.
"""
def __init__(self, get_text_repr: bool = False):
self.formatted: Dict[str, Any] = {}
self.aliases: Dict[str, str] = {}
self.ops: Dict[str, ops.Node] = {}
self.counts: Dict[str, int] = defaultdict(int)
self._repr_memo: Dict[ir.Expr, str] = {}
self.subexprs: Dict[ops.Node, str] = {}
self.visit_memo: Set[ops.Node] = set()
self.get_text_repr = get_text_repr
def __contains__(self, obj):
return self._key(obj) in self.formatted
def _key(self, expr) -> str:
memo = self._repr_memo
try:
result = memo[expr]
except KeyError:
result = memo[expr] = self._format(expr)
return result
def _format(self, expr):
return expr.op()._repr(memo=self)
def observe(self, expr, formatter=None):
if formatter is None:
formatter = self._format
key = self._key(expr)
if key not in self.formatted:
self.aliases[key] = f'r{len(self.formatted):d}'
self.formatted[key] = formatter(expr)
self.ops[key] = expr.op()
self.counts[key] += 1
def count(self, expr):
return self.counts[self._key(expr)]
def get_alias(self, expr):
return self.aliases[self._key(expr)]
def get_formatted(self, expr):
return self.formatted[self._key(expr)]
class ExprFormatter:
"""For creating a nice tree-like representation of an expression graph.
Notes
-----
TODO: detect reused DAG nodes and do not display redundant information
"""
def __init__(
self,
expr,
indent_size: int = 2,
base_level: int = 0,
memo: FormatMemo = None,
memoize: bool = True,
):
self.expr = expr
self.indent_size = indent_size
self.base_level = base_level
self.memoize = memoize
# For tracking "extracted" objects, like tables, that we don't want to
# print out more than once, and simply alias in the expression tree
if memo is None:
memo = FormatMemo()
self.memo = memo
def get_result(self):
what = self.expr.op()
if self.memoize:
self._memoize_tables()
if isinstance(what, ops.TableNode) and what.has_schema():
# This should also catch aggregations
if not self.memoize and self.expr in self.memo:
text = f"ref: {self.memo.get_alias(self.expr)}"
elif isinstance(what, ops.PhysicalTable):
text = self._format_table(self.expr)
else:
# Any other node type
text = self._format_node(self.expr)
elif isinstance(what, ops.TableColumn):
text = self._format_column(self.expr)
elif isinstance(what, ops.Literal):
text = f"value: {self._get_type_display()} = {what.value!r}"
elif isinstance(what, ops.ScalarParameter):
text = self._get_type_display()
elif isinstance(what, ops.Node):
text = self._format_node(self.expr)
if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:
raw_name = self.expr.get_name()
if isinstance(self.expr.op(), ops.ScalarParameter):
name = f"$({raw_name})"
else:
name = raw_name
text = f'{name}: {text}'
if self.memoize:
alias_to_text = sorted(
(
self.memo.aliases[x],
self.memo.formatted[x],
self.memo.ops[x],
)
for x in self.memo.formatted
)
# A hack to suppress printing out of a ref that is the result of
# the top level expression
refs = [y for _, y, op in alias_to_text if not op.equals(what)]
text = '\n\n'.join(refs + [text])
return self._indent(text, self.base_level)
def _memoize_tables(self):
table_memo_ops = (ops.Aggregation, ops.Selection, ops.SelfReference)
expr = self.expr
if expr.op() in self.memo.visit_memo:
return
stack = [expr]
seen = set()
memo = self.memo
while stack:
e = stack.pop()
op = e.op()
if op not in seen:
seen.add(op)
if isinstance(op, ops.PhysicalTable):
memo.observe(e, self._format_table)
elif isinstance(op, ops.Node):
stack.extend(
arg
for arg in reversed(op.args)
if isinstance(arg, ir.Expr)
)
if isinstance(op, table_memo_ops):
memo.observe(e, self._format_node)
elif isinstance(op, ops.TableNode) and op.has_schema():
memo.observe(e, self._format_table)
memo.visit_memo.add(op)
def _indent(self, text, indents: int = 1):
return util.indent(text, self.indent_size * indents)
def _format_table(self, expr):
table = expr.op()
column_names = table.schema.names
max_name_len = max(map(len, column_names))
# format the schema
rows = list(
map(
"{} {}".format,
(name.ljust(max_name_len) for name in column_names),
table.schema.types,
)
)
opname = type(table).__name__
opline = f'{opname}[{self.memo.get_alias(expr)}, name={table.name}]'
return '{}\n{}'.format(opline, self._indent('\n'.join(rows)))
def _format_column(self, expr):
# HACK: if column is pulled from a Filter of another table, this parent
# will not be found in the memo
col = expr.op()
parent = col.parent()
if parent not in self.memo:
self.memo.observe(parent, formatter=self._format_node)
table_formatted = self.memo.get_alias(parent)
type_display = self._get_type_display(self.expr)
return f"{type_display} = {table_formatted}.{col.name}"
def _format_node(self, expr):
op = expr.op()
formatted_args = []
def visit(what, extra_indents=0):
if isinstance(what, ir.Expr):
result = self._format_subexpr(what)
else:
result = self._indent(str(what))
if extra_indents > 0:
result = util.indent(result, self.indent_size)
formatted_args.append(result)
arg_names = getattr(op, 'display_argnames', op.argnames)
if not arg_names:
for arg in op.flat_args():
visit(arg)
else:
arg_name_pairs = zip(op.args, arg_names)
for arg, name in arg_name_pairs:
if name == 'arg' and isinstance(op, ops.ValueOp):
# don't display first argument's name in repr
name = None
if name is not None:
name = self._indent(f'{name}:')
if util.is_iterable(arg):
if name is not None and len(arg) > 0:
formatted_args.append(name)
indents = 1
else:
indents = 0
for x in arg:
visit(x, extra_indents=indents)
else:
if name is not None:
formatted_args.append(name)
indents = 1
else:
indents = 0
visit(arg, extra_indents=indents)
opname = type(op).__name__
type_display = self._get_type_display(expr)
if isinstance(op, ops.TableNode):
try:
opline = f"{opname}[{self.memo.get_alias(expr)}]"
except KeyError:
opline = opname
else:
opline = f"{type_display} = {opname}"
return '\n'.join([opline] + formatted_args)
def _format_subexpr(self, expr):
subexprs = self.memo.subexprs
if self.memo.get_text_repr:
key = expr._key
else:
key = expr.op()
try:
result = subexprs[key]
except KeyError:
formatter = ExprFormatter(expr, memo=self.memo, memoize=False)
result = subexprs[key] = self._indent(formatter.get_result(), 1)
return result
def _get_type_display(self, expr=None):
if expr is None:
expr = self.expr
return expr._type_display()
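# Illustrative sketch of driving the formatter defined above; the table schema
# is hypothetical and ``ibis`` is assumed to be imported by the caller, not by
# this module:
#
#     import ibis
#     t = ibis.table([('a', 'int64'), ('b', 'string')], name='t')
#     expr = t[t.a > 0].a.sum()
#     print(ExprFormatter(expr).get_result())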
|
cpcloud/ibis
|
ibis/expr/format.py
|
Python
|
apache-2.0
| 9,362
|
# -*- coding: utf-8 -*-
"""The single process extraction engine."""
import collections
import os
import pdb
import threading
import time
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.resolver import resolver
from plaso.containers import counts
from plaso.containers import event_sources
from plaso.engine import engine
from plaso.engine import extractors
from plaso.engine import logger
from plaso.engine import process_info
from plaso.engine import worker
from plaso.lib import definitions
from plaso.parsers import mediator as parsers_mediator
class SingleProcessEngine(engine.BaseEngine):
"""Single process extraction engine."""
# Maximum number of dfVFS file system objects to cache.
_FILE_SYSTEM_CACHE_SIZE = 3
def __init__(self):
"""Initializes a single process extraction engine."""
super(SingleProcessEngine, self).__init__()
self._current_display_name = ''
self._extraction_worker = None
self._file_system_cache = []
self._number_of_consumed_sources = 0
self._parser_mediator = None
self._parsers_counter = None
self._path_spec_extractor = extractors.PathSpecExtractor()
self._pid = os.getpid()
self._process_information = process_info.ProcessInfo(self._pid)
self._processing_configuration = None
self._resolver_context = None
self._status = definitions.STATUS_INDICATOR_IDLE
self._status_update_active = False
self._status_update_callback = None
self._status_update_thread = None
self._storage_writer = None
def _CacheFileSystem(self, path_spec):
"""Caches a dfVFS file system object.
    Keeping an additional reference to a dfVFS file system object causes the
    object to remain cached in the resolver context. This minimizes the number
    of times the file system is re-opened.
Args:
path_spec (dfvfs.PathSpec): path specification.
"""
if (path_spec and not path_spec.IsSystemLevel() and
path_spec.type_indicator != dfvfs_definitions.TYPE_INDICATOR_GZIP):
file_system = resolver.Resolver.OpenFileEntry(
path_spec, resolver_context=self._resolver_context)
if file_system not in self._file_system_cache:
if len(self._file_system_cache) == self._FILE_SYSTEM_CACHE_SIZE:
self._file_system_cache.pop(0)
self._file_system_cache.append(file_system)
elif len(self._file_system_cache) == self._FILE_SYSTEM_CACHE_SIZE:
        # Move the file system to the end of the list to preserve the most
        # recently used file system object.
self._file_system_cache.remove(file_system)
self._file_system_cache.append(file_system)
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
"""Processes a path specification.
Args:
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
path_spec (dfvfs.PathSpec): path specification.
"""
self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
path_spec)
self._CacheFileSystem(path_spec)
excluded_find_specs = None
if self.collection_filters_helper:
excluded_find_specs = (
self.collection_filters_helper.excluded_file_system_find_specs)
try:
extraction_worker.ProcessPathSpec(
parser_mediator, path_spec, excluded_find_specs=excluded_find_specs)
except KeyboardInterrupt:
self._abort = True
self._processing_status.aborted = True
if self._status_update_callback:
self._status_update_callback(self._processing_status)
# We cannot recover from a CacheFullError and abort processing when
# it is raised.
except dfvfs_errors.CacheFullError:
# TODO: signal engine of failure.
self._abort = True
logger.error((
'ABORT: detected cache full error while processing '
'path spec: {0:s}').format(self._current_display_name))
# All exceptions need to be caught here to prevent the worker
# from being killed by an uncaught exception.
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'unable to process path specification with error: '
'{0!s}').format(exception), path_spec=path_spec)
if getattr(self._processing_configuration, 'debug_output', False):
self._StopStatusUpdateThread()
logger.warning(
'Unhandled exception while processing path spec: {0:s}.'.format(
self._current_display_name))
logger.exception(exception)
pdb.post_mortem()
self._StartStatusUpdateThread()
def _ProcessSources(self, source_configurations, parser_mediator):
"""Processes the sources.
Args:
source_configurations (list[SourceConfigurationArtifact]): configurations
of the sources to process.
parser_mediator (ParserMediator): parser mediator.
"""
if self._processing_profiler:
self._processing_profiler.StartTiming('process_sources')
self._status = definitions.STATUS_INDICATOR_COLLECTING
self._current_display_name = ''
self._number_of_consumed_sources = 0
find_specs = None
if self.collection_filters_helper:
find_specs = (
self.collection_filters_helper.included_file_system_find_specs)
source_path_specs = [
configuration.path_spec for configuration in source_configurations]
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
source_path_specs, find_specs=find_specs, recurse_file_system=False,
resolver_context=self._resolver_context)
for path_spec in path_spec_generator:
if self._abort:
break
self._status = definitions.STATUS_INDICATOR_COLLECTING
self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
path_spec)
# TODO: determine if event sources should be DataStream or FileEntry
# or both.
event_source = event_sources.FileEntryEventSource(path_spec=path_spec)
parser_mediator.ProduceEventSource(event_source)
self._status = definitions.STATUS_INDICATOR_RUNNING
if self._processing_profiler:
self._processing_profiler.StartTiming('get_event_source')
event_source = self._storage_writer.GetFirstWrittenEventSource()
if self._processing_profiler:
self._processing_profiler.StopTiming('get_event_source')
while event_source:
if self._abort:
break
self._ProcessPathSpec(
self._extraction_worker, parser_mediator, event_source.path_spec)
self._number_of_consumed_sources += 1
if self._processing_profiler:
self._processing_profiler.StartTiming('get_event_source')
event_source = self._storage_writer.GetNextWrittenEventSource()
if self._processing_profiler:
self._processing_profiler.StopTiming('get_event_source')
if self._abort:
self._status = definitions.STATUS_INDICATOR_ABORTED
else:
self._status = definitions.STATUS_INDICATOR_COMPLETED
if self._processing_profiler:
self._processing_profiler.StopTiming('process_sources')
def _StartStatusUpdateThread(self):
"""Starts the status update thread."""
self._status_update_active = True
self._status_update_thread = threading.Thread(
name='Status update', target=self._StatusUpdateThreadMain)
self._status_update_thread.start()
def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
while self._status_update_active:
self._UpdateStatus()
time.sleep(self._STATUS_UPDATE_INTERVAL)
def _StopStatusUpdateThread(self):
"""Stops the status update thread."""
if self._status_update_thread:
self._status_update_active = False
if self._status_update_thread.is_alive():
self._status_update_thread.join()
self._status_update_thread = None
def _UpdateStatus(self):
"""Updates the processing status."""
status = self._extraction_worker.processing_status
if status == definitions.STATUS_INDICATOR_IDLE:
status = self._status
if status == definitions.STATUS_INDICATOR_IDLE:
status = definitions.STATUS_INDICATOR_RUNNING
used_memory = self._process_information.GetUsedMemory() or 0
self._processing_status.UpdateForemanStatus(
self._name, status, self._pid, used_memory, self._current_display_name,
self._number_of_consumed_sources,
self._parser_mediator.number_of_produced_event_sources,
0, self._parser_mediator.number_of_produced_events,
0, 0,
0, 0)
if self._status_update_callback:
self._status_update_callback(self._processing_status)
def _CreateParserMediator(
self, knowledge_base, resolver_context, processing_configuration):
"""Creates a parser mediator.
Args:
knowledge_base (KnowledgeBase): knowledge base which contains
information from the source data needed for parsing.
resolver_context (dfvfs.Context): resolver context.
processing_configuration (ProcessingConfiguration): processing
configuration.
Returns:
ParserMediator: parser mediator.
"""
parser_mediator = parsers_mediator.ParserMediator(
knowledge_base,
collection_filters_helper=self.collection_filters_helper,
resolver_context=resolver_context)
parser_mediator.SetExtractWinEvtResources(
processing_configuration.extraction.extract_winevt_resources)
parser_mediator.SetPreferredLanguage(
processing_configuration.preferred_language)
parser_mediator.SetPreferredTimeZone(
processing_configuration.preferred_time_zone)
parser_mediator.SetPreferredYear(
processing_configuration.preferred_year)
parser_mediator.SetTemporaryDirectory(
processing_configuration.temporary_directory)
parser_mediator.SetTextPrepend(
processing_configuration.text_prepend)
return parser_mediator
def ProcessSources(
self, source_configurations, storage_writer, resolver_context,
processing_configuration, force_parser=False,
status_update_callback=None):
"""Processes the sources.
Args:
source_configurations (list[SourceConfigurationArtifact]): configurations
of the sources to process.
storage_writer (StorageWriter): storage writer for a session storage.
resolver_context (dfvfs.Context): resolver context.
processing_configuration (ProcessingConfiguration): processing
configuration.
force_parser (Optional[bool]): True if a specified parser should be forced
to be used to extract events.
status_update_callback (Optional[function]): callback function for status
updates.
Returns:
ProcessingStatus: processing status.
"""
parser_mediator = self._CreateParserMediator(
self.knowledge_base, resolver_context, processing_configuration)
parser_mediator.SetStorageWriter(storage_writer)
self._extraction_worker = worker.EventExtractionWorker(
force_parser=force_parser, parser_filter_expression=(
processing_configuration.parser_filter_expression))
self._extraction_worker.SetExtractionConfiguration(
processing_configuration.extraction)
self._parser_mediator = parser_mediator
self._processing_configuration = processing_configuration
self._resolver_context = resolver_context
self._status_update_callback = status_update_callback
self._storage_writer = storage_writer
logger.debug('Processing started.')
parser_mediator.StartProfiling(
self._processing_configuration.profiling, self._name,
self._process_information)
self._StartProfiling(self._processing_configuration.profiling)
if self._analyzers_profiler:
self._extraction_worker.SetAnalyzersProfiler(self._analyzers_profiler)
if self._processing_profiler:
self._extraction_worker.SetProcessingProfiler(self._processing_profiler)
if self._serializers_profiler:
self._storage_writer.SetSerializersProfiler(self._serializers_profiler)
if self._storage_profiler:
self._storage_writer.SetStorageProfiler(self._storage_profiler)
self._StartStatusUpdateThread()
self._parsers_counter = collections.Counter({
parser_count.name: parser_count
for parser_count in self._storage_writer.GetAttributeContainers(
'parser_count')})
try:
self._ProcessSources(source_configurations, parser_mediator)
finally:
# Stop the status update thread after close of the storage writer
# so we include the storage sync to disk in the status updates.
self._StopStatusUpdateThread()
if self._analyzers_profiler:
self._extraction_worker.SetAnalyzersProfiler(None)
if self._processing_profiler:
self._extraction_worker.SetProcessingProfiler(None)
if self._serializers_profiler:
self._storage_writer.SetSerializersProfiler(None)
if self._storage_profiler:
self._storage_writer.SetStorageProfiler(None)
self._StopProfiling()
parser_mediator.StopProfiling()
for key, value in parser_mediator.parsers_counter.items():
parser_count = self._parsers_counter.get(key, None)
if parser_count:
parser_count.number_of_events += value
self._storage_writer.UpdateAttributeContainer(parser_count)
else:
parser_count = counts.ParserCount(name=key, number_of_events=value)
self._parsers_counter[key] = parser_count
self._storage_writer.AddAttributeContainer(parser_count)
if self._abort:
logger.debug('Processing aborted.')
self._processing_status.aborted = True
else:
logger.debug('Processing completed.')
# Update the status view one last time.
self._UpdateStatus()
self._extraction_worker = None
self._file_system_cache = []
self._parser_mediator = None
self._processing_configuration = None
self._resolver_context = None
self._status_update_callback = None
self._storage_writer = None
return self._processing_status
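# Illustrative sketch: the bounded cache policy implemented by _CacheFileSystem
# above, restated with plain values instead of dfVFS file system objects. New
# items evict the oldest entry once the cache is full; an already cached item
# is only moved to the most-recently-used end when the cache is full. This
# helper is illustrative only and is not used by the engine.
def _demo_bounded_cache(cache, item, max_size=3):
  """Applies the _CacheFileSystem caching policy to a plain list."""
  if item not in cache:
    if len(cache) == max_size:
      cache.pop(0)
    cache.append(item)
  elif len(cache) == max_size:
    cache.remove(item)
    cache.append(item)
  return cache
# Feeding 'a', 'b', 'c', 'd', 'b' into an empty list leaves ['c', 'd', 'b'].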
|
joachimmetz/plaso
|
plaso/single_process/extraction_engine.py
|
Python
|
apache-2.0
| 14,239
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""External script for generating Cloud Endpoints related files.
The gen_discovery_doc subcommand takes a list of fully qualified ProtoRPC
service names and calls a cloud service which generates a discovery document in
REST or RPC style.
Example:
endpointscfg.py gen_discovery_doc -o . -f rest postservice.GreetingsV1
The gen_client_lib subcommand takes a discovery document and calls a cloud
service to generate a client library for a target language (currently just Java)
Example:
endpointscfg.py gen_client_lib java -o . greetings-v0.1.discovery
The get_client_lib subcommand does both of the above commands at once.
Example:
endpointscfg.py get_client_lib java -o . -f rest postservice.GreetingsV1
The gen_api_config command outputs an .api configuration file for a service.
Example:
endpointscfg.py gen_api_config -o . -a /path/to/app \
--hostname myhost.appspot.com postservice.GreetingsV1
"""
from __future__ import with_statement
import collections
import contextlib
try:
import json
except ImportError:
import simplejson as json
import os
import re
import sys
import urllib
import urllib2
from protorpc import remote
from google.appengine.ext.endpoints import api_config
DISCOVERY_DOC_BASE = ('https://webapis-discovery.appspot.com/_ah/api/'
'discovery/v1/apis/generate/')
CLIENT_LIBRARY_BASE = 'https://google-api-client-libraries.appspot.com/generate'
class ServerRequestException(Exception):
"""Exception for problems with the request to a server."""
def __init__(self, http_error):
"""Create a ServerRequestException from a given urllib2.HTTPError.
Args:
http_error: The HTTPError that the ServerRequestException will be
based on.
"""
error_details = None
if http_error.fp:
try:
error_body = json.load(http_error.fp)
error_details = ['%s: %s' % (detail['message'], detail['debug_info'])
for detail in error_body['error']['errors']]
except (ValueError, TypeError, KeyError):
pass
if error_details:
error_message = ('HTTP %s (%s) error when communicating with URL: %s. '
'Details: %s' % (http_error.code, http_error.reason,
http_error.filename, error_details))
else:
error_message = ('HTTP %s (%s) error when communicating with URL: %s.' %
(http_error.code, http_error.reason,
http_error.filename))
super(ServerRequestException, self).__init__(error_message)
def _WriteFile(output_path, name, content):
"""Write given content to a file in a given directory.
Args:
output_path: The directory to store the file in.
name: The name of the file to store the content in.
    content: The content to write to the file.
Returns:
The full path to the written file.
"""
path = os.path.join(output_path, name)
with open(path, 'wb') as f:
f.write(content)
return path
def GenApiConfig(service_class_names, generator=None, hostname=None):
"""Write an API configuration for endpoints annotated ProtoRPC services.
Args:
service_class_names: A list of fully qualified ProtoRPC service classes.
    generator: A generator object that produces API config strings using its
pretty_print_config_to_json method.
hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
Raises:
TypeError: If any service classes don't inherit from remote.Service.
messages.DefinitionNotFoundError: If a service can't be found.
Returns:
A map from service names to a string containing the API configuration of the
service in JSON format.
"""
api_service_map = collections.OrderedDict()
for service_class_name in service_class_names:
module_name, base_service_class_name = service_class_name.rsplit('.', 1)
module = __import__(module_name, fromlist=base_service_class_name)
service = getattr(module, base_service_class_name)
if not (isinstance(service, type) and issubclass(service, remote.Service)):
raise TypeError('%s is not a ProtoRPC service' % service_class_name)
services = api_service_map.setdefault((service.api_info.name,
service.api_info.version),
[])
services.append(service)
service_map = collections.OrderedDict()
generator = generator or api_config.ApiConfigGenerator()
for api_info, services in api_service_map.iteritems():
hostname = services[0].api_info.hostname or hostname
service_map['%s-%s' % api_info] = generator.pretty_print_config_to_json(
services, hostname=hostname)
return service_map
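# Illustrative sketch: calling GenApiConfig directly from Python rather than
# through the CLI subcommands described in the module docstring.
# 'postservice.GreetingsV1' is the hypothetical service name reused from the
# examples above; this mirrors what _GenApiConfigCallback below does:
#
#   configs = GenApiConfig(['postservice.GreetingsV1'],
#                          hostname='myhost.appspot.com')
#   for api_name_version, config in configs.iteritems():
#     _WriteFile('.', api_name_version + '.api', config)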
def GenDiscoveryDoc(service_class_names, doc_format,
output_path, hostname=None):
"""Write discovery documents generated from a cloud service to file.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
doc_format: The requested format for the discovery doc. (rest|rpc)
output_path: The directory to output the discovery docs to.
hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
Raises:
ServerRequestException: If fetching the generated discovery doc fails.
Returns:
A list of discovery doc filenames.
"""
output_files = []
service_configs = GenApiConfig(service_class_names, hostname=hostname)
for api_name_version, config in service_configs.iteritems():
body = json.dumps({'config': config}, indent=2, sort_keys=True)
request = urllib2.Request(DISCOVERY_DOC_BASE + doc_format, body)
request.add_header('content-type', 'application/json')
try:
with contextlib.closing(urllib2.urlopen(request)) as response:
content = response.read()
discovery_name = api_name_version + '.discovery'
output_files.append(_WriteFile(output_path, discovery_name, content))
except urllib2.HTTPError, error:
raise ServerRequestException(error)
return output_files
def GenClientLib(discovery_path, language, output_path):
"""Write a client library from a discovery doc, using a cloud service to file.
Args:
discovery_path: Path to the discovery doc used to generate the client
library.
language: The client library language to generate. (java)
output_path: The directory to output the client library zip to.
Raises:
IOError: If reading the discovery doc fails.
ServerRequestException: If fetching the generated client library fails.
Returns:
The path to the zipped client library.
"""
with open(discovery_path) as f:
discovery_doc = f.read()
client_name = re.sub(r'\.discovery$', '.zip',
os.path.basename(discovery_path))
_GenClientLibFromContents(discovery_doc, language, output_path, client_name)
def _GenClientLibFromContents(discovery_doc, language, output_path,
client_name):
"""Write a client library from a discovery doc, using a cloud service to file.
Args:
discovery_doc: A string, the contents of the discovery doc used to
generate the client library.
language: A string, the client library language to generate. (java)
output_path: A string, the directory to output the client library zip to.
client_name: A string, the filename used to save the client lib.
Raises:
IOError: If reading the discovery doc fails.
ServerRequestException: If fetching the generated client library fails.
Returns:
The path to the zipped client library.
"""
body = urllib.urlencode({'lang': language, 'content': discovery_doc})
request = urllib2.Request(CLIENT_LIBRARY_BASE, body)
try:
with contextlib.closing(urllib2.urlopen(request)) as response:
content = response.read()
return _WriteFile(output_path, client_name, content)
except urllib2.HTTPError, error:
raise ServerRequestException(error)
def GetClientLib(service_class_names, doc_format, language,
output_path, hostname=None):
"""Fetch discovery documents and client libraries from a cloud service.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
doc_format: The requested format for the discovery doc. (rest|rpc)
language: The client library language to generate. (java)
output_path: The directory to output the discovery docs to.
hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
Returns:
A tuple (discovery_files, client_libs):
discovery_files: A list of paths to discovery documents.
client_libs: A list of paths to client libraries.
"""
discovery_files = GenDiscoveryDoc(service_class_names, doc_format,
output_path, hostname=hostname)
client_libs = []
for discovery_path in discovery_files:
client_libs.append(
GenClientLib(discovery_path, language, output_path))
return discovery_files, client_libs
def _GenApiConfigCallback(args, api_func=GenApiConfig):
"""Generate an api file.
Args:
args: An argparse.Namespace object to extract parameters from.
api_func: A function that generates and returns an API configuration
for a list of services.
"""
service_class_names, output_path, hostname = (
args.service, args.output, args.hostname)
service_configs = api_func(service_class_names, hostname=hostname)
for api_name_version, config in service_configs.iteritems():
api_name = api_name_version + '.api'
_WriteFile(output_path, api_name, config)
def _GetClientLibCallback(args,
client_func=GetClientLib):
"""Generate discovery docs and client libraries to files.
Args:
args: An argparse.Namespace object to extract parameters from.
client_func: A function that generates client libraries and stores them to
files, accepting a list of service names, a discovery doc format, a client
library language, and an output directory.
"""
service_class_names, doc_format, language, output_path, hostname = (
args.service, args.format, args.language, args.output, args.hostname)
discovery_paths, client_paths = client_func(
service_class_names, doc_format, language, output_path, hostname=hostname)
for discovery_path in discovery_paths:
print 'API discovery document written to %s' % discovery_path
for client_path in client_paths:
print 'API client library written to %s' % client_path
def _GenDiscoveryDocCallback(args, discovery_func=GenDiscoveryDoc):
"""Generate discovery docs to files.
Args:
args: An argparse.Namespace object to extract parameters from
discovery_func: A function that generates discovery docs and stores them to
files, accepting a list of service names, a discovery doc format, and an
output directory.
"""
services, doc_format, output_path, hostname = (
args.service, args.format, args.output, args.hostname)
discovery_paths = discovery_func(services, doc_format,
output_path, hostname=hostname)
for discovery_path in discovery_paths:
print 'API discovery document written to %s' % discovery_path
def _GenClientLibCallback(args, client_func=GenClientLib):
"""Generate a client library to file.
Args:
args: An argparse.Namespace object to extract parameters from
client_func: A function that generates client libraries and stores them to
files, accepting a path to a discovery doc, a client library language, and
an output directory.
"""
discovery_path, language, output_path = (args.discovery_doc[0], args.language,
args.output)
client_path = client_func(discovery_path, language, output_path)
print 'API client library written to %s' % client_path
def MakeParser(prog):
"""Create an argument parser.
Args:
prog: The name of the program to use when outputting help text.
Returns:
An argparse.ArgumentParser built to specification.
"""
import argparse
def AddStandardOptions(parser, *args):
"""Add common endpoints options to a parser.
Args:
parser: The parser to add options to.
      *args: A list of option names to add. Possible names are: application,
        format, hostname, output, language, service, and discovery_doc.
"""
if 'application' in args:
parser.add_argument('-a', '--application', default='.',
help='The path to the Python App Engine App')
if 'format' in args:
parser.add_argument('-f', '--format', default='rest',
choices=['rest', 'rpc'],
help='The requested API protocol type')
if 'hostname' in args:
help_text = ('Default application hostname, if none is specified '
'for API service.')
parser.add_argument('--hostname', help=help_text)
if 'output' in args:
parser.add_argument('-o', '--output', default='.',
help='The directory to store output files')
if 'language' in args:
parser.add_argument('language', choices=['java'],
help='The target output programming language')
if 'service' in args:
parser.add_argument('service', nargs='+',
help='Fully qualified service class name')
if 'discovery_doc' in args:
parser.add_argument('discovery_doc', nargs=1,
help='Path to the discovery document')
parser = argparse.ArgumentParser(prog=prog)
subparsers = parser.add_subparsers(title='subcommands')
get_client_lib = subparsers.add_parser(
'get_client_lib', help=('Generates discovery documents and client '
'libraries from service classes'))
get_client_lib.set_defaults(callback=_GetClientLibCallback)
AddStandardOptions(get_client_lib, 'application', 'format', 'hostname',
'output', 'language', 'service')
gen_api_config = subparsers.add_parser(
'gen_api_config', help=('Generates an .api file for the given service '
'classes'))
gen_api_config.set_defaults(callback=_GenApiConfigCallback)
AddStandardOptions(gen_api_config, 'application', 'hostname', 'output',
'service')
gen_discovery_doc = subparsers.add_parser(
'gen_discovery_doc',
help='Generates discovery documents from service classes')
gen_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
AddStandardOptions(gen_discovery_doc, 'application', 'format', 'hostname',
'output', 'service')
gen_client_lib = subparsers.add_parser(
'gen_client_lib', help='Generates a client library from service classes')
gen_client_lib.set_defaults(callback=_GenClientLibCallback)
AddStandardOptions(gen_client_lib, 'output', 'language', 'discovery_doc')
return parser
def main(argv):
parser = MakeParser(argv[0])
args = parser.parse_args(argv[1:])
application_path = getattr(args, 'application', None)
if application_path is not None:
sys.path.insert(0, os.path.abspath(application_path))
args.callback(args)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
dvliman/jaikuengine
|
.google_appengine/google/appengine/tools/endpointscfg.py
|
Python
|
apache-2.0
| 16,252
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/bigtable/admin_v2/proto/common.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/bigtable/admin_v2/proto/common.proto",
package="google.bigtable.admin.v2",
syntax="proto3",
serialized_pb=_b(
"\n1google/cloud/bigtable/admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3"
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_STORAGETYPE = _descriptor.EnumDescriptor(
name="StorageType",
full_name="google.bigtable.admin.v2.StorageType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="STORAGE_TYPE_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SSD", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="HDD", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=142,
serialized_end=203,
)
_sym_db.RegisterEnumDescriptor(_STORAGETYPE)
StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE)
STORAGE_TYPE_UNSPECIFIED = 0
SSD = 1
HDD = 2
DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(),
_b(
"\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2"
),
)
# @@protoc_insertion_point(module_scope)
|
dhermes/google-cloud-python
|
bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py
|
Python
|
apache-2.0
| 2,932
|
from fontbakery.checkrunner import FAIL
from fontbakery.codetesting import (assert_PASS,
assert_results_contain,
CheckTester,
TEST_FILE)
from fontbakery.profiles import iso15008 as iso15008_profile
def test_check_iso15008_proportions():
"""Check if 0.65 => (H width / H height) => 0.80"""
check = CheckTester(iso15008_profile,
"com.google.fonts/check/iso15008_proportions")
# Cabin has a proportion of 0.7, so that's good.
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
"with a good font...")
# Wonky Paths doesn't have an H
font = TEST_FILE("wonky_paths/WonkySourceSansPro-Regular.otf")
assert_results_contain(check(font),
FAIL, "glyph-not-present",
"with a font that does not have an 'H' glyph...")
# Covered By Your Grace is really tall (proportion 0.39)
font = TEST_FILE("coveredbyyourgrace/CoveredByYourGrace.ttf")
assert_results_contain(check(font),
FAIL, "invalid-proportion",
"with a very tall font (proportion of 'H' width to 'H' height)...")
def test_check_iso15008_stem_width():
"""Check if 0.10 <= (stem width / ascender) <= 0.82"""
check = CheckTester(iso15008_profile,
"com.google.fonts/check/iso15008_stem_width")
font = TEST_FILE("cabin/Cabin-SemiBold.ttf")
assert_PASS(check(font),
"with a good font...")
# Wonky Paths doesn't have an 'l'
font = TEST_FILE("wonky_paths/WonkySourceSansPro-Regular.otf")
assert_results_contain(check(font),
FAIL, "no-stem-width",
"with a font lacking an 'l' glyph...")
# Cabin Regular is actually slightly too thin for displays
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_results_contain(check(font),
FAIL, "invalid-proportion",
"with a too thin font (proportion of stem width to ascender)...")
def test_check_iso15008_intercharacter_spacing():
"""Check if spacing between characters is adequate for display use"""
check = CheckTester(iso15008_profile,
"com.google.fonts/check/iso15008_intercharacter_spacing")
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
"with a good font...")
font = TEST_FILE("cabin/Cabin-SemiBold.ttf")
# l stem width is 111, LSB at x-height is 59, RSB at x-Height is 83
# 142 / 111 = 128%, so this font is too tight.
assert_results_contain(check(font),
FAIL, "bad-vertical-vertical-spacing",
"with a too tight font (space between vertical strokes)...")
# v LSB is 5, lv kern is -6 (!) so lv distance is 83+5-6 = 82
    # 82 / 111 = 74%, so that fails too.
assert_results_contain(check(font),
FAIL, "bad-vertical-diagonal-spacing",
"with bad spacing between vertical and diagonal strokes...")
font = TEST_FILE("montserrat/Montserrat-Black.ttf")
# vv touches
assert_results_contain(check(font),
FAIL, "bad-diagonal-diagonal-spacing",
"with diagonal strokes (vv) that are touching...")
def test_check_iso15008_interword_spacing():
"""Check if spacing between words is adequate for display use"""
check = CheckTester(iso15008_profile,
"com.google.fonts/check/iso15008_interword_spacing")
font = TEST_FILE("cabin/CabinCondensed-Bold.ttf")
# lm space is 112; m+space+l space is 286; 286/112 = 255%
assert_PASS(check(font),
"with a good font...")
font = TEST_FILE("cabin/Cabin-Regular.ttf")
# lm space is 147; m+space+l space is 341; 341/147 = 232%
assert_results_contain(check(font),
FAIL, "bad-interword-spacing",
"with bad interword space...")
def test_check_iso15008_interline_spacing():
"""Check if spacing between lines is adequate for display use"""
check = CheckTester(iso15008_profile,
"com.google.fonts/check/iso15008_interline_spacing")
font = TEST_FILE("cabin/Cabin-Regular.ttf")
assert_PASS(check(font),
"with a good font...")
font = TEST_FILE("source-sans-pro/TTF/SourceSansPro-Bold.ttf")
# 39 units at bottom of g + 49 units at top of h + no typolinegap = 88
# stem width = 147
assert_results_contain(check(font),
FAIL, "bad-interline-spacing",
"with bad interline space...")
|
googlefonts/fontbakery
|
tests/profiles/iso15008_test.py
|
Python
|
apache-2.0
| 4,828
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from datetime import datetime
from dateutil import parser
from desktop.lib.scheduler.lib.beat import CeleryBeatApi
from jobbrowser.apis.base_api import Api
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
class BeatApi(Api):
def apps(self, filters):
api = CeleryBeatApi(user=self.user)
tasks = api.list_tasks(self.user)
return {
'apps': [{
'id': 'celery-beat-%(id)s' % app,
'name': '%(name)s' % app,
'status': self._massage_status(app),
'apiStatus': self._api_status(self._massage_status(app)),
'type': 'celery-beat',
'user': app['description'], # No user id available yet
'progress': 50,
'queue': app['queue'],
'canWrite': self.user.username == app['description'],
'duration': ((datetime.now() - parser.parse(app['start_time']).replace(tzinfo=None)).seconds * 1000) if app['start_time'] else 1,
'submitted': app.get('date_changed')
} for app in tasks
],
'total': len(tasks)
}
def app(self, appid):
appid = appid.rsplit('-')[-1]
api = CeleryBeatApi(user=self.user)
app = api.list_task(appid)
return {
'id': 'celery-beat-%(id)s' % app,
'name': '%(name)s' % app,
'status': self._massage_status(app),
'apiStatus': self._api_status(self._massage_status(app)),
'type': 'celery-beat',
'user': app['description'],
'progress': 50,
'queue': app['queue'],
'duration': 1,
'canWrite': self.user.username == app['description'],
'submitted': app.get('date_changed'),
'properties': {
}
}
def action(self, app_ids, operation):
api = CeleryBeatApi(user=self.user)
operations = []
actual_app_ids = [app_id.replace('celery-beat-', '') for app_id in app_ids]
for app_id in actual_app_ids:
try:
api.action(app_id, operation['action'])
operations.append(app_id)
except Exception:
LOG.exception('Could not stop job %s' % app_id)
return {'kills': operations, 'status': len(app_ids) - len(operations), 'message': _('%s signal sent to %s') % (operation['action'], operations)}
def logs(self, appid, app_type, log_name=None, is_embeddable=False):
return {'logs': ''}
def profile(self, appid, app_type, app_property, app_filters):
appid = appid.rsplit('-')[-1]
if app_property == 'properties':
api = get_api(self.user)
return api.get_statements(appid)
else:
return {}
def _api_status(self, status):
if status == 'RUNNING':
return 'RUNNING'
else:
return 'PAUSED'
def _massage_status(self, task):
return 'RUNNING' if task['enabled'] else 'PAUSED'
class LivyJobApi(Api):
def apps(self, filters):
kwargs = {}
api = get_api(self.user)
jobs = api.list_jobs(**kwargs)
return {
'apps': [{
'id': app['jobId'],
'name': app['creationDate'],
'status': app['status'],
'apiStatus': self._api_status(app['status']),
'type': app['jobType'],
'user': '',
'progress': 100,
'duration': 10 * 3600,
'submitted': app['creationDate']
} for app in jobs['jobs']],
'total': len(jobs)
}
def app(self, appid):
handle = DataEng(self.user).describe_job(job_id=appid)
job = handle['job']
common = {
'id': job['jobId'],
'name': job['jobId'],
'status': job['status'],
'apiStatus': self._api_status(job['status']),
'progress': 50,
'duration': 10 * 3600,
'submitted': job['creationDate'],
'type': 'dataeng-job-%s' % job['jobType'],
}
common['properties'] = {
'properties': job
}
return common
def action(self, appid, action):
return {}
def logs(self, appid, app_type, log_name=None, is_embeddable=False):
return {'logs': ''}
def profile(self, appid, app_type, app_property):
return {}
def _api_status(self, status):
if status in ['CREATING', 'CREATED', 'TERMINATING']:
return 'RUNNING'
elif status in ['COMPLETED']:
return 'SUCCEEDED'
else:
return 'FAILED' # INTERRUPTED , KILLED, TERMINATED and FAILED
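# --- Illustrative sketch (not part of Hue): BeatApi above exposes a namespaced public
# --- id ('celery-beat-<id>') and collapses task state into a two-value apiStatus.
# --- The id round trip and the status collapse, reduced to standalone helpers:
def to_public_id(task_id):
    return 'celery-beat-%s' % task_id
def to_task_id(public_id):
    # Mirrors the appid.rsplit('-')[-1] used by BeatApi.app() above.
    return public_id.rsplit('-')[-1]
def beat_api_status(task):
    # Enabled tasks are RUNNING; everything else is reported as PAUSED.
    return 'RUNNING' if task.get('enabled') else 'PAUSED'
assert to_task_id(to_public_id(42)) == '42'
assert beat_api_status({'enabled': True}) == 'RUNNING'
assert beat_api_status({'enabled': False}) == 'PAUSED'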
| kawamon/hue | apps/jobbrowser/src/jobbrowser/apis/beat_api.py | Python | apache-2.0 | 5,168 |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import copy
import logging
from Queue import Queue
from ambari_stomp.connect import BaseConnection
from ambari_stomp.protocol import Protocol12
from ambari_stomp.transport import Transport, DEFAULT_SSL_VERSION
from ambari_stomp.exception import StompException
from ambari_ws4py.client.threadedclient import WebSocketClient
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_TIMEOUT = 10
class QueuedWebSocketClient(WebSocketClient):
def __init__(self, *args, **kwargs):
WebSocketClient.__init__(self, *args, **kwargs)
self.messages = Queue()
def received_message(self, message):
"""
Override the base class to store the incoming message
in the `messages` queue.
"""
self.messages.put(copy.deepcopy(message))
def receive(self):
"""
    Returns the next message stored in the `messages` queue, or `None` when the
    websocket is terminated or closed.
"""
# If the websocket was terminated and there are no messages
# left in the queue, return None immediately otherwise the client
# will block forever
if self.terminated and self.messages.empty():
return None
message = self.messages.get()
if message is StopIteration:
return None
return message
def closed(self, code, reason=None):
self.messages.put(StopIteration)
class WsTransport(Transport):
def __init__(self, url, ssl_options=None):
Transport.__init__(self, (0, 0), False, False, 0.0, 0.0, 0.0, 0.0, 0, False, None, None, None, None, False,
DEFAULT_SSL_VERSION, None, None, None)
self.current_host_and_port = (0, 0) # mocking
self.ws = QueuedWebSocketClient(url, protocols=['http-only', 'chat'], ssl_options=ssl_options)
self.ws.daemon = False
def wait_for_connection(self, timeout=DEFAULT_CONNECTION_TIMEOUT):
"""
Wait until we've established a connection with the server.
:param float timeout: how long to wait, in seconds
"""
with self.get_connect_wait_condition():
self.get_connect_wait_condition().wait(timeout)
if not self.is_connected() and not self.connection_error:
raise ConnectionResponseTimeout("Waiting for connection confirmation timed out")
def is_connected(self):
return self.connected
def attempt_connection(self):
self.ws.connect()
def send(self, encoded_frame):
logger.debug("Outgoing STOMP message:\n>>> " + encoded_frame)
if self.ws.terminated:
raise ConnectionIsAlreadyClosed("Connection is already closed cannot send data")
self.ws.send(encoded_frame)
def receive(self):
try:
msg = self.ws.receive()
msg = str(msg) if msg is not None else msg
logger.debug("Incoming STOMP message:\n<<< {0}".format(msg))
return msg
except:
# exceptions from this method are hidden by the framework so implementing logging by ourselves
logger.exception("Exception while handling incoming STOMP message:")
return None
def stop(self):
self.running = False
try:
self.ws.terminate()
except:
logger.exception("Exception during self.ws.terminate()")
try:
self.disconnect_socket()
except:
logger.exception("Exception during self.disconnect_socket()")
try:
Transport.stop(self)
except:
logger.exception("Exception during Transport.stop(self)")
class WsConnection(BaseConnection, Protocol12):
def __init__(self, url, ssl_options=None):
self.transport = WsTransport(url, ssl_options=ssl_options)
self.transport.set_listener('ws-listener', self)
self.transactions = {}
Protocol12.__init__(self, self.transport, (0, 0))
def disconnect(self, receipt=None, headers=None, **keyword_headers):
try:
Protocol12.disconnect(self, receipt, headers, **keyword_headers)
except:
logger.exception("Exception during Protocol12.disconnect()")
try:
self.transport.stop()
except:
logger.exception("Exception during self.transport.stop()")
class ConnectionResponseTimeout(StompException):
"""
  Raised when a 'STOMP' frame has been sent but no 'CONNECTED' frame was received within a certain timeout.
"""
class ConnectionIsAlreadyClosed(StompException):
"""
  Raised when trying to send data on a connection which is already closed, usually after it was brought down by the server.
"""
pass
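# --- Illustrative sketch (not part of ambari_stomp): the heart of QueuedWebSocketClient
# --- above is a blocking receive() fed from a Queue, with StopIteration pushed as a
# --- sentinel on close so a blocked reader wakes up and returns None. The same pattern,
# --- reduced to a standalone class:
try:
    from queue import Queue as _Queue   # Python 3
except ImportError:
    from Queue import Queue as _Queue   # Python 2, as imported at the top of this module
class _QueuedReceiver(object):
    def __init__(self):
        self.messages = _Queue()
        self.terminated = False
    def push(self, message):
        self.messages.put(message)
    def close(self):
        self.terminated = True
        self.messages.put(StopIteration)  # wakes up any reader blocked in receive()
    def receive(self):
        if self.terminated and self.messages.empty():
            return None
        message = self.messages.get()
        return None if message is StopIteration else message
_receiver = _QueuedReceiver()
_receiver.push('hello')
_receiver.close()
assert _receiver.receive() == 'hello'
assert _receiver.receive() is None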
| sekikn/ambari | ambari-common/src/main/python/ambari_stomp/adapter/websocket.py | Python | apache-2.0 | 5,073 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = unicode = str
from py4j.java_gateway import JavaClass
from pyspark import RDD, since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import _to_seq
from pyspark.sql.types import *
from pyspark.sql import utils
from pyspark.sql.utils import to_str
__all__ = ["DataFrameReader", "DataFrameWriter"]
class OptionUtils(object):
def _set_opts(self, schema=None, **options):
"""
        Set named options (filtering out those whose value is None)
"""
if schema is not None:
self.schema(schema)
for k, v in options.items():
if v is not None:
self.option(k, v)
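# --- Illustrative sketch (not part of PySpark): _set_opts above forwards only the
# --- keyword arguments whose value is not None. That filtering step in isolation:
def _non_none_options(**options):
    """Return only the options that were explicitly set (value is not None)."""
    return {k: v for k, v in options.items() if v is not None}
assert _non_none_options(header='true', sep=None) == {'header': 'true'}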
class DataFrameReader(OptionUtils):
"""
Interface used to load a :class:`DataFrame` from external storage systems
(e.g. file systems, key-value stores, etc). Use :attr:`SparkSession.read`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, spark):
self._jreader = spark._ssql_ctx.read()
self._spark = spark
def _df(self, jdf):
from pyspark.sql.dataframe import DataFrame
return DataFrame(jdf, self._spark)
@since(1.4)
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._jreader = self._jreader.format(source)
return self
@since(1.4)
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
>>> s = spark.read.schema("col0 INT, col1 DOUBLE")
"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if isinstance(schema, StructType):
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
elif isinstance(schema, basestring):
self._jreader = self._jreader.schema(schema)
else:
raise TypeError("schema should be StructType or string")
return self
@since(1.5)
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a time zone ID to be used to parse
timestamps in the JSON/CSV datasources or partition values. The following
formats of `timeZone` are supported:
* Region-based zone ID: It should have the form 'area/city', such as \
'America/Los_Angeles'.
* Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \
'+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.
Other short names like 'CST' are not recommended to use because they can be
ambiguous. If it isn't set, the current value of the SQL config
``spark.sql.session.timeZone`` is used by default.
* ``pathGlobFilter``: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows org.apache.hadoop.fs.GlobFilter.
It does not change the behavior of partition discovery.
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
@since(1.4)
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a time zone ID to be used to parse
timestamps in the JSON/CSV datasources or partition values. The following
formats of `timeZone` are supported:
* Region-based zone ID: It should have the form 'area/city', such as \
'America/Los_Angeles'.
* Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \
'+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.
Other short names like 'CST' are not recommended to use because they can be
ambiguous. If it isn't set, the current value of the SQL config
``spark.sql.session.timeZone`` is used by default.
* ``pathGlobFilter``: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows org.apache.hadoop.fs.GlobFilter.
It does not change the behavior of partition discovery.
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
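    # --- Illustrative usage (hedged; a sketch, not taken from the docstring above):
    # --- the three accepted forms of the ``timeZone`` option described above.
    # ---   spark.read.option("timeZone", "America/Los_Angeles")  # region-based zone ID
    # ---   spark.read.option("timeZone", "-08:00")               # zone offset
    # ---   spark.read.option("timeZone", "UTC")                  # alias of '+00:00'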
@since(1.4)
def load(self, path=None, format=None, schema=None, **options):
"""Loads data from a data source and returns it as a :class:`DataFrame`.
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
:param schema: optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param options: all other string options
>>> df = spark.read.format("parquet").load('python/test_support/sql/parquet_partitioned',
... opt1=True, opt2=1, opt3='str')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
>>> df = spark.read.format('json').load(['python/test_support/sql/people.json',
... 'python/test_support/sql/people1.json'])
>>> df.dtypes
[('age', 'bigint'), ('aka', 'string'), ('name', 'string')]
"""
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
if isinstance(path, basestring):
return self._df(self._jreader.load(path))
elif path is not None:
if type(path) != list:
path = [path]
return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
else:
return self._df(self._jreader.load())
@since(1.4)
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None,
dropFieldIfAllNull=None, encoding=None, locale=None, pathGlobFilter=None,
recursiveFileLookup=None):
"""
Loads JSON files and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
:param path: string represents path to the JSON dataset, or a list of paths,
or RDD of Strings storing JSON objects.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or
a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
do not fit in decimal, then it infers them as doubles. If None is
set, it uses the default value, ``false``.
:param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
it uses the default value, ``false``.
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
it uses the default value, ``false``.
:param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
set, it uses the default value, ``true``.
:param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
set, it uses the default value, ``false``.
:param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
using backslash quoting mechanism. If None is
set, it uses the default value, ``false``.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
                  fields to ``null``. To keep corrupt records, a user can set a string type \
                  field named ``columnNameOfCorruptRecord`` in a user-defined schema. If a \
schema does not have the field, it drops corrupt records during parsing. \
When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \
field in an output schema.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at `datetime pattern`_.
This applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at `datetime pattern`_.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.
:param multiLine: parse one record, which may span multiple lines, per file. If None is
set, it uses the default value, ``false``.
:param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
characters (ASCII characters with value less than 32,
including tab and line feed characters) or not.
        :param encoding: allows forcibly setting one of the standard basic or extended encodings for
the JSON files. For example UTF-16BE, UTF-32LE. If None is set,
the encoding of input JSON will be detected automatically
when the multiLine option is set to ``true``.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
:param samplingRatio: defines fraction of input JSON objects used for schema inferring.
If None is set, it uses the default value, ``1.0``.
:param dropFieldIfAllNull: whether to ignore column of all null values or empty
array/struct during schema inference. If None is set, it
uses the default value, ``false``.
:param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
it uses the default value, ``en-US``. For instance, ``locale`` is used while
parsing dates and timestamps.
:param pathGlobFilter: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.
It does not change the behavior of `partition discovery`_.
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.
.. _partition discovery:
https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
>>> df1 = spark.read.json('python/test_support/sql/people.json')
>>> df1.dtypes
[('age', 'bigint'), ('name', 'string')]
>>> rdd = sc.textFile('python/test_support/sql/people.json')
>>> df2 = spark.read.json(rdd)
>>> df2.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, multiLine=multiLine,
allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep,
samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding,
locale=locale, pathGlobFilter=pathGlobFilter, recursiveFileLookup=recursiveFileLookup)
if isinstance(path, basestring):
path = [path]
if type(path) == list:
return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
elif isinstance(path, RDD):
def func(iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = path.mapPartitions(func)
keyed._bypass_serializer = True
jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
return self._df(self._jreader.json(jrdd))
else:
raise TypeError("path can be only string, list or RDD")
@since(1.4)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:param tableName: string, name of the table.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.createOrReplaceTempView('tmpTable')
>>> spark.read.table('tmpTable').dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
return self._df(self._jreader.table(tableName))
@since(1.4)
def parquet(self, *paths, **options):
"""
Loads Parquet files, returning the result as a :class:`DataFrame`.
:param mergeSchema: sets whether we should merge schemas collected from all
Parquet part-files. This will override
``spark.sql.parquet.mergeSchema``. The default value is specified in
``spark.sql.parquet.mergeSchema``.
:param pathGlobFilter: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.
It does not change the behavior of `partition discovery`_.
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
mergeSchema = options.get('mergeSchema', None)
pathGlobFilter = options.get('pathGlobFilter', None)
recursiveFileLookup = options.get('recursiveFileLookup', None)
self._set_opts(mergeSchema=mergeSchema, pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup)
return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
@ignore_unicode_prefix
@since(1.6)
def text(self, paths, wholetext=False, lineSep=None, pathGlobFilter=None,
recursiveFileLookup=None):
"""
Loads text files and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
The text files must be encoded as UTF-8.
By default, each line in the text file is a new row in the resulting DataFrame.
:param paths: string, or list of strings, for input path(s).
:param wholetext: if true, read each file from input path(s) as a single row.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
:param pathGlobFilter: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.
It does not change the behavior of `partition discovery`_.
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.
>>> df = spark.read.text('python/test_support/sql/text-test.txt')
>>> df.collect()
[Row(value=u'hello'), Row(value=u'this')]
>>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True)
>>> df.collect()
[Row(value=u'hello\\nthis')]
"""
self._set_opts(
wholetext=wholetext, lineSep=lineSep, pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup)
if isinstance(paths, basestring):
paths = [paths]
return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
@since(2.0)
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None,
samplingRatio=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None,
pathGlobFilter=None, recursiveFileLookup=None):
r"""Loads a CSV file and returns the result as a :class:`DataFrame`.
This function will go through the input once to determine the input schema if
``inferSchema`` is enabled. To avoid going through the entire data once, disable
``inferSchema`` option or specify the schema explicitly using ``schema``.
:param path: string, or list of strings, for input path(s),
or RDD of Strings storing CSV rows.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param sep: sets a separator (one or more characters) for each field and value. If None is
set, it uses the default value, ``,``.
:param encoding: decodes the CSV files by the given encoding type. If None is set,
it uses the default value, ``UTF-8``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If you would like to turn off quotations, you need to set an
empty string.
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``.
:param comment: sets a single character used for skipping lines beginning with this
character. By default (None), it is disabled.
:param header: uses the first line as names of columns. If None is set, it uses the
default value, ``false``.
:param inferSchema: infers the input schema automatically from data. It requires one extra
pass over the data. If None is set, it uses the default value, ``false``.
:param enforceSchema: If it is set to ``true``, the specified or inferred schema will be
forcibly applied to datasource files, and headers in CSV files will be
ignored. If the option is set to ``false``, the schema will be
validated against all headers in CSV files or the first header in RDD
if the ``header`` option is set to ``true``. Field names in the schema
and column names in CSV headers are checked by their positions
taking into account ``spark.sql.caseSensitive``. If None is set,
``true`` is used by default. Though the default value is ``true``,
it is recommended to disable the ``enforceSchema`` option
to avoid incorrect results.
:param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string. Since 2.0.1, this ``nullValue`` param
applies to all supported types including the string type.
:param nanValue: sets the string representation of a non-number value. If None is set, it
uses the default value, ``NaN``.
:param positiveInf: sets the string representation of a positive infinity value. If None
is set, it uses the default value, ``Inf``.
:param negativeInf: sets the string representation of a negative infinity value. If None
                            is set, it uses the default value, ``-Inf``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at `datetime pattern`_.
This applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at `datetime pattern`_.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.
:param maxColumns: defines a hard limit of how many columns a record can have. If None is
set, it uses the default value, ``20480``.
:param maxCharsPerColumn: defines the maximum number of characters allowed for any given
value being read. If None is set, it uses the default value,
``-1`` meaning unlimited length.
:param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
If specified, it is ignored.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``. Note that Spark tries to
parse only required columns in CSV under column pruning. Therefore, corrupt
records can be different based on required set of fields. This behavior can
be controlled by ``spark.sql.csv.parser.columnPruning.enabled``
(enabled by default).
* ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
                  fields to ``null``. To keep corrupt records, a user can set a string type \
                  field named ``columnNameOfCorruptRecord`` in a user-defined schema. If a \
schema does not have the field, it drops corrupt records during parsing. \
A record with less/more tokens than schema is not a corrupted record to CSV. \
When it meets a record having fewer tokens than the length of the schema, \
sets ``null`` to extra fields. When the record has more tokens than the \
length of the schema, it drops extra tokens.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param multiLine: parse records, which may span multiple lines. If None is
set, it uses the default value, ``false``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
different, ``\0`` otherwise.
:param samplingRatio: defines fraction of rows used for schema inferring.
If None is set, it uses the default value, ``1.0``.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, empty string.
:param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
it uses the default value, ``en-US``. For instance, ``locale`` is used while
parsing dates and timestamps.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
Maximum length is 1 character.
:param pathGlobFilter: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.
It does not change the behavior of `partition discovery`_.
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.
>>> df = spark.read.csv('python/test_support/sql/ages.csv')
>>> df.dtypes
[('_c0', 'string'), ('_c1', 'string')]
>>> rdd = sc.textFile('python/test_support/sql/ages.csv')
>>> df2 = spark.read.csv(rdd)
>>> df2.dtypes
[('_c0', 'string'), ('_c1', 'string')]
"""
self._set_opts(
schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, samplingRatio=samplingRatio,
enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep,
pathGlobFilter=pathGlobFilter, recursiveFileLookup=recursiveFileLookup)
if isinstance(path, basestring):
path = [path]
if type(path) == list:
return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
elif isinstance(path, RDD):
def func(iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = path.mapPartitions(func)
keyed._bypass_serializer = True
jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
# see SPARK-22112
# There aren't any jvm api for creating a dataframe from rdd storing csv.
# We can do it through creating a jvm dataset firstly and using the jvm api
# for creating a dataframe from dataset storing csv.
jdataset = self._spark._ssql_ctx.createDataset(
jrdd.rdd(),
self._spark._jvm.Encoders.STRING())
return self._df(self._jreader.csv(jdataset))
else:
raise TypeError("path can be only string, list or RDD")
@since(1.5)
def orc(self, path, mergeSchema=None, pathGlobFilter=None, recursiveFileLookup=None):
"""Loads ORC files, returning the result as a :class:`DataFrame`.
:param mergeSchema: sets whether we should merge schemas collected from all
ORC part-files. This will override ``spark.sql.orc.mergeSchema``.
The default value is specified in ``spark.sql.orc.mergeSchema``.
:param pathGlobFilter: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.
It does not change the behavior of `partition discovery`_.
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.
>>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> df.dtypes
[('a', 'bigint'), ('b', 'int'), ('c', 'int')]
"""
self._set_opts(mergeSchema=mergeSchema, pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup)
if isinstance(path, basestring):
path = [path]
return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
@since(1.4)
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
        are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of a column of numeric, date, or timestamp type
that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
if column is not None:
assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
assert numPartitions is not None, \
"numPartitions can not be None when ``column`` is specified"
return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
int(numPartitions), jprop))
if predicates is not None:
gateway = self._spark._sc._gateway
jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
return self._df(self._jreader.jdbc(url, table, jprop))
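# --- Illustrative sketch (not PySpark's implementation): the jdbc() docstring above says
# --- that ``lowerBound`` (inclusive), ``upperBound`` (exclusive) and ``numPartitions``
# --- form partition strides for the generated WHERE clauses. Conceptually, the column
# --- range is split into contiguous slices roughly like this:
def _partition_bounds(lower_bound, upper_bound, num_partitions):
    """Yield (start, end) pairs covering [lower_bound, upper_bound) in num_partitions slices."""
    stride = (upper_bound - lower_bound) / float(num_partitions)
    for i in range(num_partitions):
        start = lower_bound + int(round(i * stride))
        end = upper_bound if i == num_partitions - 1 else lower_bound + int(round((i + 1) * stride))
        yield start, end
assert list(_partition_bounds(0, 100, 4)) == [(0, 25), (25, 50), (50, 75), (75, 100)]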
class DataFrameWriter(OptionUtils):
"""
Interface used to write a :class:`DataFrame` to external storage systems
(e.g. file systems, key-value stores, etc). Use :attr:`DataFrame.write`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, df):
self._df = df
self._spark = df.sql_ctx
self._jwrite = df._jdf.write()
def _sq(self, jsq):
from pyspark.sql.streaming import StreamingQuery
return StreamingQuery(jsq)
@since(1.4)
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self
@since(1.4)
def format(self, source):
"""Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self._jwrite = self._jwrite.format(source)
return self
@since(1.5)
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a time zone ID to be used to format
timestamps in the JSON/CSV datasources or partition values. The following
formats of `timeZone` are supported:
* Region-based zone ID: It should have the form 'area/city', such as \
'America/Los_Angeles'.
* Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \
'+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.
Other short names like 'CST' are not recommended to use because they can be
ambiguous. If it isn't set, the current value of the SQL config
``spark.sql.session.timeZone`` is used by default.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
@since(1.4)
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a time zone ID to be used to format
timestamps in the JSON/CSV datasources or partition values. The following
formats of `timeZone` are supported:
* Region-based zone ID: It should have the form 'area/city', such as \
'America/Los_Angeles'.
* Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \
'+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.
Other short names like 'CST' are not recommended to use because they can be
ambiguous. If it isn't set, the current value of the SQL config
``spark.sql.session.timeZone`` is used by default.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
@since(1.4)
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
@since(2.3)
def bucketBy(self, numBuckets, col, *cols):
"""Buckets the output by the given columns.If specified,
the output is laid out on the file system similar to Hive's bucketing scheme.
:param numBuckets: the number of buckets to save
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
.. note:: Applicable for file-based data sources in combination with
:py:meth:`DataFrameWriter.saveAsTable`.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .mode("overwrite")
... .saveAsTable('bucketed_table'))
"""
if not isinstance(numBuckets, int):
raise TypeError("numBuckets should be an int, got {0}.".format(type(numBuckets)))
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.bucketBy(numBuckets, col, _to_seq(self._spark._sc, cols))
return self
@since(2.3)
def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
"""
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self
@since(1.4)
def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
"""Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode("append").save(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if path is None:
self._jwrite.save()
else:
self._jwrite.save(path)
@since(1.4)
def insertInto(self, tableName, overwrite=None):
"""Inserts the content of the :class:`DataFrame` to the specified table.
        It requires that the schema of the :class:`DataFrame` is the same as the
schema of the table.
Optionally overwriting any existing data.
"""
if overwrite is not None:
self.mode("overwrite" if overwrite else "append")
self._jwrite.insertInto(tableName)
@since(1.4)
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
        When the table already exists, the behavior of this function depends on the
        save mode, specified by the `mode` function (defaults to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name)
@since(1.4)
def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None,
lineSep=None, encoding=None, ignoreNullFields=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at `datetime pattern`_.
This applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at `datetime pattern`_.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.
:param encoding: specifies encoding (charset) of saved json files. If None is set,
the default UTF-8 charset will be used.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
:param ignoreNullFields: Whether to ignore null fields when generating JSON objects.
If None is set, it uses the default value, ``true``.
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(
compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat,
lineSep=lineSep, encoding=encoding, ignoreNullFields=ignoreNullFields)
self._jwrite.json(path)
@since(1.4)
def parquet(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, uncompressed, snappy, gzip,
lzo, brotli, lz4, and zstd). This will override
``spark.sql.parquet.compression.codec``. If None is set, it uses the
value specified in ``spark.sql.parquet.compression.codec``.
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.parquet(path)
@since(1.6)
def text(self, path, compression=None, lineSep=None):
"""Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression, lineSep=lineSep)
self._jwrite.text(path)
@since(2.0)
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None,
charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None, lineSep=None):
r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets a separator (one or more characters) for each field and value. If None is
set, it uses the default value, ``,``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If an empty string is set, it uses ``u0000`` (null character).
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats follow
the formats at `datetime pattern`_.
This applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format.
Custom date formats follow the formats at `datetime pattern`_.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
                                          different, ``\0`` otherwise.
:param encoding: sets the encoding (charset) of saved csv files. If None is set,
the default UTF-8 charset will be used.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, ``""``.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``. Maximum length is 1 character.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
dateFormat=dateFormat, timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
encoding=encoding, emptyValue=emptyValue, lineSep=lineSep)
self._jwrite.csv(path)
@since(1.5)
def orc(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, snappy, zlib, and lzo).
This will override ``orc.compress`` and
``spark.sql.orc.compression.codec``. If None is set, it uses the value
specified in ``spark.sql.orc.compression.codec``.
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.orc(path)
@since(1.4)
def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
self.mode(mode)._jwrite.jdbc(url, table, jprop)
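    # Illustrative usage sketch (not part of the original module): saving a
    # DataFrame to a hypothetical PostgreSQL table over JDBC. The URL, table
    # name and credentials below are placeholders, not values from this file.
    #
    #   df.write.jdbc(
    #       url="jdbc:postgresql://localhost:5432/mydb",
    #       table="public.people",
    #       mode="append",
    #       properties={"user": "SYSTEM", "password": "mypassword"})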
def _test():
import doctest
    import os
    import sys
import tempfile
import py4j
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, Row
import pyspark.sql.readwriter
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.readwriter.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
try:
spark = SparkSession.builder.getOrCreate()
except py4j.protocol.Py4JError:
spark = SparkSession(sc)
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned')
(failure_count, test_count) = doctest.testmod(
pyspark.sql.readwriter, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
sc.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
kevinyu98/spark
|
python/pyspark/sql/readwriter.py
|
Python
|
apache-2.0
| 61,864
|
"""
Data migration to populate tenants.uid with existing tenants.id values.
"""
# pylint: disable=C0103,R0801
import sqlalchemy
meta = sqlalchemy.MetaData()
# define the previous state of tenants
tenant = {}
tenant['id'] = sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
autoincrement=True)
tenant['uid'] = sqlalchemy.Column('uid', sqlalchemy.String(255), unique=False,
nullable=True)
tenant['name'] = sqlalchemy.Column('name', sqlalchemy.String(255), unique=True)
tenant['desc'] = sqlalchemy.Column('desc', sqlalchemy.String(255))
tenant['enabled'] = sqlalchemy.Column('enabled', sqlalchemy.Integer)
tenants = sqlalchemy.Table('tenants', meta, *tenant.values())
def upgrade(migrate_engine):
meta.bind = migrate_engine
dtenants = tenants.select().execute()
for dtenant in dtenants:
whereclause = "`id`='%s'" % (dtenant.id)
values = {'uid': str(dtenant.id)}
tenants.update(whereclause=whereclause, values=values).execute()
def downgrade(migrate_engine):
meta.bind = migrate_engine
tenants.update(values={'uid': None}).execute()
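# Illustrative sketch (not part of the original migration): the same per-row
# update written with the SQLAlchemy expression language instead of a raw
# ``whereclause`` string, which avoids hand-quoting the ``id`` column:
#
#   for dtenant in tenants.select().execute():
#       tenants.update().where(tenants.c.id == dtenant.id).\
#           values(uid=str(dtenant.id)).execute()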
|
HugoKuo/keystone-essex3
|
keystone/backends/sqlalchemy/migrate_repo/versions/006_populate_tenants_uid.py
|
Python
|
apache-2.0
| 1,107
|
#-
# Copyright (c) 2012 Robert M. Norton
# All rights reserved.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_tlb_load_1(BaseBERITestCase):
@attr('tlb')
def test_load_succeeded(self):
self.assertRegisterEqual(self.MIPS.a4, 0xba9876543210fead, "Load from virtual address failed.")
self.assertRegisterEqual(self.MIPS.a5, 0xfedcba9876543210, "Load from virtual address failed.")
|
8l/beri
|
cheritest/trunk/tests/tlb/test_tlb_load_1.py
|
Python
|
apache-2.0
| 1,311
|
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from mock import Mock
from mock import patch
from trove.cluster.models import Cluster
from trove.cluster.models import ClusterTasks
from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster.experimental.mongodb import (
api as mongodb_api)
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.instance import models as inst_models
from trove.instance.models import DBInstance
from trove.instance.tasks import InstanceTasks
from trove.quota.quota import QUOTAS
from trove.taskmanager import api as task_api
from trove.tests.unittests import trove_testtools
CONF = cfg.CONF
class ClusterTest(trove_testtools.TestCase):
def setUp(self):
super(ClusterTest, self).setUp()
self.get_client_patch = patch.object(task_api.API, 'get_client')
self.get_client_mock = self.get_client_patch.start()
self.addCleanup(self.get_client_patch.stop)
self.cluster_id = str(uuid.uuid4())
self.cluster_name = "Cluster" + self.cluster_id
self.tenant_id = "23423432"
self.dv_id = "1"
self.db_info = DBCluster(ClusterTasks.NONE,
id=self.cluster_id,
name=self.cluster_name,
tenant_id=self.tenant_id,
datastore_version_id=self.dv_id,
task_id=ClusterTasks.NONE._code)
self.context = trove_testtools.TroveTestContext(self)
self.datastore = Mock()
self.dv = Mock()
self.dv.manager = "mongodb"
self.datastore_version = self.dv
self.cluster = mongodb_api.MongoDbCluster(self.context, self.db_info,
self.datastore,
self.datastore_version)
self.instances = [{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'},
{'volume_size': 1, 'flavor_id': '1234'}]
self.volume_support = CONF.get(self.dv.manager).volume_support
self.remote_nova = remote.create_nova_client
def tearDown(self):
super(ClusterTest, self).tearDown()
CONF.get(self.dv.manager).volume_support = self.volume_support
remote.create_nova_client = self.remote_nova
def test_create_empty_instances(self):
self.assertRaises(exception.ClusterNumInstancesNotSupported,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
[],
None
)
def test_create_unequal_flavors(self):
instances = self.instances
instances[0]['flavor_id'] = '4567'
self.assertRaises(exception.ClusterFlavorsNotEqual,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances,
None
)
@patch.object(remote, 'create_nova_client')
def test_create_unequal_volumes(self,
mock_client):
instances = self.instances
instances[0]['volume_size'] = 2
flavors = Mock()
mock_client.return_value.flavors = flavors
self.assertRaises(exception.ClusterVolumeSizesNotEqual,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances,
None
)
@patch.object(remote, 'create_nova_client')
def test_create_storage_not_specified(self,
mock_client):
class FakeFlavor:
def __init__(self, flavor_id):
self.flavor_id = flavor_id
@property
def id(self):
                return self.flavor_id
@property
def ephemeral(self):
return 0
instances = [{'flavor_id': '1234'},
{'flavor_id': '1234'},
{'flavor_id': '1234'}]
CONF.get(self.dv.manager).volume_support = False
(mock_client.return_value.
flavors.get.return_value) = FakeFlavor('1234')
self.assertRaises(exception.LocalStorageNotSpecified,
Cluster.create,
Mock(),
self.cluster_name,
self.datastore,
self.datastore_version,
instances,
None
)
@patch('trove.cluster.models.LOG')
def test_delete_bad_task_status(self, mock_logging):
self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL
self.assertRaises(exception.UnprocessableEntity,
self.cluster.delete)
@patch.object(task_api.API, 'delete_cluster')
@patch.object(Cluster, 'update_db')
@patch.object(inst_models.DBInstance, 'find_all')
def test_delete_task_status_none(self,
mock_find_all,
mock_update_db,
mock_delete_cluster):
self.cluster.db_info.task_status = ClusterTasks.NONE
self.cluster.delete()
mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
@patch.object(task_api.API, 'delete_cluster')
@patch.object(Cluster, 'update_db')
@patch.object(inst_models.DBInstance, 'find_all')
def test_delete_task_status_deleting(self,
mock_find_all,
mock_update_db,
mock_delete_cluster):
self.cluster.db_info.task_status = ClusterTasks.DELETING
self.cluster.delete()
mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
@patch('trove.common.strategies.cluster.experimental.mongodb.api.LOG')
def test_add_shard_bad_task_status(self, mock_logging):
task_status = ClusterTasks.BUILDING_INITIAL
self.cluster.db_info.task_status = task_status
self.assertRaises(exception.UnprocessableEntity,
self.cluster.add_shard)
@patch.object(utils, 'generate_uuid', Mock(return_value='new-shard-id'))
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch.object(task_api, 'load')
@patch.object(Cluster, 'update_db')
@patch.object(inst_models.Instance, 'create')
@patch.object(QUOTAS, 'check_quotas')
@patch.object(inst_models, 'load_any_instance')
@patch.object(inst_models.DBInstance, 'find_all')
def test_add_shard(self,
mock_find_all,
mock_load_any_instance,
mock_check_quotas,
mock_instance_create,
mock_update_db,
mock_task_api_load,
mock_load_by_uuid):
self.cluster.db_info.task_status = ClusterTasks.NONE
(mock_find_all.return_value
.all.return_value) = [DBInstance(InstanceTasks.NONE,
name="TestInstance1",
shard_id="1", id='1',
datastore_version_id='1'),
DBInstance(InstanceTasks.NONE,
name="TestInstance2",
shard_id="1", id='2',
datastore_version_id='1'),
DBInstance(InstanceTasks.NONE,
name="TestInstance3",
shard_id="1", id='3',
datastore_version_id='1')]
mock_datastore_version = Mock()
mock_datastore_version.manager = 'mongodb'
mock_load_by_uuid.return_value = mock_datastore_version
mock_task_api = Mock()
mock_task_api.mongodb_add_shard_cluster.return_value = None
mock_task_api_load.return_value = mock_task_api
self.cluster.add_shard()
mock_update_db.assert_called_with(
task_status=ClusterTasks.ADDING_SHARD)
mock_task_api.mongodb_add_shard_cluster.assert_called_with(
self.cluster.id, 'new-shard-id', 'rs2')
|
redhat-openstack/trove
|
trove/tests/unittests/cluster/test_cluster.py
|
Python
|
apache-2.0
| 9,650
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import uuid
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:admin:instances:index')
class InstanceViewTest(test.BaseAdminViewTests):
@test.create_stubs({
api.nova: ('flavor_list', 'server_list', 'extension_supported',),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',),
api.glance: ('image_list_detailed',),
})
def test_index(self):
servers = self.servers.list()
flavors = self.flavors.list()
tenants = self.tenants.list()
images = self.images.list()
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([tenants, False])
search_opts = {'marker': None, 'paginate': True}
api.glance.image_list_detailed(IsA(http.HttpRequest))\
.AndReturn(images)
api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({
api.nova: ('flavor_list', 'flavor_get', 'server_list',
'extension_supported',),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',),
api.glance: ('image_list_detailed',),
})
def test_index_flavor_list_exception(self):
servers = self.servers.list()
tenants = self.tenants.list()
flavors = self.flavors.list()
full_flavors = OrderedDict([(f.id, f) for f in flavors])
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.flavor_list(IsA(http.HttpRequest)). \
AndRaise(self.exceptions.nova)
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([tenants, False])
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndReturn(full_flavors[server.flavor["id"]])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({
api.nova: ('flavor_list', 'flavor_get', 'server_list',
'extension_supported',),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',),
api.glance: ('image_list_detailed',),
})
def test_index_flavor_get_exception(self):
servers = self.servers.list()
flavors = self.flavors.list()
images = self.images.list()
tenants = self.tenants.list()
# UUIDs generated using indexes are unlikely to match
        # any of the existing flavor ids and are guaranteed to be deterministic.
for i, server in enumerate(servers):
server.flavor['id'] = str(uuid.UUID(int=i))
api.glance.image_list_detailed(IsA(http.HttpRequest))\
.AndReturn(images)
api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([tenants, False])
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
instances = res.context['table'].data
self.assertTemplateUsed(res, 'admin/instances/index.html')
# Since error messages produced for each instance are identical,
# there will be only one error message for all instances
        # (messages are de-duplicated).
self.assertMessageCount(res, error=1)
self.assertItemsEqual(instances, servers)
@test.create_stubs({
api.nova: ('server_list', 'flavor_list',),
api.keystone: ('tenant_list',),
api.glance: ('image_list_detailed',),
})
def test_index_server_list_exception(self):
tenants = self.tenants.list()
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndRaise(self.exceptions.nova)
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([tenants, False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/instances/index.html')
self.assertEqual(len(res.context['instances_table'].data), 0)
@test.create_stubs({api.nova: ('server_get', 'flavor_get',
'extension_supported', ),
api.network: ('servers_update_addresses',),
api.keystone: ('tenant_get',)})
def test_ajax_loading_instances(self):
server = self.servers.first()
flavor = self.flavors.list()[0]
tenant = self.tenants.list()[0]
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.flavor_get(IsA(http.HttpRequest),
server.flavor['id']).AndReturn(flavor)
api.keystone.tenant_get(IsA(http.HttpRequest),
server.tenant_id,
admin=True).AndReturn(tenant)
self.mox.ReplayAll()
url = (INDEX_URL +
"?action=row_update&table=instances&obj_id=" + server.id)
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTemplateUsed(res, "horizon/common/_data_table_row.html")
self.assertContains(res, "test_tenant", 1, 200)
self.assertContains(res, "instance-host", 1, 200)
        # the name appears twice: once in the cell and once in the row's data-display attribute
self.assertContains(res, "server_1", 2, 200)
self.assertContains(res, "10.0.0.1", 1, 200)
self.assertContains(res, "RAM</th><td>512MB", 1, 200)
self.assertContains(res, "VCPUs</th><td>1", 1, 200)
self.assertContains(res, "Size</th><td>0 GB", 1, 200)
self.assertContains(res, "Active", 1, 200)
self.assertContains(res, "Running", 1, 200)
@test.create_stubs({
api.nova: ('flavor_list', 'server_list', 'extension_supported', ),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',),
api.glance: ('image_list_detailed',),
})
def test_index_options_before_migrate(self):
servers = self.servers.list()
images = self.images.list()
flavors = self.flavors.list()
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([self.tenants.list(), False])
api.glance.image_list_detailed(IsA(http.HttpRequest)) \
.AndReturn(images)
api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
search_opts = {'marker': None, 'paginate': True}
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertContains(res, "instances__migrate")
self.assertNotContains(res, "instances__confirm")
self.assertNotContains(res, "instances__revert")
@test.create_stubs({
api.nova: ('flavor_list', 'server_list', 'extension_supported',),
api.keystone: ('tenant_list',),
api.network: ('servers_update_addresses',),
api.glance: ('image_list_detailed',),
})
def test_index_options_after_migrate(self):
servers = self.servers.list()
server1 = servers[0]
server1.status = "VERIFY_RESIZE"
server2 = servers[2]
server2.status = "VERIFY_RESIZE"
images = self.images.list()
flavors = self.flavors.list()
api.keystone.tenant_list(IsA(http.HttpRequest)) \
.AndReturn([self.tenants.list(), False])
api.glance.image_list_detailed(IsA(http.HttpRequest)) \
.AndReturn(images)
api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
search_opts = {'marker': None, 'paginate': True}
api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(True)
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True, search_opts=search_opts) \
.AndReturn([servers, False])
api.network.servers_update_addresses(IsA(http.HttpRequest), servers,
all_tenants=True)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertContains(res, "instances__confirm")
self.assertContains(res, "instances__revert")
self.assertNotContains(res, "instances__migrate")
@test.create_stubs({api.nova: ('host_list',
'server_get',)})
def test_instance_live_migrate_get(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/instances/live_migrate.html')
@test.create_stubs({api.nova: ('server_get',)})
def test_instance_live_migrate_get_server_get_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('host_list',
'server_get',)})
def test_instance_live_migrate_list_hypervisor_get_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('host_list',
'server_get',)})
def test_instance_live_migrate_list_hypervisor_without_current(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.get(url)
self.assertNotContains(
res, "<option value=\"instance-host\">devstack004</option>")
self.assertContains(
res, "<option value=\"devstack001\">devstack001</option>")
self.assertNotContains(
res, "<option value=\"devstack002\">devstack002</option>")
self.assertContains(
res, "<option value=\"devstack003\">devstack003</option>")
@test.create_stubs({api.nova: ('host_list',
'server_get',
'server_live_migrate',)})
def test_instance_live_migrate_post(self):
server = self.servers.first()
host = self.hosts.first().host_name
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
api.nova.server_live_migrate(IsA(http.HttpRequest), server.id, host,
block_migration=False,
disk_over_commit=False) \
.AndReturn([])
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.post(url, {'host': host, 'instance_id': server.id})
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('host_list',
'server_get',
'server_live_migrate',)})
def test_instance_live_migrate_auto_sched(self):
server = self.servers.first()
host = ""
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
api.nova.server_live_migrate(IsA(http.HttpRequest), server.id, None,
block_migration=False,
disk_over_commit=False) \
.AndReturn([])
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.post(url, {'host': host, 'instance_id': server.id})
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('host_list',
'server_get',
'server_live_migrate',)})
def test_instance_live_migrate_post_api_exception(self):
server = self.servers.first()
host = self.hosts.first().host_name
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndReturn(server)
api.nova.host_list(IsA(http.HttpRequest)) \
.AndReturn(self.hosts.list())
api.nova.server_live_migrate(IsA(http.HttpRequest), server.id, host,
block_migration=False,
disk_over_commit=False) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:live_migrate',
args=[server.id])
res = self.client.post(url, {'host': host, 'instance_id': server.id})
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_get',)})
def test_instance_details_exception(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:detail',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(FILTER_DATA_FIRST={'admin.instances': True})
def test_index_with_admin_filter_first(self):
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, [])
|
sandvine/horizon
|
openstack_dashboard/dashboards/admin/instances/tests.py
|
Python
|
apache-2.0
| 18,794
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
# ----------------------------------------------------------------------------
# Generate trace GAPIC layer
# ----------------------------------------------------------------------------
for version in ["v1", "v2"]:
library = gapic.py_library(
"trace",
version,
config_path=f"/google/devtools/cloudtrace" f"/artman_cloudtrace_{version}.yaml",
artman_output_name=f"trace-{version}",
include_protos=True,
)
s.move(library / f"google/cloud/trace_{version}")
s.move(library / f"tests/unit/gapic/{version}")
# Fix up imports
s.replace(
"google/**/*.py",
f"from google.devtools.cloudtrace_{version}.proto import ",
f"from google.cloud.trace_{version}.proto import ",
)
# Copy docs configuration
s.move(library / f"docs/conf.py")
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(unit_cov_level=97, cov_level=100)
s.move(templated_files, excludes=["noxfile.py"])
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
tseaver/google-cloud-python
|
trace/synth.py
|
Python
|
apache-2.0
| 1,937
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from forms_builder.forms import fields, settings
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Field',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(max_length=200, verbose_name='Label')),
('slug', models.SlugField(default='', max_length=100, verbose_name='Slug', blank=True)),
('field_type', models.IntegerField(verbose_name='Type', choices=fields.NAMES)),
('required', models.BooleanField(default=True, verbose_name='Required')),
('visible', models.BooleanField(default=True, verbose_name='Visible')),
('choices', models.CharField(help_text='Comma separated options where applicable. If an option itself contains commas, surround the option starting with the `character and ending with the ` character.', max_length=1000, verbose_name='Choices', blank=True)),
('default', models.CharField(max_length=2000, verbose_name='Default value', blank=True)),
('placeholder_text', models.CharField(max_length=100, null=True, verbose_name='Placeholder Text', blank=True)),
('help_text', models.CharField(max_length=100, verbose_name='Help text', blank=True)),
('order', models.IntegerField(null=True, verbose_name='Order', blank=True)),
],
options={
'ordering': ('order',),
'abstract': False,
'verbose_name': 'Field',
'verbose_name_plural': 'Fields',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FieldEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field_id', models.IntegerField()),
('value', models.CharField(max_length=2000, null=True)),
],
options={
'abstract': False,
'verbose_name': 'Form field entry',
'verbose_name_plural': 'Form field entries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Form',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50, verbose_name='Title')),
('slug', models.SlugField(verbose_name='Slug', unique=True, max_length=100, editable=False)),
('intro', models.TextField(verbose_name='Intro', blank=True)),
('button_text', models.CharField(default='Submit', max_length=50, verbose_name='Button text')),
('response', models.TextField(verbose_name='Response', blank=True)),
('redirect_url', models.CharField(help_text='An alternate URL to redirect to after form submission', max_length=200, null=True, verbose_name='Redirect url', blank=True)),
('status', models.IntegerField(default=2, verbose_name='Status', choices=[(1, 'Draft'), (2, 'Published')])),
('publish_date', models.DateTimeField(help_text="With published selected, won't be shown until this time", null=True, verbose_name='Published from', blank=True)),
('expiry_date', models.DateTimeField(help_text="With published selected, won't be shown after this time", null=True, verbose_name='Expires on', blank=True)),
('login_required', models.BooleanField(default=False, help_text='If checked, only logged in users can view the form', verbose_name='Login required')),
('send_email', models.BooleanField(default=True, help_text='If checked, the person entering the form will be sent an email', verbose_name='Send email')),
('email_from', models.EmailField(help_text='The address the email will be sent from', max_length=75, verbose_name='From address', blank=True)),
('email_copies', models.CharField(help_text='One or more email addresses, separated by commas', max_length=200, verbose_name='Send copies to', blank=True)),
('email_subject', models.CharField(max_length=200, verbose_name='Subject', blank=True)),
('email_message', models.TextField(verbose_name='Message', blank=True)),
('sites', models.ManyToManyField(default=[settings.SITE_ID], related_name='forms_form_forms', to='sites.Site')),
],
options={
'abstract': False,
'verbose_name': 'Form',
'verbose_name_plural': 'Forms',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('entry_time', models.DateTimeField(verbose_name='Date/time')),
('form', models.ForeignKey(related_name='entries', to='forms.Form', on_delete=models.CASCADE)),
],
options={
'abstract': False,
'verbose_name': 'Form entry',
'verbose_name_plural': 'Form entries',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='fieldentry',
name='entry',
field=models.ForeignKey(related_name='fields', to='forms.FormEntry', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='field',
name='form',
field=models.ForeignKey(related_name='fields', to='forms.Form', on_delete=models.CASCADE),
preserve_default=True,
),
]
|
nimbis/django-forms-builder
|
forms_builder/forms/migrations/0001_initial.py
|
Python
|
bsd-2-clause
| 6,125
|
from unittest.mock import ANY, Mock, patch
from streamlink_cli.output import PlayerOutput
from tests import posix_only, windows_only
UNICODE_TITLE = "기타치는소율 with UL섬 "
@posix_only
@patch("streamlink_cli.output.sleep", Mock())
@patch("subprocess.Popen")
def test_output_mpv_unicode_title_posix(popen):
po = PlayerOutput("mpv", title=UNICODE_TITLE)
popen().poll.side_effect = lambda: None
po.open()
popen.assert_called_with(["mpv", f"--title={UNICODE_TITLE}", "-"],
bufsize=ANY, stderr=ANY, stdout=ANY, stdin=ANY)
@posix_only
@patch("streamlink_cli.output.sleep", Mock())
@patch("subprocess.Popen")
def test_output_vlc_unicode_title_posix(popen):
po = PlayerOutput("vlc", title=UNICODE_TITLE)
popen().poll.side_effect = lambda: None
po.open()
popen.assert_called_with(["vlc", "--input-title-format", UNICODE_TITLE, "-"],
bufsize=ANY, stderr=ANY, stdout=ANY, stdin=ANY)
@windows_only
@patch("streamlink_cli.output.sleep", Mock())
@patch("subprocess.Popen")
def test_output_mpv_unicode_title_windows_py3(popen):
po = PlayerOutput("mpv.exe", title=UNICODE_TITLE)
popen().poll.side_effect = lambda: None
po.open()
popen.assert_called_with(f"mpv.exe \"--title={UNICODE_TITLE}\" -",
bufsize=ANY, stderr=ANY, stdout=ANY, stdin=ANY)
@windows_only
@patch("streamlink_cli.output.sleep", Mock())
@patch("subprocess.Popen")
def test_output_vlc_unicode_title_windows_py3(popen):
po = PlayerOutput("vlc.exe", title=UNICODE_TITLE)
popen().poll.side_effect = lambda: None
po.open()
popen.assert_called_with(f"vlc.exe --input-title-format \"{UNICODE_TITLE}\" -",
bufsize=ANY, stderr=ANY, stdout=ANY, stdin=ANY)
@posix_only
def test_output_args_posix():
po_none = PlayerOutput("foo")
assert po_none._create_arguments() == ["foo", "-"]
po_implicit = PlayerOutput("foo", args="--bar")
assert po_implicit._create_arguments() == ["foo", "--bar", "-"]
po_explicit = PlayerOutput("foo", args="--bar {playerinput}")
assert po_explicit._create_arguments() == ["foo", "--bar", "-"]
po_fallback = PlayerOutput("foo", args="--bar {filename}")
assert po_fallback._create_arguments() == ["foo", "--bar", "-"]
@windows_only
def test_output_args_windows():
po_none = PlayerOutput("foo")
assert po_none._create_arguments() == "foo -"
po_implicit = PlayerOutput("foo", args="--bar")
assert po_implicit._create_arguments() == "foo --bar -"
po_explicit = PlayerOutput("foo", args="--bar {playerinput}")
assert po_explicit._create_arguments() == "foo --bar -"
po_fallback = PlayerOutput("foo", args="--bar {filename}")
assert po_fallback._create_arguments() == "foo --bar -"
|
beardypig/streamlink
|
tests/test_cli_playerout.py
|
Python
|
bsd-2-clause
| 2,813
|
from __future__ import unicode_literals
try:
import configparser
except ImportError:
import ConfigParser as configparser
text = type('')
bool_or_auto = 'bool_or_auto'
class ParseError(Exception):
pass
def parse_ini(fileobj, field_specs, section_name):
parser = configparser.RawConfigParser()
readfile = getattr(parser, 'read_file', getattr(parser, 'readfp', None))
readfile(fileobj)
if not parser.has_section(section_name):
return None
result = {}
for (key, value) in parser.items(section_name):
spec = field_specs.get(key, text)
result[key] = _parse_value(parser, section_name, key, value, spec)
return result
def _parse_value(parser, section_name, key, value, spec):
if spec in (bool, bool_or_auto):
if spec == bool_or_auto and value == 'auto':
return 'auto'
try:
return parser.getboolean(section_name, key)
except ValueError:
raise ParseError(
'Unknown bool value for option "{}": {!r}'.format(key, value))
elif spec == text:
return value
elif isinstance(spec, list):
if spec == [text]:
return [x for x in value.splitlines() if x]
elif isinstance(spec, dict):
if spec == {text: text}:
return dict(x.split(' = ', 1) for x in value.splitlines() if x)
raise NotImplementedError("Type spec not implemented: {!r}".format(spec))
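# Illustrative usage sketch (not part of the original module); the section
# name, keys and field specs below are invented for the example:
#
#   import io
#   sample = io.StringIO(u"[prequ]\nannotate = yes\nwheel-dir = wheels\n")
#   parse_ini(sample, {'annotate': bool}, 'prequ')
#   # -> {'annotate': True, 'wheel-dir': 'wheels'}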
|
suutari/prequ
|
prequ/ini_parser.py
|
Python
|
bsd-2-clause
| 1,443
|
# Use this file to convert the metrorail_sample_data.json data set
# to Excel format.
#
# python create_xls_file.py
#
# will create a file named Metro Rail Data.xlsx.
#
# metrorail.py will produce the same result regardless of whether
# it is run on metrorail_sample_data.json or Metro Rail Data.xlsx.
# This result is largely consistent with the heat map result from
# https://orbythebeach.wordpress.com/2018/03/01/buying-metrorail-tickets-in-miami/
# with the exception that we find only two infeasible sub-models.
from metrorail import input_schema
dat = input_schema.json.create_tic_dat("metrorail_sample_data.json")
input_schema.xls.write_file(dat, "Metro Rail Data.xlsx", allow_overwrite=True,
case_space_sheet_names=True)
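# Illustrative follow-up (not part of the original script): the Excel file
# written above can be read back into an equivalent TicDat object, which is
# why metrorail.py gives the same result for either input format.
#
#   dat_xls = input_schema.xls.create_tic_dat("Metro Rail Data.xlsx")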
|
opalytics/opalytics-ticdat
|
examples/gurobipy/metrorail/create_xls_file.py
|
Python
|
bsd-2-clause
| 760
|
#!/bin/env python
import os, sys
run = sys.argv[1]
hummingbird_dir = "/reg/neh/home/hantke/programs/hummingbird"
this_dir = os.path.dirname(os.path.realpath(__file__))
os.system("source /reg/neh/home/hantke/.bashrc; export HUMMINGBIRD_RUN=%s; cd %s; ./hummingbird.py -b %s/conf.py" % (run, hummingbird_dir, this_dir))
|
SPIhub/hummingbird
|
examples/advanced/save_camera/brun.py
|
Python
|
bsd-2-clause
| 321
|
#!/usr/bin/python3
#
# Copyright (C) 2006, 2007, 2010 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the locking module"""
import os
import unittest
import time
import queue
import threading
import random
import gc
import itertools
from ganeti import constants
from ganeti import locking
from ganeti import errors
from ganeti import utils
from ganeti import compat
from ganeti import objects
from ganeti import query
import testutils
# This is used to test the ssynchronized decorator.
# Since it's passed as input to a decorator it must be declared as a global.
_decoratorlock = locking.SharedLock("decorator lock")
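# Illustrative sketch (not part of the original tests) of how the decorator
# under test is typically applied; the function name below is made up:
#
#   @locking.ssynchronized(_decoratorlock)
#   def _ExampleSynchronized():
#       # body runs with _decoratorlock held in exclusive mode
#       assert _decoratorlock.is_owned()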
#: List for looping tests
ITERATIONS = range(8)
def _Repeat(fn):
"""Decorator for executing a function many times"""
def wrapper(*args, **kwargs):
for i in ITERATIONS:
fn(*args, **kwargs)
return wrapper
def SafeSleep(duration):
start = time.time()
while True:
delay = start + duration - time.time()
if delay <= 0.0:
break
time.sleep(delay)
class _ThreadedTestCase(unittest.TestCase):
"""Test class that supports adding/waiting on threads"""
def setUp(self):
unittest.TestCase.setUp(self)
self.done = queue.Queue(0)
self.threads = []
def _addThread(self, *args, **kwargs):
"""Create and remember a new thread"""
t = threading.Thread(*args, **kwargs)
self.threads.append(t)
t.start()
return t
def _waitThreads(self):
"""Wait for all our threads to finish"""
for t in self.threads:
t.join(60)
self.assertFalse(t.is_alive())
self.threads = []
class _ConditionTestCase(_ThreadedTestCase):
"""Common test case for conditions"""
def setUp(self, cls):
_ThreadedTestCase.setUp(self)
self.lock = threading.Lock()
self.cond = cls(self.lock)
def _testAcquireRelease(self):
self.assertFalse(self.cond._is_owned())
self.assertRaises(RuntimeError, self.cond.wait, None)
self.assertRaises(RuntimeError, self.cond.notifyAll)
self.cond.acquire()
self.assertTrue(self.cond._is_owned())
self.cond.notifyAll()
self.assertTrue(self.cond._is_owned())
self.cond.release()
self.assertFalse(self.cond._is_owned())
self.assertRaises(RuntimeError, self.cond.wait, None)
self.assertRaises(RuntimeError, self.cond.notifyAll)
def _testNotification(self):
def _NotifyAll():
self.done.put("NE")
self.cond.acquire()
self.done.put("NA")
self.cond.notifyAll()
self.done.put("NN")
self.cond.release()
self.cond.acquire()
self._addThread(target=_NotifyAll)
self.assertEqual(self.done.get(True, 1), "NE")
self.assertRaises(queue.Empty, self.done.get_nowait)
self.cond.wait(None)
self.assertEqual(self.done.get(True, 1), "NA")
self.assertEqual(self.done.get(True, 1), "NN")
self.assertTrue(self.cond._is_owned())
self.cond.release()
self.assertFalse(self.cond._is_owned())
class TestSingleNotifyPipeCondition(_ConditionTestCase):
"""SingleNotifyPipeCondition tests"""
def setUp(self):
_ConditionTestCase.setUp(self, locking.SingleNotifyPipeCondition)
def testAcquireRelease(self):
self._testAcquireRelease()
def testNotification(self):
self._testNotification()
def testWaitReuse(self):
self.cond.acquire()
self.cond.wait(0)
self.cond.wait(0.1)
self.cond.release()
def testNoNotifyReuse(self):
self.cond.acquire()
self.cond.notifyAll()
self.assertRaises(RuntimeError, self.cond.wait, None)
self.assertRaises(RuntimeError, self.cond.notifyAll)
self.cond.release()
class TestPipeCondition(_ConditionTestCase):
"""PipeCondition tests"""
def setUp(self):
_ConditionTestCase.setUp(self, locking.PipeCondition)
def testAcquireRelease(self):
self._testAcquireRelease()
def testNotification(self):
self._testNotification()
def _TestWait(self, fn):
threads = [
self._addThread(target=fn),
self._addThread(target=fn),
self._addThread(target=fn),
]
# Wait for threads to be waiting
for _ in threads:
self.assertEqual(self.done.get(True, 1), "A")
self.assertRaises(queue.Empty, self.done.get_nowait)
self.cond.acquire()
self.assertEqual(len(self.cond._waiters), 3)
self.assertEqual(self.cond._waiters, set(threads))
self.assertTrue(repr(self.cond).startswith("<"))
self.assertTrue("waiters=" in repr(self.cond))
# This new thread can't acquire the lock, and thus call wait, before we
# release it
self._addThread(target=fn)
self.cond.notifyAll()
self.assertRaises(queue.Empty, self.done.get_nowait)
self.cond.release()
# We should now get 3 W and 1 A (for the new thread) in whatever order
w = 0
a = 0
for i in range(4):
got = self.done.get(True, 1)
if got == "W":
w += 1
elif got == "A":
a += 1
else:
self.fail("Got %s on the done queue" % got)
self.assertEqual(w, 3)
self.assertEqual(a, 1)
self.cond.acquire()
self.cond.notifyAll()
self.cond.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "W")
self.assertRaises(queue.Empty, self.done.get_nowait)
def testBlockingWait(self):
def _BlockingWait():
self.cond.acquire()
self.done.put("A")
self.cond.wait(None)
self.cond.release()
self.done.put("W")
self._TestWait(_BlockingWait)
def testLongTimeoutWait(self):
def _Helper():
self.cond.acquire()
self.done.put("A")
self.cond.wait(15.0)
self.cond.release()
self.done.put("W")
self._TestWait(_Helper)
def _TimeoutWait(self, timeout, check):
self.cond.acquire()
self.cond.wait(timeout)
self.cond.release()
self.done.put(check)
def testShortTimeoutWait(self):
self._addThread(target=self._TimeoutWait, args=(0.1, "T1"))
self._addThread(target=self._TimeoutWait, args=(0.1, "T1"))
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "T1")
self.assertEqual(self.done.get_nowait(), "T1")
self.assertRaises(queue.Empty, self.done.get_nowait)
def testZeroTimeoutWait(self):
self._addThread(target=self._TimeoutWait, args=(0, "T0"))
self._addThread(target=self._TimeoutWait, args=(0, "T0"))
self._addThread(target=self._TimeoutWait, args=(0, "T0"))
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "T0")
self.assertEqual(self.done.get_nowait(), "T0")
self.assertEqual(self.done.get_nowait(), "T0")
self.assertRaises(queue.Empty, self.done.get_nowait)
class TestSharedLock(_ThreadedTestCase):
"""SharedLock tests"""
def setUp(self):
_ThreadedTestCase.setUp(self)
self.sl = locking.SharedLock("TestSharedLock")
self.assertTrue(repr(self.sl).startswith("<"))
self.assertTrue("name=TestSharedLock" in repr(self.sl))
def testSequenceAndOwnership(self):
self.assertFalse(self.sl.is_owned())
self.sl.acquire(shared=1)
self.assertTrue(self.sl.is_owned())
self.assertTrue(self.sl.is_owned(shared=1))
self.assertFalse(self.sl.is_owned(shared=0))
self.sl.release()
self.assertFalse(self.sl.is_owned())
self.sl.acquire()
self.assertTrue(self.sl.is_owned())
self.assertFalse(self.sl.is_owned(shared=1))
self.assertTrue(self.sl.is_owned(shared=0))
self.sl.release()
self.assertFalse(self.sl.is_owned())
self.sl.acquire(shared=1)
self.assertTrue(self.sl.is_owned())
self.assertTrue(self.sl.is_owned(shared=1))
self.assertFalse(self.sl.is_owned(shared=0))
self.sl.release()
self.assertFalse(self.sl.is_owned())
def testBooleanValue(self):
# semaphores are supposed to return a true value on a successful acquire
self.assertTrue(self.sl.acquire(shared=1))
self.sl.release()
self.assertTrue(self.sl.acquire())
self.sl.release()
def testDoubleLockingStoE(self):
self.sl.acquire(shared=1)
self.assertRaises(AssertionError, self.sl.acquire)
def testDoubleLockingEtoS(self):
self.sl.acquire()
self.assertRaises(AssertionError, self.sl.acquire, shared=1)
def testDoubleLockingStoS(self):
self.sl.acquire(shared=1)
self.assertRaises(AssertionError, self.sl.acquire, shared=1)
def testDoubleLockingEtoE(self):
self.sl.acquire()
self.assertRaises(AssertionError, self.sl.acquire)
# helper functions: called in a separate thread they acquire the lock, send
# their identifier on the done queue, then release it.
def _doItSharer(self):
try:
self.sl.acquire(shared=1)
self.done.put("SHR")
self.sl.release()
except errors.LockError:
self.done.put("ERR")
def _doItExclusive(self):
try:
self.sl.acquire()
self.done.put("EXC")
self.sl.release()
except errors.LockError:
self.done.put("ERR")
def _doItDelete(self):
try:
self.sl.delete()
self.done.put("DEL")
except errors.LockError:
self.done.put("ERR")
def testSharersCanCoexist(self):
self.sl.acquire(shared=1)
threading.Thread(target=self._doItSharer).start()
self.assertTrue(self.done.get(True, 1))
self.sl.release()
@_Repeat
def testExclusiveBlocksExclusive(self):
self.sl.acquire()
self._addThread(target=self._doItExclusive)
self.assertRaises(queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "EXC")
@_Repeat
def testExclusiveBlocksDelete(self):
self.sl.acquire()
self._addThread(target=self._doItDelete)
self.assertRaises(queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "DEL")
self.sl = locking.SharedLock(self.sl.name)
@_Repeat
def testExclusiveBlocksSharer(self):
self.sl.acquire()
self._addThread(target=self._doItSharer)
self.assertRaises(queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "SHR")
@_Repeat
def testSharerBlocksExclusive(self):
self.sl.acquire(shared=1)
self._addThread(target=self._doItExclusive)
self.assertRaises(queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "EXC")
@_Repeat
def testSharerBlocksDelete(self):
self.sl.acquire(shared=1)
self._addThread(target=self._doItDelete)
self.assertRaises(queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "DEL")
self.sl = locking.SharedLock(self.sl.name)
@_Repeat
def testWaitingExclusiveBlocksSharer(self):
"""SKIPPED testWaitingExclusiveBlockSharer"""
return
self.sl.acquire(shared=1)
# the lock is acquired in shared mode...
self._addThread(target=self._doItExclusive)
# ...but now an exclusive is waiting...
self._addThread(target=self._doItSharer)
# ...so the sharer should be blocked as well
self.assertRaises(queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
# The exclusive passed before
self.assertEqual(self.done.get_nowait(), "EXC")
self.assertEqual(self.done.get_nowait(), "SHR")
@_Repeat
def testWaitingSharerBlocksExclusive(self):
"""SKIPPED testWaitingSharerBlocksExclusive"""
return
self.sl.acquire()
# the lock is acquired in exclusive mode...
self._addThread(target=self._doItSharer)
# ...but now a sharer is waiting...
self._addThread(target=self._doItExclusive)
# ...the exclusive is waiting too...
self.assertRaises(queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
# The sharer passed before
self.assertEqual(self.done.get_nowait(), "SHR")
self.assertEqual(self.done.get_nowait(), "EXC")
def testDelete(self):
self.sl.delete()
self.assertRaises(errors.LockError, self.sl.acquire)
self.assertRaises(errors.LockError, self.sl.acquire, shared=1)
self.assertRaises(errors.LockError, self.sl.delete)
def testDeleteTimeout(self):
self.assertTrue(self.sl.delete(timeout=60))
def testDeleteTimeoutFail(self):
ready = threading.Event()
finish = threading.Event()
def fn():
self.sl.acquire(shared=0)
ready.set()
finish.wait()
self.sl.release()
self._addThread(target=fn)
ready.wait()
# Test if deleting a lock owned in exclusive mode by another thread fails
# to delete when a timeout is used
self.assertFalse(self.sl.delete(timeout=0.02))
finish.set()
self._waitThreads()
self.assertTrue(self.sl.delete())
self.assertRaises(errors.LockError, self.sl.acquire)
def testNoDeleteIfSharer(self):
self.sl.acquire(shared=1)
self.assertRaises(AssertionError, self.sl.delete)
@_Repeat
def testDeletePendingSharersExclusiveDelete(self):
self.sl.acquire()
self._addThread(target=self._doItSharer)
self._addThread(target=self._doItSharer)
self._addThread(target=self._doItExclusive)
self._addThread(target=self._doItDelete)
self.sl.delete()
self._waitThreads()
    # The four threads that were pending all return ERR
for _ in range(4):
self.assertEqual(self.done.get_nowait(), "ERR")
self.sl = locking.SharedLock(self.sl.name)
@_Repeat
def testDeletePendingDeleteExclusiveSharers(self):
self.sl.acquire()
self._addThread(target=self._doItDelete)
self._addThread(target=self._doItExclusive)
self._addThread(target=self._doItSharer)
self._addThread(target=self._doItSharer)
self.sl.delete()
self._waitThreads()
    # The four threads that were pending all return ERR
self.assertEqual(self.done.get_nowait(), "ERR")
self.assertEqual(self.done.get_nowait(), "ERR")
self.assertEqual(self.done.get_nowait(), "ERR")
self.assertEqual(self.done.get_nowait(), "ERR")
self.sl = locking.SharedLock(self.sl.name)
@_Repeat
def testExclusiveAcquireTimeout(self):
for shared in [0, 1]:
on_queue = threading.Event()
release_exclusive = threading.Event()
def _LockExclusive():
self.sl.acquire(shared=0, test_notify=on_queue.set)
self.done.put("A: start wait")
release_exclusive.wait()
self.done.put("A: end wait")
self.sl.release()
# Start thread to hold lock in exclusive mode
self._addThread(target=_LockExclusive)
# Wait for wait to begin
self.assertEqual(self.done.get(timeout=60), "A: start wait")
# Wait up to 60s to get lock, but release exclusive lock as soon as we're
# on the queue
self.assertTrue(self.sl.acquire(shared=shared, timeout=60,
test_notify=release_exclusive.set))
self.done.put("got 2nd")
self.sl.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "A: end wait")
self.assertEqual(self.done.get_nowait(), "got 2nd")
self.assertRaises(queue.Empty, self.done.get_nowait)
@_Repeat
def testAcquireExpiringTimeout(self):
def _AcquireWithTimeout(shared, timeout):
if not self.sl.acquire(shared=shared, timeout=timeout):
self.done.put("timeout")
for shared in [0, 1]:
# Lock exclusively
self.sl.acquire()
# Start shared acquires with timeout between 0 and 20 ms
for i in range(11):
self._addThread(target=_AcquireWithTimeout,
args=(shared, i * 2.0 / 1000.0))
# Wait for threads to finish (makes sure the acquire timeout expires
# before releasing the lock)
self._waitThreads()
# Release lock
self.sl.release()
for _ in range(11):
self.assertEqual(self.done.get_nowait(), "timeout")
self.assertRaises(queue.Empty, self.done.get_nowait)
@_Repeat
def testSharedSkipExclusiveAcquires(self):
# Tests whether shared acquires jump in front of exclusive acquires in the
# queue.
def _Acquire(shared, name, notify_ev, wait_ev):
if notify_ev:
notify_fn = notify_ev.set
else:
notify_fn = None
if wait_ev:
wait_ev.wait()
if not self.sl.acquire(shared=shared, test_notify=notify_fn):
return
self.done.put(name)
self.sl.release()
# Get exclusive lock while we fill the queue
self.sl.acquire()
shrcnt1 = 5
shrcnt2 = 7
shrcnt3 = 9
shrcnt4 = 2
# Add acquires using threading.Event for synchronization. They'll be
# acquired exactly in the order defined in this list.
acquires = (shrcnt1 * [(1, "shared 1")] +
3 * [(0, "exclusive 1")] +
shrcnt2 * [(1, "shared 2")] +
shrcnt3 * [(1, "shared 3")] +
shrcnt4 * [(1, "shared 4")] +
3 * [(0, "exclusive 2")])
ev_cur = None
ev_prev = None
for args in acquires:
ev_cur = threading.Event()
self._addThread(target=_Acquire, args=args + (ev_cur, ev_prev))
ev_prev = ev_cur
# Wait for last acquire to start
ev_prev.wait()
# Expect 6 pending exclusive acquires and 1 for all shared acquires
# together
self.assertEqual(self.sl._count_pending(), 7)
# Release exclusive lock and wait
self.sl.release()
self._waitThreads()
# Check sequence
for _ in range(shrcnt1 + shrcnt2 + shrcnt3 + shrcnt4):
# Shared locks aren't guaranteed to be notified in order, but they'll be
# first
tmp = self.done.get_nowait()
if tmp == "shared 1":
shrcnt1 -= 1
elif tmp == "shared 2":
shrcnt2 -= 1
elif tmp == "shared 3":
shrcnt3 -= 1
elif tmp == "shared 4":
shrcnt4 -= 1
self.assertEqual(shrcnt1, 0)
self.assertEqual(shrcnt2, 0)
self.assertEqual(shrcnt3, 0)
    self.assertEqual(shrcnt4, 0)
for _ in range(3):
self.assertEqual(self.done.get_nowait(), "exclusive 1")
for _ in range(3):
self.assertEqual(self.done.get_nowait(), "exclusive 2")
self.assertRaises(queue.Empty, self.done.get_nowait)
def testIllegalDowngrade(self):
# Not yet acquired
self.assertRaises(AssertionError, self.sl.downgrade)
# Acquire in shared mode, downgrade should be no-op
self.assertTrue(self.sl.acquire(shared=1))
self.assertTrue(self.sl.is_owned(shared=1))
self.assertTrue(self.sl.downgrade())
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
def testDowngrade(self):
self.assertTrue(self.sl.acquire())
self.assertTrue(self.sl.is_owned(shared=0))
self.assertTrue(self.sl.downgrade())
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
@_Repeat
def testDowngradeJumpsAheadOfExclusive(self):
def _KeepExclusive(ev_got, ev_downgrade, ev_release):
self.assertTrue(self.sl.acquire())
self.assertTrue(self.sl.is_owned(shared=0))
ev_got.set()
ev_downgrade.wait()
self.assertTrue(self.sl.is_owned(shared=0))
self.assertTrue(self.sl.downgrade())
self.assertTrue(self.sl.is_owned(shared=1))
ev_release.wait()
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
def _KeepExclusive2(ev_started, ev_release):
self.assertTrue(self.sl.acquire(test_notify=ev_started.set))
self.assertTrue(self.sl.is_owned(shared=0))
ev_release.wait()
self.assertTrue(self.sl.is_owned(shared=0))
self.sl.release()
def _KeepShared(ev_started, ev_got, ev_release):
self.assertTrue(self.sl.acquire(shared=1, test_notify=ev_started.set))
self.assertTrue(self.sl.is_owned(shared=1))
ev_got.set()
ev_release.wait()
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
# Acquire lock in exclusive mode
ev_got_excl1 = threading.Event()
ev_downgrade_excl1 = threading.Event()
ev_release_excl1 = threading.Event()
th_excl1 = self._addThread(target=_KeepExclusive,
args=(ev_got_excl1, ev_downgrade_excl1,
ev_release_excl1))
ev_got_excl1.wait()
# Start a second exclusive acquire
ev_started_excl2 = threading.Event()
ev_release_excl2 = threading.Event()
th_excl2 = self._addThread(target=_KeepExclusive2,
args=(ev_started_excl2, ev_release_excl2))
ev_started_excl2.wait()
# Start shared acquires, will jump ahead of second exclusive acquire when
# first exclusive acquire downgrades
ev_shared = [(threading.Event(), threading.Event()) for _ in range(5)]
ev_release_shared = threading.Event()
th_shared = [self._addThread(target=_KeepShared,
args=(ev_started, ev_got, ev_release_shared))
for (ev_started, ev_got) in ev_shared]
# Wait for all shared acquires to start
for (ev, _) in ev_shared:
ev.wait()
# Check lock information
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE, query.LQ_OWNER])),
[(self.sl.name, "exclusive", [th_excl1.getName()], None)])
[(_, _, _, pending), ] = self.sl.GetLockInfo(set([query.LQ_PENDING]))
self.assertEqual([(pendmode, sorted(waiting))
for (pendmode, waiting) in pending],
[("exclusive", [th_excl2.getName()]),
("shared", sorted(th.getName() for th in th_shared))])
# Shared acquires won't start until the exclusive lock is downgraded
ev_downgrade_excl1.set()
# Wait for all shared acquires to be successful
for (_, ev) in ev_shared:
ev.wait()
# Check lock information again
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE,
query.LQ_PENDING])),
[(self.sl.name, "shared", None,
[("exclusive", [th_excl2.getName()])])])
[(_, _, owner, _), ] = self.sl.GetLockInfo(set([query.LQ_OWNER]))
self.assertEqual(set(owner), set([th_excl1.getName()] +
[th.getName() for th in th_shared]))
ev_release_excl1.set()
ev_release_excl2.set()
ev_release_shared.set()
self._waitThreads()
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE, query.LQ_OWNER,
query.LQ_PENDING])),
[(self.sl.name, None, None, [])])
@_Repeat
def testMixedAcquireTimeout(self):
sync = threading.Event()
def _AcquireShared(ev):
if not self.sl.acquire(shared=1, timeout=None):
return
self.done.put("shared")
# Notify main thread
ev.set()
# Wait for notification from main thread
sync.wait()
# Release lock
self.sl.release()
acquires = []
for _ in range(3):
ev = threading.Event()
self._addThread(target=_AcquireShared, args=(ev, ))
acquires.append(ev)
# Wait for all acquires to finish
for i in acquires:
i.wait()
self.assertEqual(self.sl._count_pending(), 0)
# Try to get exclusive lock
self.assertFalse(self.sl.acquire(shared=0, timeout=0.02))
# Acquire exclusive without timeout
exclsync = threading.Event()
exclev = threading.Event()
def _AcquireExclusive():
if not self.sl.acquire(shared=0):
return
self.done.put("exclusive")
# Notify main thread
exclev.set()
# Wait for notification from main thread
exclsync.wait()
self.sl.release()
self._addThread(target=_AcquireExclusive)
# Try to get exclusive lock
self.assertFalse(self.sl.acquire(shared=0, timeout=0.02))
# Make all shared holders release their locks
sync.set()
# Wait for exclusive acquire to succeed
exclev.wait()
self.assertEqual(self.sl._count_pending(), 0)
# Try to get exclusive lock
self.assertFalse(self.sl.acquire(shared=0, timeout=0.02))
def _AcquireSharedSimple():
if self.sl.acquire(shared=1, timeout=None):
self.done.put("shared2")
self.sl.release()
for _ in range(10):
self._addThread(target=_AcquireSharedSimple)
# Tell exclusive lock to release
exclsync.set()
# Wait for everything to finish
self._waitThreads()
self.assertEqual(self.sl._count_pending(), 0)
# Check sequence
for _ in range(3):
self.assertEqual(self.done.get_nowait(), "shared")
self.assertEqual(self.done.get_nowait(), "exclusive")
for _ in range(10):
self.assertEqual(self.done.get_nowait(), "shared2")
self.assertRaises(queue.Empty, self.done.get_nowait)
def testPriority(self):
# Acquire in exclusive mode
self.assertTrue(self.sl.acquire(shared=0))
# Queue acquires
def _Acquire(prev, next, shared, priority, result):
prev.wait()
self.sl.acquire(shared=shared, priority=priority, test_notify=next.set)
try:
self.done.put(result)
finally:
self.sl.release()
counter = itertools.count(0)
priorities = range(-20, 30)
first = threading.Event()
prev = first
# Data structure:
# {
# priority:
# [(shared/exclusive, set(acquire names), set(pending threads)),
# (shared/exclusive, ...),
# ...,
# ],
# }
perprio = {}
# References shared acquire per priority in L{perprio}. Data structure:
# {
# priority: (shared=1, set(acquire names), set(pending threads)),
# }
prioshared = {}
for seed in [4979, 9523, 14902, 32440]:
# Use a deterministic random generator
rnd = random.Random(seed)
for priority in [rnd.choice(priorities) for _ in range(30)]:
modes = [0, 1]
rnd.shuffle(modes)
for shared in modes:
# Unique name
acqname = "%s/shr=%s/prio=%s" % (next(counter), shared, priority)
ev = threading.Event()
thread = self._addThread(target=_Acquire,
args=(prev, ev, shared, priority, acqname))
prev = ev
          # Record expected acquire, see above for structure
data = (shared, set([acqname]), set([thread]))
priolist = perprio.setdefault(priority, [])
if shared:
priosh = prioshared.get(priority, None)
if priosh:
# Shared acquires are merged
for i, j in zip(priosh[1:], data[1:]):
i.update(j)
assert data[0] == priosh[0]
else:
prioshared[priority] = data
priolist.append(data)
else:
priolist.append(data)
# Start all acquires and wait for them
first.set()
prev.wait()
# Check lock information
self.assertEqual(self.sl.GetLockInfo(set()),
[(self.sl.name, None, None, None)])
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE, query.LQ_OWNER])),
[(self.sl.name, "exclusive",
[threading.currentThread().getName()], None)])
self._VerifyPrioPending(self.sl.GetLockInfo(set([query.LQ_PENDING])),
perprio)
# Let threads acquire the lock
self.sl.release()
# Wait for everything to finish
self._waitThreads()
self.assertTrue(self.sl._check_empty())
# Check acquires by priority
for acquires in [perprio[i] for i in sorted(perprio.keys())]:
for (_, names, _) in acquires:
# For shared acquires, the set will contain 1..n entries. For exclusive
# acquires only one.
while names:
names.remove(self.done.get_nowait())
self.assertFalse(compat.any(names for (_, names, _) in acquires))
self.assertRaises(queue.Empty, self.done.get_nowait)
def _VerifyPrioPending(self, lockinfo, perprio):
((name, mode, owner, pending), ) = lockinfo
self.assertEqual(name, self.sl.name)
self.assertTrue(mode is None)
self.assertTrue(owner is None)
self.assertEqual([(pendmode, sorted(waiting))
for (pendmode, waiting) in pending],
[(["exclusive", "shared"][int(bool(shared))],
sorted(t.getName() for t in threads))
for acquires in [perprio[i]
for i in sorted(perprio.keys())]
for (shared, _, threads) in acquires])
class _FakeTimeForSpuriousNotifications:
def __init__(self, now, check_end):
self.now = now
self.check_end = check_end
# Deterministic random number generator
self.rnd = random.Random(15086)
def time(self):
# Advance time if the random number generator thinks so (this is to test
# multiple notifications without advancing the time)
if self.rnd.random() < 0.3:
self.now += self.rnd.random()
self.check_end(self.now)
return self.now
@_Repeat
def testAcquireTimeoutWithSpuriousNotifications(self):
ready = threading.Event()
locked = threading.Event()
req = queue.Queue(0)
epoch = 4000.0
timeout = 60.0
def check_end(now):
self.assertFalse(locked.isSet())
# If we waited long enough (in virtual time), tell main thread to release
# lock, otherwise tell it to notify once more
req.put(now < (epoch + (timeout * 0.8)))
time_fn = self._FakeTimeForSpuriousNotifications(epoch, check_end).time
sl = locking.SharedLock("test", _time_fn=time_fn)
# Acquire in exclusive mode
sl.acquire(shared=0)
def fn():
self.assertTrue(sl.acquire(shared=0, timeout=timeout,
test_notify=ready.set))
locked.set()
sl.release()
self.done.put("success")
# Start acquire with timeout and wait for it to be ready
self._addThread(target=fn)
ready.wait()
# The separate thread is now waiting to acquire the lock, so start sending
# spurious notifications.
# Wait for separate thread to ask for another notification
count = 0
while req.get():
# After sending the notification, the lock will take a short amount of
# time to notice and to retrieve the current time
sl._notify_topmost()
count += 1
self.assertTrue(count > 100, "Not enough notifications were sent")
self.assertFalse(locked.isSet())
# Some notifications have been sent, now actually release the lock
sl.release()
# Wait for lock to be acquired
locked.wait()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "success")
self.assertRaises(queue.Empty, self.done.get_nowait)
class TestSharedLockInCondition(_ThreadedTestCase):
"""SharedLock as a condition lock tests"""
def setUp(self):
_ThreadedTestCase.setUp(self)
self.sl = locking.SharedLock("TestSharedLockInCondition")
self.setCondition()
def setCondition(self):
self.cond = threading.Condition(self.sl)
def testKeepMode(self):
self.cond.acquire(shared=1)
self.assertTrue(self.sl.is_owned(shared=1))
self.cond.wait(0)
self.assertTrue(self.sl.is_owned(shared=1))
self.cond.release()
self.cond.acquire(shared=0)
self.assertTrue(self.sl.is_owned(shared=0))
self.cond.wait(0)
self.assertTrue(self.sl.is_owned(shared=0))
self.cond.release()
class TestSharedLockInPipeCondition(TestSharedLockInCondition):
"""SharedLock as a pipe condition lock tests"""
def setCondition(self):
self.cond = locking.PipeCondition(self.sl)
class TestSSynchronizedDecorator(_ThreadedTestCase):
"""Shared Lock Synchronized decorator test"""
def setUp(self):
_ThreadedTestCase.setUp(self)
@locking.ssynchronized(_decoratorlock)
def _doItExclusive(self):
self.assertTrue(_decoratorlock.is_owned())
self.done.put("EXC")
@locking.ssynchronized(_decoratorlock, shared=1)
def _doItSharer(self):
self.assertTrue(_decoratorlock.is_owned(shared=1))
self.done.put("SHR")
def testDecoratedFunctions(self):
self._doItExclusive()
self.assertFalse(_decoratorlock.is_owned())
self._doItSharer()
self.assertFalse(_decoratorlock.is_owned())
def testSharersCanCoexist(self):
_decoratorlock.acquire(shared=1)
threading.Thread(target=self._doItSharer).start()
self.assertTrue(self.done.get(True, 1))
_decoratorlock.release()
@_Repeat
def testExclusiveBlocksExclusive(self):
_decoratorlock.acquire()
self._addThread(target=self._doItExclusive)
# give it a bit of time to check that it's not actually doing anything
self.assertRaises(queue.Empty, self.done.get_nowait)
_decoratorlock.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "EXC")
@_Repeat
def testExclusiveBlocksSharer(self):
_decoratorlock.acquire()
self._addThread(target=self._doItSharer)
self.assertRaises(queue.Empty, self.done.get_nowait)
_decoratorlock.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "SHR")
@_Repeat
def testSharerBlocksExclusive(self):
_decoratorlock.acquire(shared=1)
self._addThread(target=self._doItExclusive)
self.assertRaises(queue.Empty, self.done.get_nowait)
_decoratorlock.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "EXC")
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
ganeti/ganeti
|
test/py/ganeti.locking_unittest.py
|
Python
|
bsd-2-clause
| 34,642
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
import numpy as np
from ... import units as u
from .. import SphericalRepresentation, Longitude, Latitude
from ...tests.helper import pytest
from ...utils.compat.numpycompat import NUMPY_LT_1_9
from ...utils.compat.numpy import broadcast_to as np_broadcast_to
class TestManipulation():
"""Manipulation of Representation shapes.
Checking that attributes are manipulated correctly.
Even more exhaustive tests are done in time.tests.test_methods
"""
def setup(self):
lon = Longitude(np.arange(0, 24, 4), u.hourangle)
lat = Latitude(np.arange(-90, 91, 30), u.deg)
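        # lon has 6 entries and lat has 7, so the representations built below
        # all end up with shape (6, 7).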
# With same-sized arrays
self.s0 = SphericalRepresentation(
lon[:, np.newaxis] * np.ones(lat.shape),
lat * np.ones(lon.shape)[:, np.newaxis],
np.ones(lon.shape + lat.shape) * u.kpc)
# With unequal arrays -> these will be broadcast.
self.s1 = SphericalRepresentation(lon[:, np.newaxis], lat, 1. * u.kpc)
# For completeness on some tests, also a cartesian one
self.c0 = self.s0.to_cartesian()
def test_ravel(self):
s0_ravel = self.s0.ravel()
assert type(s0_ravel) is type(self.s0)
assert s0_ravel.shape == (self.s0.size,)
assert np.all(s0_ravel.lon == self.s0.lon.ravel())
assert np.may_share_memory(s0_ravel.lon, self.s0.lon)
assert np.may_share_memory(s0_ravel.lat, self.s0.lat)
assert np.may_share_memory(s0_ravel.distance, self.s0.distance)
# Since s1 was broadcast, ravel needs to make a copy.
s1_ravel = self.s1.ravel()
assert type(s1_ravel) is type(self.s1)
assert s1_ravel.shape == (self.s1.size,)
assert np.all(s1_ravel.lon == self.s1.lon.ravel())
assert not np.may_share_memory(s1_ravel.lat, self.s1.lat)
def test_copy(self):
s0_copy = self.s0.copy()
assert s0_copy.shape == self.s0.shape
assert np.all(s0_copy.lon == self.s0.lon)
assert np.all(s0_copy.lat == self.s0.lat)
# Check copy was made of internal data.
assert not np.may_share_memory(s0_copy.distance, self.s0.distance)
def test_flatten(self):
s0_flatten = self.s0.flatten()
assert s0_flatten.shape == (self.s0.size,)
assert np.all(s0_flatten.lon == self.s0.lon.flatten())
# Flatten always copies.
assert not np.may_share_memory(s0_flatten.distance, self.s0.distance)
s1_flatten = self.s1.flatten()
assert s1_flatten.shape == (self.s1.size,)
assert np.all(s1_flatten.lon == self.s1.lon.flatten())
assert not np.may_share_memory(s1_flatten.lat, self.s1.lat)
def test_transpose(self):
s0_transpose = self.s0.transpose()
assert s0_transpose.shape == (7, 6)
assert np.all(s0_transpose.lon == self.s0.lon.transpose())
assert np.may_share_memory(s0_transpose.distance, self.s0.distance)
s1_transpose = self.s1.transpose()
assert s1_transpose.shape == (7, 6)
assert np.all(s1_transpose.lat == self.s1.lat.transpose())
assert np.may_share_memory(s1_transpose.lat, self.s1.lat)
# Only one check on T, since it just calls transpose anyway.
# Doing it on the CartesianRepresentation just for variety's sake.
c0_T = self.c0.T
assert c0_T.shape == (7, 6)
assert np.all(c0_T.x == self.c0.x.T)
assert np.may_share_memory(c0_T.y, self.c0.y)
def test_diagonal(self):
s0_diagonal = self.s0.diagonal()
assert s0_diagonal.shape == (6,)
assert np.all(s0_diagonal.lat == self.s0.lat.diagonal())
if not NUMPY_LT_1_9:
assert np.may_share_memory(s0_diagonal.lat, self.s0.lat)
def test_swapaxes(self):
s1_swapaxes = self.s1.swapaxes(0, 1)
assert s1_swapaxes.shape == (7, 6)
assert np.all(s1_swapaxes.lat == self.s1.lat.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.lat, self.s1.lat)
def test_reshape(self):
s0_reshape = self.s0.reshape(2, 3, 7)
assert s0_reshape.shape == (2, 3, 7)
assert np.all(s0_reshape.lon == self.s0.lon.reshape(2, 3, 7))
assert np.all(s0_reshape.lat == self.s0.lat.reshape(2, 3, 7))
assert np.all(s0_reshape.distance == self.s0.distance.reshape(2, 3, 7))
assert np.may_share_memory(s0_reshape.lon, self.s0.lon)
assert np.may_share_memory(s0_reshape.lat, self.s0.lat)
assert np.may_share_memory(s0_reshape.distance, self.s0.distance)
s1_reshape = self.s1.reshape(3, 2, 7)
assert s1_reshape.shape == (3, 2, 7)
assert np.all(s1_reshape.lat == self.s1.lat.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.lat, self.s1.lat)
        # For reshape(3, 14), copying is necessary for lon, lat, but not for distance
s1_reshape2 = self.s1.reshape(3, 14)
assert s1_reshape2.shape == (3, 14)
assert np.all(s1_reshape2.lon == self.s1.lon.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.lon, self.s1.lon)
assert s1_reshape2.distance.shape == (3, 14)
assert np.may_share_memory(s1_reshape2.distance, self.s1.distance)
def test_shape_setting(self):
# Shape-setting should be on the object itself, since copying removes
# zero-strides due to broadcasting. We reset the objects at the end.
self.s0.shape = (2, 3, 7)
assert self.s0.shape == (2, 3, 7)
assert self.s0.lon.shape == (2, 3, 7)
assert self.s0.lat.shape == (2, 3, 7)
assert self.s0.distance.shape == (2, 3, 7)
# this works with the broadcasting.
self.s1.shape = (2, 3, 7)
assert self.s1.shape == (2, 3, 7)
assert self.s1.lon.shape == (2, 3, 7)
assert self.s1.lat.shape == (2, 3, 7)
assert self.s1.distance.shape == (2, 3, 7)
assert self.s1.distance.strides == (0, 0, 0)
# but this one does not.
oldshape = self.s1.shape
with pytest.raises(AttributeError):
self.s1.shape = (42,)
assert self.s1.shape == oldshape
assert self.s1.lon.shape == oldshape
assert self.s1.lat.shape == oldshape
assert self.s1.distance.shape == oldshape
# Finally, a more complicated one that checks that things get reset
# properly if it is not the first component that fails.
s2 = SphericalRepresentation(self.s1.lon.copy(), self.s1.lat,
self.s1.distance, copy=False)
assert 0 not in s2.lon.strides
assert 0 in s2.lat.strides
with pytest.raises(AttributeError):
s2.shape = (42,)
assert s2.shape == oldshape
assert s2.lon.shape == oldshape
assert s2.lat.shape == oldshape
assert s2.distance.shape == oldshape
assert 0 not in s2.lon.strides
assert 0 in s2.lat.strides
self.setup()
def test_squeeze(self):
s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze()
assert s0_squeeze.shape == (3, 2, 7)
assert np.all(s0_squeeze.lat == self.s0.lat.reshape(3, 2, 7))
assert np.may_share_memory(s0_squeeze.lat, self.s0.lat)
def test_add_dimension(self):
s0_adddim = self.s0[:, np.newaxis, :]
assert s0_adddim.shape == (6, 1, 7)
assert np.all(s0_adddim.lon == self.s0.lon[:, np.newaxis, :])
assert np.may_share_memory(s0_adddim.lat, self.s0.lat)
def test_take(self):
s0_take = self.s0.take((5, 2))
assert s0_take.shape == (2,)
assert np.all(s0_take.lon == self.s0.lon.take((5, 2)))
def test_broadcast_to(self):
s0_broadcast = self.s0._apply(np_broadcast_to, (3, 6, 7), subok=True)
assert type(s0_broadcast) is type(self.s0)
assert s0_broadcast.shape == (3, 6, 7)
assert np.all(s0_broadcast.lon == self.s0.lon)
assert np.all(s0_broadcast.lat == self.s0.lat)
assert np.all(s0_broadcast.distance == self.s0.distance)
assert np.may_share_memory(s0_broadcast.lon, self.s0.lon)
assert np.may_share_memory(s0_broadcast.lat, self.s0.lat)
assert np.may_share_memory(s0_broadcast.distance, self.s0.distance)
s1_broadcast = self.s1._apply(np_broadcast_to, shape=(3, 6, 7),
subok=True)
assert s1_broadcast.shape == (3, 6, 7)
assert np.all(s1_broadcast.lat == self.s1.lat)
assert np.all(s1_broadcast.lon == self.s1.lon)
assert np.all(s1_broadcast.distance == self.s1.distance)
assert s1_broadcast.distance.shape == (3, 6, 7)
assert np.may_share_memory(s1_broadcast.lat, self.s1.lat)
assert np.may_share_memory(s1_broadcast.lon, self.s1.lon)
assert np.may_share_memory(s1_broadcast.distance, self.s1.distance)
# A final test that "may_share_memory" equals "does_share_memory"
# Do this on a copy, to keep self.s0 unchanged.
sc = self.s0.copy()
assert not np.may_share_memory(sc.lon, self.s0.lon)
assert not np.may_share_memory(sc.lat, self.s0.lat)
sc_broadcast = sc._apply(np_broadcast_to, (3, 6, 7), subok=True)
assert np.may_share_memory(sc_broadcast.lon, sc.lon)
# Can only write to copy, not to broadcast version.
sc.lon[0, 0] = 22. * u.hourangle
assert np.all(sc_broadcast.lon[:, 0, 0] == 22. * u.hourangle)
|
joergdietrich/astropy
|
astropy/coordinates/tests/test_representation_methods.py
|
Python
|
bsd-3-clause
| 9,461
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Module utils.misc contains utilities."""
from mriqc.utils.bids import collect_bids_data
from mriqc.utils.misc import reorder_csv
__all__ = [
"reorder_csv",
"collect_bids_data",
]
|
poldracklab/mriqc
|
mriqc/utils/__init__.py
|
Python
|
bsd-3-clause
| 1,070
|
import pathlib
from collections import defaultdict
from operator import itemgetter
from itertools import groupby
from flask import Flask, send_from_directory, request, jsonify
from logbook import Logger
from aesop import isocodes, events
from aesop.models import Movie, TVShow, TVShowEpisode, Source, Config, database_proxy, Genre, MovieGenre, TVShowGenre
app = Flask('aesop.ui')
log = Logger('aesop.ui')
@app.route('/')
def root():
templates = str(pathlib.Path(__file__).with_name('templates'))
return send_from_directory(templates, 'index.html')
@app.route('/series')
def series():
series = list(TVShow.select().order_by(TVShow.title).dicts())
tvshow_genre_map = Genre.select(TVShowGenre.media, Genre.text).join(TVShowGenre).tuples()
d = defaultdict(list)
for show_id, text in tvshow_genre_map:
d[show_id].append(text)
for tv in series:
tv['genres'] = d[tv['id']]
return jsonify({'data': series})
@app.route('/series/<id>')
def singleseries(id):
tvshow = TVShow.select(TVShow.media_id, TVShow.title).where(TVShow.media_id == id).dicts().get()
return jsonify({'data': tvshow})
# this and set_watched_movie are not websocket commands because they need data
# back.
@app.route('/series/setwatched/<int:video_id>', methods=['POST'])
def set_watched_series(video_id):
m = TVShowEpisode.select().where(TVShowEpisode.id == video_id).get()
with database_proxy.transaction():
m.watched = not m.watched
m.save()
show = m.show
if all([episode.watched for episode in show.episodes]):
show.watched = True
if show.is_dirty():
show.save()
return jsonify({'watched': m.watched})
@app.route('/series/<id>/seasons')
def seasons(id):
tvshow = TVShow.select().where(TVShow.media_id == id).get()
seasons = tvshow.episodes.select(TVShowEpisode.season, TVShowEpisode.watched).group_by(TVShowEpisode.season, TVShowEpisode.watched).dicts()
collapsed_seasons = defaultdict(bool)
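    # collapsed_seasons maps a season number to True only when every
    # (season, watched) group returned by the query above is watched.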
for season in seasons:
watched = season['watched']
season = season['season']
if season in collapsed_seasons:
watched = collapsed_seasons[season] and watched
collapsed_seasons[season] = watched
seasons = [dict(season=season, watched=watched) for (season, watched) in collapsed_seasons.items()]
return jsonify({'data': seasons})
@app.route('/series/<id>/episodes/<int:season>')
def episodes(id, season):
tvshow = TVShow.select().where(TVShow.media_id == id).get()
return jsonify({'data': list(tvshow.episodes.select().where(TVShowEpisode.season == season).order_by(TVShowEpisode.episode).dicts())})
@app.route('/movies')
def movies():
movies = list(Movie.select(Movie.id, Movie.title, Movie.watched, Movie.year).order_by(Movie.title).dicts())
movie_genre_map = Genre.select(MovieGenre.media, Genre.text).join(MovieGenre).tuples()
d = defaultdict(list)
for movie_id, text in movie_genre_map:
d[movie_id].append(text)
for m in movies:
m['genres'] = d[m['id']]
return jsonify({'data': movies})
@app.route('/movies/<int:id>', methods=['GET', 'POST'])
def movie(id):
if request.method == 'POST':
genres = request.json['movie'].pop('genres')
Movie.update(**request.json['movie']).where(Movie.id == id).execute()
m = Movie.get(Movie.id == id)
m.replace_genres([Genre.get_or_create(g) for g in genres])
return jsonify({'status': 'ok'})
else:
movie = Movie.select().where(Movie.id == id).dicts().get()
q = Genre.select(Genre.text).join(MovieGenre).where(MovieGenre.media == movie['id'])
movie['genres'] = [g[0] for g in q.tuples()]
return jsonify({'movie': movie})
@app.route('/movies/setwatched/<int:video_id>', methods=['POST'])
def set_watched_movie(video_id):
m = Movie.select(Movie.id, Movie.watched).where(Movie.id == video_id).get()
m.watched = not m.watched
m.save()
return jsonify({'watched': m.watched})
@app.route('/genres')
def genres():
return jsonify({'genres': [g[0] for g in Genre.select(Genre.text).order_by(Genre.text).tuples()]})
@app.route('/update/', methods=['POST'])
def update():
raise NotImplementedError()
@app.route('/settings/', methods=['GET', 'POST'])
def settings():
if request.method == 'POST':
from aesop.models import database
try:
with database.transaction():
Config.delete().execute()
for setting in request.json['configuration']:
Config.create(**setting)
Source.delete().execute()
for setting in request.json['sources']:
Source.create(**setting)
except Exception as e:
events.error.blocking("Settings could not be saved: {!r}".format(str(e)))
raise
else:
events.success.blocking("Settings saved")
else:
configuration = []
for section, values in groupby(list(Config.select(Config.section, Config.key, Config.value).dicts()), key=itemgetter('section')):
configuration.append({
'name': section,
'values': [config_with_help(v) for v in values],
})
return jsonify({
'configuration': configuration,
'sources': list(Source.select().dicts()),
})
return jsonify({'response': 'ok'})
@app.route('/stats/')
def stats():
series = TVShow.select().count()
episodes = TVShowEpisode.select().count()
episodes_watched = TVShowEpisode.select().where(TVShowEpisode.watched == True).count()
movies = Movie.select().count()
movies_watched = Movie.select().where(Movie.watched == True).count()
stats = {
'series': series,
'episodes': episodes,
'episodes watched': episodes_watched,
'movies': movies,
'movies watched': movies_watched,
}
return jsonify({'stats': stats})
@app.route('/manifest.json')
def manifest():
return jsonify({
'name': 'Aesop',
"start_url": "/",
"display": "standalone",
})
@app.route('/search/genres/')
def get_upstream_genres():
imdb_id = request.values['i']
video_type = request.values['type']
upstream = request.values.get('m', 'omdb')
if upstream == 'omdb':
import requests
params = {
'i': imdb_id,
'p': 'full',
'type': video_type,
}
resp = requests.get('http://www.omdbapi.com/?', params=params)
json = resp.json()
if json['Response'] != 'False':
genres = json['Genre'].split(', ')
else:
genres = []
else:
assert False, "Unknown upstream type {!r}".format(upstream)
return jsonify({'genres': genres})
@app.route('/search/')
def search_upstream():
query = request.values['q']
video_type = request.values['type']
upstream = request.values.get('m', 'omdb')
if len(query) < 3:
results = []
elif upstream == 'omdb':
import requests
params = {
's': query,
'type': video_type,
}
resp = requests.get('http://www.omdbapi.com/', params=params)
results = resp.json().get('Search', [])
results = [
dict(title=t['Title'], year=int(t['Year']), id=t['imdbID'],
description='{} {}'.format(t['Year'], t['Title']))
for t in results
]
else:
assert False, "Unknown upstream type {!r}".format(upstream)
return jsonify({'results': list(results)})
help_map = {
'concurrency': 'Amount of concurrent requests to perform when retrieving video metadata.',
'frequency': 'How frequently to scan for new videos',
'theme': 'Website theme to use',
'seek size': 'Amount of time in seconds to jump forward/backward',
'subtitles for matching audio': 'Should subtitles be automatically enabled if the audio and subtitles language are the same?',
'video output': 'Video Output Driver. Messing with this can break things so be careful',
}
isochoices = [dict(display='-- None --', value='0')] + sorted([
dict(display=nicename, value=iso)
for (iso, nicename) in isocodes.isocodes.items()
], key=itemgetter('display'))
extras_map = {
'theme': {
'choices': {
'cyborg': 'Cyborg',
'darkly': 'Darkly',
'flatly': 'Flatly',
'journal': 'Journal',
'cosmo': 'Cosmo',
'cerulean': 'Cerulean',
},
},
'preferred subtitle': {
'choices': isochoices,
'typeahead': 'Preferred Subtitle Language',
'default': '',
},
'preferred audio': {
'choices': isochoices,
'typeahead': 'Preferred Audio Language',
'default': '',
},
'subtitles for matching audio': {
'choices': {
'1': 'Yes',
'0': 'No',
},
},
'concurrency': {
'type': 'number',
},
}
def config_with_help(config):
config['help'] = help_map.get(config['key'], '')
config.update(extras_map.get(config['key'], {}))
if 'typeahead' in config:
value = config['value']
choices = config.get('choices', [])
for choice in choices:
if choice['value'] == value:
config['value'] = dict(value=value, display=choice['display'])
break
if config['key'] == 'concurrency':
config['value'] = int(config['value'])
return config
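# Illustrative (hypothetical) input/output for config_with_help: a row such as
#     {'section': 'appearance', 'key': 'theme', 'value': 'darkly'}
# comes back as
#     {'section': 'appearance', 'key': 'theme', 'value': 'darkly',
#      'help': 'Website theme to use',
#      'choices': {'cyborg': 'Cyborg', 'darkly': 'Darkly', 'flatly': 'Flatly',
#                  'journal': 'Journal', 'cosmo': 'Cosmo', 'cerulean': 'Cerulean'}}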
def main():
from aesop.models import init
from aesop.utils import setup_logging
setup_logging('aesop.ui', 'INFO')
init()
app.run(debug=True, host='0.0.0.0')
if __name__ == '__main__':
main()
|
nathan-hoad/aesop
|
aesop/ui.py
|
Python
|
bsd-3-clause
| 9,806
|
import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import pytest
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.ensemble._hist_gradient_boosting.types import (
G_H_DTYPE, PREDICTOR_RECORD_DTYPE)
@pytest.mark.parametrize('max_bins', [200, 256])
def test_boston_dataset(max_bins):
boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(
boston.data, boston.target, random_state=42)
mapper = _BinMapper(max_bins=max_bins, random_state=42)
X_train_binned = mapper.fit_transform(X_train)
# Init gradients and hessians to that of least squares loss
gradients = -y_train.astype(G_H_DTYPE)
hessians = np.ones(1, dtype=G_H_DTYPE)
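    # A one-element hessians array is treated as a constant hessian by the
    # grower, which is the case for the least squares loss.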
min_samples_leaf = 8
max_leaf_nodes = 31
grower = TreeGrower(X_train_binned, gradients, hessians,
min_samples_leaf=min_samples_leaf,
max_leaf_nodes=max_leaf_nodes, max_bins=max_bins,
actual_n_bins=mapper.actual_n_bins_)
grower.grow()
predictor = grower.make_predictor(bin_thresholds=mapper.bin_thresholds_)
assert r2_score(y_train, predictor.predict(X_train)) > 0.85
assert r2_score(y_test, predictor.predict(X_test)) > 0.70
@pytest.mark.parametrize('threshold, expected_predictions', [
(-np.inf, [0, 1, 1, 1]),
(10, [0, 0, 1, 1]),
(20, [0, 0, 0, 1]),
(np.inf, [0, 0, 0, 1]),
])
def test_infinite_values_and_thresholds(threshold, expected_predictions):
# Make sure infinite values and infinite thresholds are handled properly.
    # In particular, if a value is +inf and the threshold is +inf, the sample
# should go to the right child.
X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1)
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
# We just construct a simple tree with 1 root and 2 children
# parent node
nodes[0]['left'] = 1
nodes[0]['right'] = 2
nodes[0]['feature_idx'] = 0
nodes[0]['threshold'] = threshold
# left child
nodes[1]['is_leaf'] = True
nodes[1]['value'] = 0
# right child
nodes[2]['is_leaf'] = True
nodes[2]['value'] = 1
predictor = TreePredictor(nodes)
predictions = predictor.predict(X)
assert np.all(predictions == expected_predictions)
|
chrsrds/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py
|
Python
|
bsd-3-clause
| 2,540
|
"""Small but quite comprehensive example showing the use of PyTables.
The program creates an output file, 'tutorial1.h5'. You can view it
with any HDF5 generic utility.
"""
import numpy as np
import tables as tb
#'-**-**-**-**-**-**- user record definition -**-**-**-**-**-**-**-'
# Define a user record to characterize some kind of particles
class Particle(tb.IsDescription):
name = tb.StringCol(16) # 16-character String
idnumber = tb.Int64Col() # Signed 64-bit integer
ADCcount = tb.UInt16Col() # Unsigned short integer
TDCcount = tb.UInt8Col() # unsigned byte
grid_i = tb.Int32Col() # integer
grid_j = tb.Int32Col() # integer
pressure = tb.Float32Col() # float (single-precision)
energy = tb.Float64Col() # double (double-precision)
print()
print('-**-**-**-**-**-**- file creation -**-**-**-**-**-**-**-')
# The name of our HDF5 filename
filename = "tutorial1.h5"
print("Creating file:", filename)
# Open a file in "w"rite mode
h5file = tb.open_file(filename, mode="w", title="Test file")
print()
print('-**-**-**-**-**- group and table creation -**-**-**-**-**-**-**-')
# Create a new group under "/" (root)
group = h5file.create_group("/", 'detector', 'Detector information')
print("Group '/detector' created")
# Create one table on it
table = h5file.create_table(group, 'readout', Particle, "Readout example")
print("Table '/detector/readout' created")
# Print the file
print(h5file)
print()
print(repr(h5file))
# Get a shortcut to the record object in table
particle = table.row
# Fill the table with 10 particles
for i in range(10):
particle['name'] = 'Particle: %6d' % (i)
particle['TDCcount'] = i % 256
particle['ADCcount'] = (i * 256) % (1 << 16)
particle['grid_i'] = i
particle['grid_j'] = 10 - i
particle['pressure'] = float(i * i)
particle['energy'] = float(particle['pressure'] ** 4)
particle['idnumber'] = i * (2 ** 34)
particle.append()
# Flush the buffers for table
table.flush()
print()
print('-**-**-**-**-**-**- table data reading & selection -**-**-**-**-**-')
# Read actual data from table. We are interested in collecting pressure values
# on entries where the TDCcount field is greater than 3 and the pressure is
# between 20 and 50.
xs = [x for x in table.iterrows()
if x['TDCcount'] > 3 and 20 <= x['pressure'] < 50]
pressure = [x['pressure'] for x in xs]
print("Last record read:")
print(repr(xs[-1]))
print("Field pressure elements satisfying the cuts:")
print(repr(pressure))
# Read also the names with the same cuts
names = [
x['name'] for x in table.where(
"""(TDCcount > 3) & (20 <= pressure) & (pressure < 50)""")
]
print("Field names elements satisfying the cuts:")
print(repr(names))
print()
print('-**-**-**-**-**-**- array object creation -**-**-**-**-**-**-**-')
print("Creating a new group called '/columns' to hold new arrays")
gcolumns = h5file.create_group(h5file.root, "columns", "Pressure and Name")
print("Creating an array called 'pressure' under '/columns' group")
h5file.create_array(gcolumns, 'pressure', np.array(pressure),
"Pressure column selection")
print(repr(h5file.root.columns.pressure))
print("Creating another array called 'name' under '/columns' group")
h5file.create_array(gcolumns, 'name', names, "Name column selection")
print(repr(h5file.root.columns.name))
print("HDF5 file:")
print(h5file)
# Close the file
h5file.close()
print("File '" + filename + "' created")
|
avalentino/PyTables
|
examples/tutorial1-1.py
|
Python
|
bsd-3-clause
| 3,487
|
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import numpy as np
from collections import Counter
def most_common(lst):
return max(set(lst), key=lst.count)
class CLF:
""" Implements a classifier for hierarchical clustering
Parameters:
----------
clf_type : str
Type of cluster, either 'svm' or 'logreg'
kwarg : optional arguments for SVM (see SVM definition below for name of keyword arguments)
"""
def __init__(self, clf_type='svm', n_average=10, test_size = 0.8, clf_args=None):
self.clf_type = clf_type
self.n_average = n_average
self.test_size = test_size
self.clf_args = clf_args
self.trained = False
self.cv_score = 1.0
def fit(self, X, y):
""" Fit clf to data.
Parameters
------------
X: array, shape = (n_sample, n_feature)
your data
y: array, shape = (n_sample, 1)
your labels
Other parameters
------------
self.n_average : int
number of classifiers to train (will then take majority vote)
self.test_size: float
ratio of test size (between 0 and 1).
Return
-------
self, CLF object
"""
#### ----------
# #### ----------
self.trained = True
if self.clf_type == 'svm':
if self.clf_args is not None:
clf = SVC(**self.clf_args)
else:
clf = SVC()
elif self.clf_type == 'rf':
if self.clf_args is not None:
clf = RandomForestClassifier(**self.clf_args)
#clf = RandomForestClassifier(**self.clf_args, warm_start=True)
else:
clf = RandomForestClassifier()
n_average = self.n_average
predict_score = []
training_score = []
clf_list = []
xtrain_scaler_list = []
n_sample = X.shape[0]
zero_eps = 1e-6
y_unique = np.unique(y) # different labels
assert len(y_unique)>1, "Cluster provided only has a unique label, can't classify !"
n_sample = X.shape[0]
idx = np.arange(n_sample)
yu_pos = {yu : idx[(y == yu)] for yu in y_unique}
n_class = len(y_unique)
for _ in range(n_average):
while True:
ytrain, ytest, xtrain, xtest = train_test_split(y, X, test_size=self.test_size)
if len(np.unique(ytrain)) > 1: # could create a bug otherwise
break
#print("train size, test size:", len(ytrain),len(ytest),sep='\t')
std = np.std(xtrain, axis = 0)
std[std < zero_eps] = 1.0 # get rid of zero variance data.
mu, inv_sigma = np.mean(xtrain, axis=0), 1./std
xtrain = (xtrain - mu)*inv_sigma # zscoring the data
xtest = (xtest - mu)*inv_sigma
            clf.fit(xtrain, ytrain)
t_score = clf.score(xtrain, ytrain) # predict on test set
training_score.append(t_score)
p_score = clf.score(xtest, ytest) # predict on test set
#print(t_score,'\t',p_score)
predict_score.append(p_score)
clf_list.append(clf)
xtrain_scaler_list.append([mu,inv_sigma])
print("TRAINING ONLY\t",dt)
self.scaler_list = xtrain_scaler_list # scaling transformations (zero mean, unit std)
self.cv_score = np.mean(predict_score)
self.cv_score_std = np.std(predict_score)
self.mean_train_score = np.mean(training_score)
self.std_train_score = np.std(training_score)
self.clf_list = clf_list # classifier list for majority voting !
self._n_sample = len(y)
return self
def predict(self, X, option='fast'):
"""Returns labels for X (-1, 1)"""
        if option == 'fast':
mu, inv_sigma = self.scaler_list[0]
return self.clf_list[0].predict(inv_sigma*(X-mu))
if self.clf_type == 'trivial':
self._n_sample = len(X)
return np.zeros(len(X))
assert self.trained is True, "Must train model first !"
# col is clf, row are different data points
n_clf = len(self.clf_list)
vote = []
for i in range(n_clf):
clf = self.clf_list[i]
mu, inv_sigma = self.scaler_list[i]
            xstandard = inv_sigma * (X - mu)
            vote.append(clf.predict(xstandard))
vote = np.vstack(vote).T
# row are data, col are clf
y_pred = []
for x_vote in vote: # majority voting here !
y_pred.append(most_common(list(x_vote)))
return np.array(y_pred)#.reshape(-1,1)
def score(self, X, y):
y_pred = self.predict(X).flatten()
return np.count_nonzero(y_pred == y)/len(y)
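# Minimal usage sketch on synthetic data; the parameter values below are
# illustrative only, not recommendations.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.normal(0.0, 1.0, size=(50, 2)),
                        rng.normal(4.0, 1.0, size=(50, 2))])
    y_demo = np.array([0] * 50 + [1] * 50)
    clf_demo = CLF(clf_type='svm', n_average=3, test_size=0.5).fit(X_demo, y_demo)
    print("mean cross-validation score: %.3f" % clf_demo.cv_score)
    print("predictions for first 5 samples:", clf_demo.predict(X_demo[:5]))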
|
alexandreday/fast_density_clustering
|
fdc/classify.py
|
Python
|
bsd-3-clause
| 5,227
|
"""
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = (
'TestCase', 'TransactionTestCase', 'APITestCase', 'AuthProviderTestCase',
'RuleTestCase', 'PermissionTestCase', 'PluginTestCase', 'CliTestCase',
'AcceptanceTestCase',
)
import base64
import os
import os.path
import pytest
import six
import types
from click.testing import CliRunner
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, fixture, Exam
from pkg_resources import iter_entry_points
from rest_framework.test import APITestCase as BaseAPITestCase
from six.moves.urllib.parse import urlencode
from sentry import auth
from sentry.auth.providers.dummy import DummyProvider
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from .fixtures import Fixtures
from .helpers import AuthProvider, Feature, get_auth_header, TaskRunner, override_options
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
class BaseTestCase(Fixtures, Exam):
urls = 'sentry.web.urls'
def assertRequiresAuthentication(self, path, method='GET'):
resp = getattr(self.client, method.lower())(path)
assert resp.status_code == 302
assert resp['Location'].startswith('http://testserver' + reverse('sentry-login'))
@before
def setup_dummy_auth_provider(self):
auth.register('dummy', DummyProvider)
self.addCleanup(auth.unregister, 'dummy', DummyProvider)
@before
def setup_session(self):
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
self.session = session
def tasks(self):
return TaskRunner()
def feature(self, name, active=True):
"""
        >>> with self.feature('feature:name'):
        ...     # ...
"""
return Feature(name, active)
def auth_provider(self, name, cls):
"""
        >>> with self.auth_provider('name', Provider):
        ...     # ...
"""
return AuthProvider(name, cls)
def save_session(self):
self.session.save()
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
session_cookie = settings.SESSION_COOKIE_NAME
self.client.cookies[session_cookie] = self.session.session_key
self.client.cookies[session_cookie].update(cookie_data)
def login_as(self, user):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
request = HttpRequest()
request.session = self.session
login(request, user)
request.user = user
# Save the session values.
self.save_session()
def load_fixture(self, filepath):
filepath = os.path.join(
MODULE_ROOT,
'tests',
'fixtures',
filepath,
)
with open(filepath, 'rb') as fp:
return fp.read()
def _pre_setup(self):
super(BaseTestCase, self)._pre_setup()
cache.clear()
ProjectOption.objects.clear_local_cache()
GroupMeta.objects.clear_local_cache()
def _post_teardown(self):
super(BaseTestCase, self)._post_teardown()
def _makeMessage(self, data):
return json.dumps(data).encode('utf-8')
def _makePostMessage(self, data):
return base64.b64encode(self._makeMessage(data))
def _postWithHeader(self, data, key=None, secret=None, protocol=None):
if key is None:
key = self.projectkey.public_key
secret = self.projectkey.secret_key
message = self._makePostMessage(data)
with self.tasks():
resp = self.client.post(
reverse('sentry-api-store'), message,
content_type='application/octet-stream',
HTTP_X_SENTRY_AUTH=get_auth_header(
'_postWithHeader/0.0.0',
key,
secret,
protocol,
),
)
return resp
def _postCspWithHeader(self, data, key=None, **extra):
if isinstance(data, dict):
body = json.dumps({'csp-report': data})
elif isinstance(data, six.string_types):
body = data
path = reverse('sentry-api-csp-report', kwargs={'project_id': self.project.id})
path += '?sentry_key=%s' % self.projectkey.public_key
with self.tasks():
return self.client.post(
path, data=body,
content_type='application/csp-report',
HTTP_USER_AGENT=DEFAULT_USER_AGENT,
**extra
)
def _getWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
'sentry_data': message,
}
with self.tasks():
resp = self.client.get(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
**headers
)
return resp
def _postWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
}
with self.tasks():
resp = self.client.post(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
data=message,
content_type='application/json',
**headers
)
return resp
def options(self, options):
"""
A context manager that temporarily sets a global option and reverts
back to the original value when exiting the context.
"""
return override_options(options)
@contextmanager
def dsn(self, dsn):
"""
A context manager that temporarily sets the internal client's DSN
"""
from raven.contrib.django.models import client
try:
client.set_dsn(dsn)
yield
finally:
client.set_dsn(None)
_postWithSignature = _postWithHeader
_postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
pass
class APITestCase(BaseTestCase, BaseAPITestCase):
pass
class AuthProviderTestCase(TestCase):
provider = DummyProvider
provider_name = 'dummy'
def setUp(self):
super(AuthProviderTestCase, self).setUp()
# TestCase automatically sets up dummy provider
if self.provider_name != 'dummy' or self.provider != DummyProvider:
auth.register(self.provider_name, self.provider)
self.addCleanup(auth.unregister, self.provider_name, self.provider)
class RuleTestCase(TestCase):
rule_cls = None
def get_event(self):
return self.event
def get_rule(self, data=None):
return self.rule_cls(
project=self.project,
data=data or {},
)
def get_state(self, **kwargs):
kwargs.setdefault('is_new', True)
kwargs.setdefault('is_regression', True)
kwargs.setdefault('is_sample', True)
return EventState(**kwargs)
def assertPasses(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is True
def assertDoesNotPass(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is False
class PermissionTestCase(TestCase):
def setUp(self):
super(PermissionTestCase, self).setUp()
self.owner = self.create_user(is_superuser=False)
self.organization = self.create_organization(
owner=self.owner,
flags=0, # disable default allow_joinleave access
)
self.team = self.create_team(organization=self.organization)
def assert_can_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 200 and resp.status_code < 300
def assert_cannot_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 300
def assert_member_can_access(self, path):
return self.assert_role_can_access(path, 'member')
def assert_teamless_member_can_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='member', teams=[],
)
self.assert_can_access(user, path)
def assert_member_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'member')
def assert_manager_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'manager')
def assert_teamless_member_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='member', teams=[],
)
self.assert_cannot_access(user, path)
def assert_team_admin_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_teamless_admin_can_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='admin', teams=[],
)
self.assert_can_access(user, path)
def assert_team_admin_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'admin')
def assert_teamless_admin_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='admin', teams=[],
)
self.assert_cannot_access(user, path)
def assert_team_owner_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_owner_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_owner_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'owner')
def assert_non_member_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.assert_cannot_access(user, path)
def assert_role_can_access(self, path, role):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role=role, teams=[self.team],
)
self.assert_can_access(user, path)
def assert_role_cannot_access(self, path, role):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role=role, teams=[self.team],
)
self.assert_cannot_access(user, path)
class PluginTestCase(TestCase):
plugin = None
def setUp(self):
super(PluginTestCase, self).setUp()
# Old plugins, plugin is a class, new plugins, it's an instance
# New plugins don't need to be registered
if isinstance(self.plugin, (type, types.ClassType)):
plugins.register(self.plugin)
self.addCleanup(plugins.unregister, self.plugin)
def assertAppInstalled(self, name, path):
for ep in iter_entry_points('sentry.apps'):
if ep.name == name:
ep_path = ep.module_name
if ep_path == path:
return
self.fail('Found app in entry_points, but wrong class. Got %r, expected %r' % (ep_path, path))
self.fail('Missing app from entry_points: %r' % (name,))
def assertPluginInstalled(self, name, plugin):
path = type(plugin).__module__ + ':' + type(plugin).__name__
for ep in iter_entry_points('sentry.plugins'):
if ep.name == name:
ep_path = ep.module_name + ':' + '.'.join(ep.attrs)
if ep_path == path:
return
self.fail('Found plugin in entry_points, but wrong class. Got %r, expected %r' % (ep_path, path))
self.fail('Missing plugin from entry_points: %r' % (name,))
class CliTestCase(TestCase):
runner = fixture(CliRunner)
command = None
default_args = []
def invoke(self, *args):
args += tuple(self.default_args)
return self.runner.invoke(self.command, args, obj={})
@pytest.mark.usefixtures('browser')
class AcceptanceTestCase(TransactionTestCase):
def save_session(self):
self.session.save()
self.browser.save_cookie(
name=settings.SESSION_COOKIE_NAME,
value=self.session.session_key,
)
|
alexm92/sentry
|
src/sentry/testutils/cases.py
|
Python
|
bsd-3-clause
| 14,299
|
import os
import subprocess
from unittest import mock
from django.conf import settings
from django.core import mail
from olympia import amo
from olympia.amo.cron import gc, write_sitemaps
from olympia.amo.sitemap import get_sitemaps
from olympia.amo.tests import TestCase, addon_factory, user_factory, version_factory
from olympia.constants.promoted import RECOMMENDED
from olympia.constants.scanners import YARA
from olympia.addons.models import Addon
from olympia.files.models import FileUpload
from olympia.scanners.models import ScannerResult
from olympia.amo.models import FakeEmail
@mock.patch('olympia.amo.cron.storage')
class TestGC(TestCase):
def test_file_uploads_deletion(self, storage_mock):
user = user_factory()
fu_new = FileUpload.objects.create(
path='/tmp/new',
name='new',
user=user,
source=amo.UPLOAD_SOURCE_DEVHUB,
ip_address='127.0.0.8',
)
fu_new.update(created=self.days_ago(6))
fu_old = FileUpload.objects.create(
path='/tmp/old',
name='old',
user=user,
source=amo.UPLOAD_SOURCE_DEVHUB,
ip_address='127.0.0.8',
)
fu_old.update(created=self.days_ago(16))
gc()
assert FileUpload.objects.count() == 1
assert storage_mock.delete.call_count == 1
assert storage_mock.delete.call_args[0][0] == fu_old.path
def test_file_uploads_deletion_no_path_somehow(self, storage_mock):
user = user_factory()
fu_old = FileUpload.objects.create(
path='',
name='foo',
user=user,
source=amo.UPLOAD_SOURCE_DEVHUB,
ip_address='127.0.0.8',
)
fu_old.update(created=self.days_ago(16))
gc()
assert FileUpload.objects.count() == 0 # FileUpload was deleted.
assert storage_mock.delete.call_count == 0 # No path to delete.
def test_file_uploads_deletion_oserror(self, storage_mock):
user = user_factory()
fu_older = FileUpload.objects.create(
path='/tmp/older',
name='older',
user=user,
source=amo.UPLOAD_SOURCE_DEVHUB,
ip_address='127.0.0.8',
)
fu_older.update(created=self.days_ago(300))
fu_old = FileUpload.objects.create(
path='/tmp/old',
name='old',
user=user,
source=amo.UPLOAD_SOURCE_DEVHUB,
ip_address='127.0.0.8',
)
fu_old.update(created=self.days_ago(16))
storage_mock.delete.side_effect = OSError
gc()
# Even though delete() caused a OSError, we still deleted the
# FileUploads rows, and tried to delete each corresponding path on
# the filesystem.
assert FileUpload.objects.count() == 0
assert storage_mock.delete.call_count == 2
assert storage_mock.delete.call_args_list[0][0][0] == fu_older.path
assert storage_mock.delete.call_args_list[1][0][0] == fu_old.path
def test_delete_fake_emails(self, storage_mock):
fe_old = FakeEmail.objects.create(message='This is the oldest fake email')
fe_old.update(created=self.days_ago(360))
fe_new = FakeEmail.objects.create(message='This is the newest fake email')
fe_new.update(created=self.days_ago(45))
gc()
# FakeEmail which are older than 90 were deleted.
assert FakeEmail.objects.count() == 1
assert FakeEmail.objects.filter(pk=fe_new.pk).count() == 1
def test_scanner_results_deletion(self, storage_mock):
user = user_factory()
old_upload = FileUpload.objects.create(
path='/tmp/old',
name='old',
user=user,
source=amo.UPLOAD_SOURCE_DEVHUB,
ip_address='127.0.0.8',
)
old_upload.update(created=self.days_ago(16))
new_upload = FileUpload.objects.create(
path='/tmp/new',
name='new',
user=user,
source=amo.UPLOAD_SOURCE_DEVHUB,
ip_address='127.0.0.8',
)
new_upload.update(created=self.days_ago(6))
version = version_factory(addon=addon_factory())
# upload = None, version = None --> DELETED
ScannerResult.objects.create(scanner=YARA)
# upload will become None because it is bound to an old upload, version
# = None --> DELETED
ScannerResult.objects.create(scanner=YARA, upload=old_upload)
# upload is not None, version = None --> KEPT
ScannerResult.objects.create(scanner=YARA, upload=new_upload)
# upload = None, version is not None --> KEPT
ScannerResult.objects.create(scanner=YARA, version=version)
# upload is not None, version is not None --> KEPT
ScannerResult.objects.create(scanner=YARA, upload=new_upload, version=version)
assert ScannerResult.objects.count() == 5
gc()
assert ScannerResult.objects.count() == 3
assert storage_mock.delete.call_count == 1
def test_stale_addons_deletion(self, storage_mock):
in_the_past = self.days_ago(16)
to_delete = [
Addon.objects.create(),
Addon.objects.create(status=amo.STATUS_NULL),
# Shouldn't be possible to have a public add-on with no versions,
# but just in case it should still work.
Addon.objects.create(status=amo.STATUS_APPROVED),
Addon.objects.create(status=amo.STATUS_DELETED),
]
for addon in to_delete:
addon.update(created=in_the_past)
to_keep = [
Addon.objects.create(),
Addon.objects.create(status=amo.STATUS_NULL),
addon_factory(created=in_the_past, version_kw={'deleted': True}),
addon_factory(created=in_the_past, status=amo.STATUS_NULL),
addon_factory(created=in_the_past, status=amo.STATUS_DELETED),
]
gc()
for addon in to_delete:
assert not Addon.unfiltered.filter(pk=addon.pk).exists()
for addon in to_keep:
assert Addon.unfiltered.filter(pk=addon.pk).exists()
# Make sure no email was sent.
assert len(mail.outbox) == 0
class TestWriteSitemaps(TestCase):
def setUp(self):
addon_factory()
TestCase.make_addon_promoted(
addon_factory(version_kw={'application': amo.ANDROID.id}),
RECOMMENDED,
approve_version=True,
)
assert len(os.listdir(settings.SITEMAP_STORAGE_PATH)) == 0
def test_basic(self):
sitemaps_dir = settings.SITEMAP_STORAGE_PATH
write_sitemaps()
sitemaps = get_sitemaps()
# Root should contain all sections dirs + index.
assert (
len(os.listdir(sitemaps_dir)) == len(set(item[0] for item in sitemaps)) + 1
)
with open(os.path.join(sitemaps_dir, 'sitemap.xml')) as sitemap:
contents = sitemap.read()
entry = (
'<sitemap><loc>http://testserver/sitemap.xml?{params}</loc></sitemap>'
)
for (section, app), sitemap in sitemaps.items():
if not app:
assert entry.format(params=f'section={section}') in contents
else:
assert (
entry.format(
params=f'section={section}&app_name={app.short}'
)
in contents
)
assert (
'<sitemap><loc>http://testserver/blog/sitemap.xml</loc></sitemap>'
in contents
)
with open(os.path.join(sitemaps_dir, 'amo/sitemap.xml')) as sitemap:
contents = sitemap.read()
assert '<url><loc>http://testserver/en-US/about</loc>' in contents
with open(os.path.join(sitemaps_dir, 'addons/firefox/1/01/1.xml')) as sitemap:
contents = sitemap.read()
assert '<url><loc>http://testserver/en-US/firefox/' in contents
with open(os.path.join(sitemaps_dir, 'addons/android/1/01/1.xml')) as sitemap:
contents = sitemap.read()
assert '<url><loc>http://testserver/en-US/android/' in contents
xml_path = os.path.join(sitemaps_dir, 'collections/firefox/1/01/1.xml')
with open(xml_path) as sitemap:
contents = sitemap.read()
assert (
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" '
'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n\n</urlset>' in contents
)
def test_with_args_sections(self):
sitemaps_dir = settings.SITEMAP_STORAGE_PATH
write_sitemaps(section='index')
assert len(os.listdir(sitemaps_dir)) == 1
assert os.path.exists(os.path.join(sitemaps_dir, 'sitemap.xml'))
os.remove(os.path.join(sitemaps_dir, 'sitemap.xml'))
write_sitemaps(section='amo')
assert len(os.listdir(sitemaps_dir)) == 1
assert os.path.exists(os.path.join(sitemaps_dir, 'amo/sitemap.xml'))
os.remove(os.path.join(sitemaps_dir, 'amo/sitemap.xml'))
write_sitemaps(section='addons')
assert len(os.listdir(sitemaps_dir)) == 2
assert os.path.exists(os.path.join(sitemaps_dir, 'addons/firefox/1/01/1.xml'))
assert os.path.exists(os.path.join(sitemaps_dir, 'addons/android/1/01/1.xml'))
def test_with_args_app_name(self):
sitemaps_dir = settings.SITEMAP_STORAGE_PATH
# typically app_name would be used in combination with a section
write_sitemaps(section='addons', app_name='firefox')
assert len(os.listdir(sitemaps_dir)) == 1
assert os.path.exists(os.path.join(sitemaps_dir, 'addons/firefox/1/01/1.xml'))
os.remove(os.path.join(sitemaps_dir, 'addons/firefox/1/01/1.xml'))
# but it does work on its own, to generate all relevant sitemaps
write_sitemaps(app_name='android')
assert len(os.listdir(sitemaps_dir)) == 3
assert os.path.exists(os.path.join(sitemaps_dir, 'addons/android/1/01/1.xml'))
assert os.path.exists(os.path.join(sitemaps_dir, 'users/android/1/01/1.xml'))
assert os.path.exists(os.path.join(sitemaps_dir, 'tags/android/1/01/1.xml'))
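# Added summary (not part of the original test file) of the on-disk layout
# exercised above:
#
#   write_sitemaps(section='index')                      -> sitemap.xml
#   write_sitemaps(section='amo')                        -> amo/sitemap.xml
#   write_sitemaps(section='addons', app_name='firefox') -> addons/firefox/1/01/1.xml
#   write_sitemaps(app_name='android')                   -> addons/android/..., users/android/...,
#                                                           tags/android/...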
def test_gen_cron():
args = [
'scripts/crontab/gen-cron.py',
'-z ./',
'-u root',
]
output = subprocess.check_output(args)
assert b'MAILTO=amo-crons@mozilla.com' in output
# check some known jobs are rendered as expected in the output
prefix = b'root cd ./; /usr/bin/python -W ignore::DeprecationWarning manage.py'
assert (b'*/5 * * * * %s auto_approve' % prefix) in output
assert (b'10 * * * * %s cron update_blog_posts' % prefix) in output
|
mozilla/addons-server
|
src/olympia/amo/tests/test_cron.py
|
Python
|
bsd-3-clause
| 10,869
|
class Artifact(object):
def __init__(self, parts, format_name, theme=None, basename=None):
self._parts = parts
self._theme = theme
self._format_name = format_name
self._basename = basename
@property
def parts(self):
return self._parts
@property
def format_name(self):
return self._format_name
@property
def theme(self):
return self._theme
@property
def basename(self):
if len(self.parts) == 1:
return self.parts[0]
else:
if self._basename:
return self._basename
else:
raise Exception("Basename must be specified for multipart artifact")
def __repr__(self):
return "artifact: {0} {1} {2}".format(self._parts, self._theme, self._format_name)
|
venicegeo/eventkit-cloud
|
eventkit_cloud/utils/artifact.py
|
Python
|
bsd-3-clause
| 833
|
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import pytest
import numpy as np
from pandas import Interval
from pandas.core.arrays import IntervalArray
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.tests.extension import base
import pandas.util.testing as tm
def make_data():
N = 100
left = np.random.uniform(size=N).cumsum()
right = left + np.random.uniform(size=N)
return [Interval(l, r) for l, r in zip(left, right)]
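# Added note: make_data yields 100 Interval objects whose left endpoints form an
# increasing cumulative sum, so the intervals are ordered by left bound but may
# overlap their neighbours.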
@pytest.fixture
def dtype():
return IntervalDtype()
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
return IntervalArray(make_data())
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return IntervalArray.from_tuples([None, (0, 1)])
@pytest.fixture
def data_for_sorting():
return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)])
@pytest.fixture
def data_missing_for_sorting():
return IntervalArray.from_tuples([(1, 2), None, (0, 1)])
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def data_for_grouping():
a = (0, 1)
b = (1, 2)
c = (2, 3)
return IntervalArray.from_tuples([b, b, None, None, a, a, b, c])
class BaseInterval(object):
pass
class TestDtype(BaseInterval, base.BaseDtypeTests):
pass
class TestCasting(BaseInterval, base.BaseCastingTests):
pass
class TestConstructors(BaseInterval, base.BaseConstructorsTests):
pass
class TestGetitem(BaseInterval, base.BaseGetitemTests):
pass
class TestGrouping(BaseInterval, base.BaseGroupbyTests):
pass
class TestInterface(BaseInterval, base.BaseInterfaceTests):
pass
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseInterval, base.BaseMethodsTests):
@pytest.mark.skip(reason='addition is not defined for intervals')
def test_combine_add(self, data_repeated):
pass
class TestMissing(BaseInterval, base.BaseMissingTests):
# Index.fillna only accepts scalar `value`, so we have to skip all
# non-scalar fill tests.
unsupported_fill = pytest.mark.skip("Unsupported fillna option.")
@unsupported_fill
def test_fillna_limit_pad(self):
pass
@unsupported_fill
def test_fillna_series_method(self):
pass
@unsupported_fill
def test_fillna_limit_backfill(self):
pass
@unsupported_fill
def test_fillna_series(self):
pass
def test_non_scalar_raises(self, data_missing):
msg = "Got a 'list' instead."
with tm.assert_raises_regex(TypeError, msg):
data_missing.fillna([1, 1])
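# Hedged illustration (not part of the original tests) of the scalar-only
# fillna behaviour that the skips above encode:
#
#   arr = IntervalArray.from_tuples([None, (0, 1)])
#   arr.fillna(Interval(0, 1))    # accepted: scalar fill value
#   arr.fillna([Interval(0, 1)])  # rejected: raises TypeError ("Got a 'list' instead.")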
class TestReshaping(BaseInterval, base.BaseReshapingTests):
pass
class TestSetitem(BaseInterval, base.BaseSetitemTests):
pass
|
cython-testbed/pandas
|
pandas/tests/extension/test_interval.py
|
Python
|
bsd-3-clause
| 3,315
|