| repo_name | ref | path | copies | content |
|---|---|---|---|---|
willu47/SALib
|
refs/heads/master
|
src/SALib/analyze/__init__.py
|
12133432
| |
hogarthj/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/purestorage/__init__.py
|
12133432
| |
rohitwaghchaure/New_Theme_Erp
|
refs/heads/develop
|
erpnext/patches/v4_2/default_website_style.py
|
9
|
import frappe
from frappe.templates.pages.style_settings import default_properties
def execute():
    style_settings = frappe.get_doc("Style Settings", "Style Settings")
    if not style_settings.apply_style:
        style_settings.update(default_properties)
        style_settings.apply_style = 1
        style_settings.save()
|
entomb/CouchPotatoServer
|
refs/heads/master
|
libs/migrate/versioning/util/importpath.py
|
74
|
import os
import sys
def import_path(fullpath):
    """ Import a file with full path specification. Allows one to
    import from anywhere, something __import__ does not do.
    """
    # http://zephyrfalcon.org/weblog/arch_d7_2002_08_31.html
    path, filename = os.path.split(fullpath)
    filename, ext = os.path.splitext(filename)
    sys.path.append(path)
    module = __import__(filename)
    reload(module) # Might be out of date during tests
    del sys.path[-1]
    return module
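# Illustrative usage sketch (added for clarity; the path and attribute below are
# hypothetical and not part of the original file). import_path() temporarily
# appends the file's directory to sys.path, imports the module by its bare name,
# reloads it, and then restores sys.path:
#
#   plugin = import_path('/tmp/plugins/my_plugin.py')
#   plugin.run()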
|
bcoca/ansible-modules-extras
|
refs/heads/devel
|
cloud/azure/__init__.py
|
12133432
| |
Edraak/edraak-platform
|
refs/heads/master
|
common/test/acceptance/tests/studio/__init__.py
|
12133432
| |
rahulunair/nova
|
refs/heads/master
|
nova/db/sqlalchemy/__init__.py
|
12133432
| |
ee-in/python-api
|
refs/heads/master
|
plotly/offline/__init__.py
|
1
|
"""
offline
======
This module provides offline functionality.
"""
from . offline import (
    download_plotlyjs,
    init_notebook_mode,
    iplot
)
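# Illustrative notebook usage sketch (added for clarity; the figure data below
# is made up and not part of the original file). In a Jupyter notebook one
# would typically call init_notebook_mode() once and then pass a figure to
# iplot():
#
#   init_notebook_mode()
#   iplot([{'x': [1, 2, 3], 'y': [3, 1, 6]}])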
|
Passtechsoft/TPEAlpGen
|
refs/heads/master
|
blender/release/scripts/addons/rigify/rigs/basic/copy_chain.py
|
6
|
#====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
# <pep8 compliant>
import bpy
from ...utils import MetarigError
from ...utils import copy_bone
from ...utils import connected_children_names
from ...utils import strip_org, make_deformer_name
from ...utils import create_bone_widget
class Rig:
""" A "copy_chain" rig. All it does is duplicate the original bone chain
and constrain it.
This is a control and deformation rig.
"""
def __init__(self, obj, bone_name, params):
""" Gather and validate data about the rig.
"""
self.obj = obj
self.org_bones = [bone_name] + connected_children_names(obj, bone_name)
self.params = params
self.make_controls = params.make_controls
self.make_deforms = params.make_deforms
if len(self.org_bones) <= 1:
raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 2 or more bones" % (strip_org(bone_name)))
def generate(self):
""" Generate the rig.
Do NOT modify any of the original bones, except for adding constraints.
The main armature should be selected and active before this is called.
"""
bpy.ops.object.mode_set(mode='EDIT')
# Create the deformation and control bone chains.
# Just copies of the original chain.
def_chain = []
ctrl_chain = []
for i in range(len(self.org_bones)):
name = self.org_bones[i]
# Control bone
if self.make_controls:
# Copy
ctrl_bone = copy_bone(self.obj, name)
eb = self.obj.data.edit_bones
ctrl_bone_e = eb[ctrl_bone]
# Name
ctrl_bone_e.name = strip_org(name)
# Parenting
if i == 0:
# First bone
ctrl_bone_e.parent = eb[self.org_bones[0]].parent
else:
# The rest
ctrl_bone_e.parent = eb[ctrl_chain[-1]]
# Add to list
ctrl_chain += [ctrl_bone_e.name]
else:
ctrl_chain += [None]
# Deformation bone
if self.make_deforms:
# Copy
def_bone = copy_bone(self.obj, name)
eb = self.obj.data.edit_bones
def_bone_e = eb[def_bone]
# Name
def_bone_e.name = make_deformer_name(strip_org(name))
# Parenting
if i == 0:
# First bone
def_bone_e.parent = eb[self.org_bones[0]].parent
else:
# The rest
def_bone_e.parent = eb[def_chain[-1]]
# Add to list
def_chain += [def_bone_e.name]
else:
def_chain += [None]
bpy.ops.object.mode_set(mode='OBJECT')
pb = self.obj.pose.bones
# Constraints for org and def
for org, ctrl, defrm in zip(self.org_bones, ctrl_chain, def_chain):
if self.make_controls:
con = pb[org].constraints.new('COPY_TRANSFORMS')
con.name = "copy_transforms"
con.target = self.obj
con.subtarget = ctrl
if self.make_deforms:
con = pb[defrm].constraints.new('COPY_TRANSFORMS')
con.name = "copy_transforms"
con.target = self.obj
con.subtarget = org
# Create control widgets
if self.make_controls:
for bone in ctrl_chain:
create_bone_widget(self.obj, bone)
def add_parameters(params):
""" Add the parameters of this rig type to the
RigifyParameters PropertyGroup
"""
params.make_controls = bpy.props.BoolProperty(name="Controls", default=True, description="Create control bones for the copy")
params.make_deforms = bpy.props.BoolProperty(name="Deform", default=True, description="Create deform bones for the copy")
def parameters_ui(layout, params):
""" Create the ui for the rig parameters.
"""
r = layout.row()
r.prop(params, "make_controls")
r = layout.row()
r.prop(params, "make_deforms")
def create_sample(obj):
""" Create a sample metarig for this rig type.
"""
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
bones = {}
bone = arm.edit_bones.new('bone.01')
bone.head[:] = 0.0000, 0.0000, 0.0000
bone.tail[:] = 0.0000, 0.0000, 0.3333
bone.roll = 0.0000
bone.use_connect = False
bones['bone.01'] = bone.name
bone = arm.edit_bones.new('bone.02')
bone.head[:] = 0.0000, 0.0000, 0.3333
bone.tail[:] = 0.0000, 0.0000, 0.6667
bone.roll = 3.1416
bone.use_connect = True
bone.parent = arm.edit_bones[bones['bone.01']]
bones['bone.02'] = bone.name
bone = arm.edit_bones.new('bone.03')
bone.head[:] = 0.0000, 0.0000, 0.6667
bone.tail[:] = 0.0000, 0.0000, 1.0000
bone.roll = 3.1416
bone.use_connect = True
bone.parent = arm.edit_bones[bones['bone.02']]
bones['bone.03'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['bone.01']]
pbone.rigify_type = 'basic.copy_chain'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['bone.02']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['bone.03']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
|
glennq/zhihu-py3
|
refs/heads/master
|
docs/conf.py
|
15
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# zhihu-py3 documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 22 23:01:19 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'zhihu-py3'
copyright = '2015, 7sDream'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'zhihu-py3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'zhihu-py3.tex', 'zhihu-py3 Documentation',
'7sDream', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zhihu-py3', 'zhihu-py3 Documentation',
['7sDream'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'zhihu-py3', 'zhihu-py3 Documentation',
'7sDream', 'zhihu-py3', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
divio/djangocms-rest-api
|
refs/heads/master
|
djangocms_rest_api/serializers/mapping.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.conf import settings
plugin_serializer_mapping = {}
if 'cmsplugin_filer_image' in settings.INSTALLED_APPS:
    from cmsplugin_filer_image.cms_plugins import FilerImagePlugin
    from djangocms_rest_api.serializers.filerimageplugin_serializer import FilerImagePluginSerializer
    plugin_serializer_mapping[FilerImagePlugin.__name__] = FilerImagePluginSerializer
|
hazelnusse/sympy-old
|
refs/heads/master
|
sympy/series/tests/test_lseries.py
|
7
|
from sympy import sin, cos, exp, E
from sympy.abc import x
def test_sin():
    e = sin(x).lseries(x, 0)
    assert e.next() == x
    assert e.next() == -x**3/6
    assert e.next() == x**5/120
def test_cos():
    e = cos(x).lseries(x, 0)
    assert e.next() == 1
    assert e.next() == -x**2/2
    assert e.next() == x**4/24
def test_exp():
    e = exp(x).lseries(x, 0)
    assert e.next() == 1
    assert e.next() == x
    assert e.next() == x**2/2
    assert e.next() == x**3/6
def test_exp2():
    e = exp(cos(x)).lseries(x, 0)
    assert e.next() == E
    assert e.next() == -E*x**2/2
    assert e.next() == E*x**4/6
    assert e.next() == -31*E*x**6/720
|
cloudbase/neutron-virtualbox
|
refs/heads/virtualbox_agent
|
neutron/tests/functional/api/test_v2_plugin.py
|
5
|
# Copyright 2014, Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module implements BaseNeutronClient for the programmatic plugin
api and configures the api tests with scenarios targeting individual
plugins.
"""
import testscenarios
from neutron.common import exceptions as q_exc
from neutron import context
from neutron import manager
from neutron.tests.api import base_v2
from neutron.tests.unit.ml2 import test_ml2_plugin
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
# Each plugin must add a class to plugin_configurations that can configure the
# plugin for use with PluginClient. For a given plugin, the setup
# used for NeutronDbPluginV2TestCase can usually be reused. See the
# configuration classes listed below for examples of this reuse.
#TODO(marun) Discover plugin conf via a metaclass
plugin_configurations = [
test_ml2_plugin.Ml2PluginConf,
]
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
class PluginClient(base_v2.BaseNeutronClient):
@property
def ctx(self):
if not hasattr(self, '_ctx'):
self._ctx = context.Context('', 'test-tenant')
return self._ctx
@property
def plugin(self):
return manager.NeutronManager.get_plugin()
@property
def NotFound(self):
return q_exc.NetworkNotFound
def create_network(self, **kwargs):
# Supply defaults that are expected to be set by the api
# framework
kwargs.setdefault('admin_state_up', True)
kwargs.setdefault('shared', False)
data = dict(network=kwargs)
result = self.plugin.create_network(self.ctx, data)
return base_v2.AttributeDict(result)
def update_network(self, id_, **kwargs):
data = dict(network=kwargs)
result = self.plugin.update_network(self.ctx, id_, data)
return base_v2.AttributeDict(result)
def get_network(self, *args, **kwargs):
result = self.plugin.get_network(self.ctx, *args, **kwargs)
return base_v2.AttributeDict(result)
def get_networks(self, *args, **kwargs):
result = self.plugin.get_networks(self.ctx, *args, **kwargs)
return [base_v2.AttributeDict(x) for x in result]
def delete_network(self, id_):
self.plugin.delete_network(self.ctx, id_)
def get_scenarios():
scenarios = []
client = PluginClient()
for conf in plugin_configurations:
name = conf.plugin_name
class_name = name[name.rfind('.') + 1:]
scenarios.append((class_name, {'client': client, 'plugin_conf': conf}))
return scenarios
class TestPluginApi(base_v2.BaseTestApi,
testlib_api.SqlTestCase,
testlib_plugin.PluginSetupHelper):
scenarios = get_scenarios()
def setUp(self):
# BaseTestApi is not based on BaseTestCase to avoid import
# errors when importing Tempest. When targeting the plugin
# api, it is necessary to avoid calling BaseTestApi's parent
# setUp, since that setup will be called by SqlTestCase.setUp.
super(TestPluginApi, self).setUp(setup_parent=False)
testlib_api.SqlTestCase.setUp(self)
self.setup_coreplugin(self.plugin_conf.plugin_name)
self.plugin_conf.setUp(self)
|
OSSESAC/odoopubarquiluz
|
refs/heads/7.0
|
openerp/addons/base/res/ir_property.py
|
63
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv,fields
from openerp.tools.misc import attrgetter
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
class ir_property(osv.osv):
_name = 'ir.property'
def _models_field_get(self, cr, uid, field_key, field_value, context=None):
get = attrgetter(field_key, field_value)
obj = self.pool.get('ir.model.fields')
ids = obj.search(cr, uid, [('view_load','=',1)], context=context)
res = set()
for o in obj.browse(cr, uid, ids, context=context):
res.add(get(o))
return list(res)
def _models_get(self, cr, uid, context=None):
return self._models_field_get(cr, uid, 'model', 'model_id.name', context)
def _models_get2(self, cr, uid, context=None):
return self._models_field_get(cr, uid, 'relation', 'relation', context)
_columns = {
'name': fields.char('Name', size=128, select=1),
'res_id': fields.reference('Resource', selection=_models_get, size=128,
help="If not set, acts as a default value for new resources", select=1),
'company_id': fields.many2one('res.company', 'Company', select=1),
'fields_id': fields.many2one('ir.model.fields', 'Field', ondelete='cascade', required=True, select=1),
'value_float' : fields.float('Value'),
'value_integer' : fields.integer('Value'),
'value_text' : fields.text('Value'), # will contain (char, text)
'value_binary' : fields.binary('Value'),
'value_reference': fields.reference('Value', selection=_models_get2, size=128),
'value_datetime' : fields.datetime('Value'),
'type' : fields.selection([('char', 'Char'),
('float', 'Float'),
('boolean', 'Boolean'),
('integer', 'Integer'),
('text', 'Text'),
('binary', 'Binary'),
('many2one', 'Many2One'),
('date', 'Date'),
('datetime', 'DateTime'),
],
'Type',
required=True,
select=1),
}
_defaults = {
'type': 'many2one',
}
def _update_values(self, cr, uid, ids, values):
value = values.pop('value', None)
if not value:
return values
prop = None
type_ = values.get('type')
if not type_:
if ids:
prop = self.browse(cr, uid, ids[0])
type_ = prop.type
else:
type_ = self._defaults['type']
type2field = {
'char': 'value_text',
'float': 'value_float',
'boolean' : 'value_integer',
'integer': 'value_integer',
'text': 'value_text',
'binary': 'value_binary',
'many2one': 'value_reference',
'date' : 'value_datetime',
'datetime' : 'value_datetime',
}
field = type2field.get(type_)
if not field:
raise osv.except_osv('Error', 'Invalid type')
if field == 'value_reference':
if isinstance(value, osv.orm.browse_record):
value = '%s,%d' % (value._name, value.id)
elif isinstance(value, (int, long)):
field_id = values.get('fields_id')
if not field_id:
if not prop:
raise ValueError()
field_id = prop.fields_id
else:
field_id = self.pool.get('ir.model.fields').browse(cr, uid, field_id)
value = '%s,%d' % (field_id.relation, value)
values[field] = value
return values
def write(self, cr, uid, ids, values, context=None):
return super(ir_property, self).write(cr, uid, ids, self._update_values(cr, uid, ids, values), context=context)
def create(self, cr, uid, values, context=None):
return super(ir_property, self).create(cr, uid, self._update_values(cr, uid, None, values), context=context)
def get_by_record(self, cr, uid, record, context=None):
if record.type in ('char', 'text'):
return record.value_text
elif record.type == 'float':
return record.value_float
elif record.type == 'boolean':
return bool(record.value_integer)
elif record.type == 'integer':
return record.value_integer
elif record.type == 'binary':
return record.value_binary
elif record.type == 'many2one':
return record.value_reference
elif record.type == 'datetime':
return record.value_datetime
elif record.type == 'date':
if not record.value_datetime:
return False
return time.strftime('%Y-%m-%d', time.strptime(record.value_datetime, '%Y-%m-%d %H:%M:%S'))
return False
def get(self, cr, uid, name, model, res_id=False, context=None):
domain = self._get_domain(cr, uid, name, model, context=context)
if domain is not None:
domain = [('res_id', '=', res_id)] + domain
nid = self.search(cr, uid, domain, context=context)
if not nid: return False
record = self.browse(cr, uid, nid[0], context=context)
return self.get_by_record(cr, uid, record, context=context)
return False
def _get_domain_default(self, cr, uid, prop_name, model, context=None):
domain = self._get_domain(cr, uid, prop_name, model, context=context)
if domain is None:
return None
return ['&', ('res_id', '=', False)] + domain
def _get_domain(self, cr, uid, prop_name, model, context=None):
context = context or {}
cr.execute('select id from ir_model_fields where name=%s and model=%s', (prop_name, model))
res = cr.fetchone()
if not res:
return None
if 'force_company' in context and context['force_company']:
cid = context['force_company']
else:
company = self.pool.get('res.company')
cid = company._company_default_get(cr, uid, model, res[0], context=context)
domain = ['&', ('fields_id', '=', res[0]),
'|', ('company_id', '=', cid), ('company_id', '=', False)]
return domain
ir_property()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rbalda/neural_ocr
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/core/management/commands/createcachetable.py
|
342
|
from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS, connections, models, router, transaction,
)
from django.db.utils import DatabaseError
from django.utils.encoding import force_text
class Command(BaseCommand):
help = "Creates the tables needed to use the SQL cache backend."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='table_name', nargs='*',
help='Optional table names. Otherwise, settings.CACHES is used to '
'find cache tables.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database onto which the cache tables will be '
'installed. Defaults to the "default" database.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run',
help='Does not create the table, just prints the SQL that would '
'be run.')
def handle(self, *tablenames, **options):
db = options.get('database')
self.verbosity = int(options.get('verbosity'))
dry_run = options.get('dry_run')
if len(tablenames):
# Legacy behavior, tablename specified as argument
for tablename in tablenames:
self.create_table(db, tablename, dry_run)
else:
for cache_alias in settings.CACHES:
cache = caches[cache_alias]
if isinstance(cache, BaseDatabaseCache):
self.create_table(db, cache._table, dry_run)
def create_table(self, database, tablename, dry_run):
cache = BaseDatabaseCache(tablename, {})
if not router.allow_migrate_model(database, cache.cache_model_class):
return
connection = connections[database]
if tablename in connection.introspection.table_names():
if self.verbosity > 0:
self.stdout.write("Cache table '%s' already exists." % tablename)
return
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
models.TextField(name='value'),
models.DateTimeField(name='expires', db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [qn(f.name), f.db_type(connection=connection)]
field_output.append("%sNULL" % ("NOT " if not f.null else ""))
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = "UNIQUE " if f.unique else ""
index_output.append("CREATE %sINDEX %s ON %s (%s);" %
(unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
qn(f.name)))
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
full_statement.append(');')
full_statement = "\n".join(full_statement)
if dry_run:
self.stdout.write(full_statement)
for statement in index_output:
self.stdout.write(statement)
return
with transaction.atomic(using=database,
savepoint=connection.features.can_rollback_ddl):
with connection.cursor() as curs:
try:
curs.execute(full_statement)
except DatabaseError as e:
raise CommandError(
"Cache table '%s' could not be created.\nThe error was: %s." %
(tablename, force_text(e)))
for statement in index_output:
curs.execute(statement)
if self.verbosity > 1:
self.stdout.write("Cache table '%s' created." % tablename)
|
lchqfnu/mongo-connector
|
refs/heads/master
|
tests/test_rollbacks.py
|
14
|
"""Test Mongo Connector's behavior when its source MongoDB system is
experiencing a rollback.
"""
import os
import sys
import time
from pymongo.read_preferences import ReadPreference
from pymongo import MongoClient
sys.path[0:0] = [""]
from mongo_connector.util import retry_until_ok
from mongo_connector.locking_dict import LockingDict
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.oplog_manager import OplogThread
from tests import unittest, STRESS_COUNT
from tests.util import assert_soon
from tests.setup_cluster import ReplicaSet
class TestRollbacks(unittest.TestCase):
def tearDown(self):
self.repl_set.stop()
def setUp(self):
# Create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Start a replica set
self.repl_set = ReplicaSet().start()
# Connection to the replica set as a whole
self.main_conn = self.repl_set.client()
# Connection to the primary specifically
self.primary_conn = self.repl_set.primary.client()
# Connection to the secondary specifically
self.secondary_conn = self.repl_set.secondary.client(
read_preference=ReadPreference.SECONDARY_PREFERRED)
# Wipe any test data
self.main_conn["test"]["mc"].drop()
# Oplog thread
doc_manager = DocManager()
oplog_progress = LockingDict()
self.opman = OplogThread(
primary_client=self.main_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
ns_set=["test.mc"]
)
def test_single_target(self):
"""Test with a single replication target"""
self.opman.start()
# Insert first document with primary up
self.main_conn["test"]["mc"].insert({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary")
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])
# Insert another document. This will be rolled back later
retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
self.assertEqual(secondary["test"]["mc"].count(), 2)
# Wait for replication to doc manager
assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 2,
"not all writes were replicated to doc manager")
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status")
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(secondary.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
assert_soon(lambda:
retry_until_ok(self.main_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted")
# Only first document should exist in MongoDB
self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
# Same case should hold for the doc manager
doc_manager = self.opman.doc_managers[0]
assert_soon(lambda: len(doc_manager._search()) == 1,
'documents never rolled back in doc manager.')
self.assertEqual(doc_manager._search()[0]["i"], 0)
# cleanup
self.opman.join()
def test_many_targets(self):
"""Test with several replication targets"""
# OplogThread has multiple doc managers
doc_managers = [DocManager(), DocManager(), DocManager()]
self.opman.doc_managers = doc_managers
self.opman.start()
# Insert a document into each namespace
self.main_conn["test"]["mc"].insert({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary")
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(lambda: secondary.admin.command("isMaster")['ismaster'],
'secondary was never promoted')
# Insert more documents. This will be rolled back later
# Some of these documents will be manually removed from
# certain doc managers, to emulate the effect of certain
# target systems being ahead/behind others
secondary_ids = []
for i in range(1, 10):
secondary_ids.append(
retry_until_ok(self.main_conn["test"]["mc"].insert,
{"i": i}))
self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)
# Wait for replication to the doc managers
def docmans_done():
for dm in self.opman.doc_managers:
if len(dm._search()) != 10:
return False
return True
assert_soon(docmans_done,
"not all writes were replicated to doc managers")
# Remove some documents from the doc managers to simulate
# uneven replication
ts = self.opman.doc_managers[0].get_last_doc()['_ts']
for id in secondary_ids[8:]:
self.opman.doc_managers[1].remove(id, 'test.mc', ts)
for id in secondary_ids[2:]:
self.opman.doc_managers[2].remove(id, 'test.mc', ts)
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")['ismaster'],
'restarted primary never resumed primary status')
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(secondary.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
assert_soon(lambda:
retry_until_ok(self.primary_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted")
# Only first document should exist in MongoDB
self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)
# Give OplogThread some time to catch up
time.sleep(10)
# Same case should hold for the doc managers
for dm in self.opman.doc_managers:
self.assertEqual(len(dm._search()), 1)
self.assertEqual(dm._search()[0]["i"], 0)
self.opman.join()
def test_deletions(self):
"""Test rolling back 'd' operations"""
self.opman.start()
# Insert a document, wait till it replicates to secondary
self.main_conn["test"]["mc"].insert({"i": 0})
self.main_conn["test"]["mc"].insert({"i": 1})
self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 2)
assert_soon(lambda: self.secondary_conn["test"]["mc"].count() == 2,
"first write didn't replicate to secondary")
# Kill the primary, wait for secondary to be promoted
self.repl_set.primary.stop(destroy=False)
assert_soon(lambda: self.secondary_conn["admin"]
.command("isMaster")["ismaster"])
# Delete first document
retry_until_ok(self.main_conn["test"]["mc"].remove, {"i": 0})
self.assertEqual(self.secondary_conn["test"]["mc"].count(), 1)
# Wait for replication to doc manager
assert_soon(lambda: len(self.opman.doc_managers[0]._search()) == 1,
"delete was not replicated to doc manager")
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status")
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(self.secondary_conn.admin.command,
'replSetGetStatus')['myState'] == 2,
"restarted secondary never resumed secondary status")
# Both documents should exist in mongo
assert_soon(lambda: retry_until_ok(
self.main_conn["test"]["mc"].count) == 2)
# Both document should exist in doc manager
doc_manager = self.opman.doc_managers[0]
docs = list(doc_manager._search())
self.assertEqual(len(docs), 2,
"Expected two documents, but got %r" % docs)
self.opman.join()
def test_stressed_rollback(self):
"""Stress test for a rollback with many documents."""
self.opman.start()
c = self.main_conn.test.mc
docman = self.opman.doc_managers[0]
c.insert({'i': i} for i in range(STRESS_COUNT))
assert_soon(lambda: c.count() == STRESS_COUNT)
condition = lambda: len(docman._search()) == STRESS_COUNT
assert_soon(condition, ("Was expecting %d documents in DocManager, "
"but %d found instead."
% (STRESS_COUNT, len(docman._search()))))
primary_conn = self.repl_set.primary.client()
self.repl_set.primary.stop(destroy=False)
new_primary_conn = self.repl_set.secondary.client()
admin = new_primary_conn.admin
assert_soon(
lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])
retry_until_ok(c.insert,
[{'i': str(STRESS_COUNT + i)}
for i in range(STRESS_COUNT)])
assert_soon(lambda: len(docman._search()) == c.count())
self.repl_set.secondary.stop(destroy=False)
self.repl_set.primary.start()
admin = primary_conn.admin
assert_soon(
lambda: retry_until_ok(admin.command, "isMaster")['ismaster'])
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(c.count) == STRESS_COUNT)
assert_soon(condition, ("Was expecting %d documents in DocManager, "
"but %d found instead."
% (STRESS_COUNT, len(docman._search()))))
self.opman.join()
|
VisionSystemsInc/voxel_globe
|
refs/heads/master
|
voxel_globe/voxel_viewer/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
alangwansui/mtl_ordercenter
|
refs/heads/master
|
openerp/addons/l10n_be/wizard/__init__.py
|
438
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_be_partner_vat_listing
import l10n_be_vat_intra
import l10n_be_account_vat_declaration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nhenezi/kuma
|
refs/heads/master
|
vendor/packages/translate-toolkit/translate/convert/po2html.py
|
6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""convert Gettext PO localization files to HTML files
see: http://translate.sourceforge.net/wiki/toolkit/po2html for examples and
usage instructions
"""
from translate.storage import po
try:
import textwrap
except:
textwrap = None
try:
import tidy
except:
tidy = None
class po2html:
"""po2html can take a po file and generate html. best to give it a template file otherwise will just concat msgstrs"""
def __init__(self, wrap=None, usetidy=None):
self.wrap = wrap
self.tidy = tidy and usetidy
def wrapmessage(self, message):
"""rewraps text as required"""
if self.wrap is None:
return message
return "\n".join([textwrap.fill(line, self.wrap, replace_whitespace=False) for line in message.split("\n")])
def convertstore(self, inputstore, includefuzzy):
"""converts a file to .po format"""
htmlresult = ""
for inputunit in inputstore.units:
if inputunit.isheader():
continue
if includefuzzy or not inputunit.isfuzzy():
htmlresult += self.wrapmessage(inputunit.target) + "\n" + "\n"
else:
htmlresult += self.wrapmessage(inputunit.source) + "\n" + "\n"
return htmlresult.encode('utf-8')
def mergestore(self, inputstore, templatetext, includefuzzy):
"""converts a file to .po format"""
htmlresult = templatetext.replace("\n", " ")
if isinstance(htmlresult, str):
#TODO: get the correct encoding
htmlresult = htmlresult.decode('utf-8')
# TODO: use the algorithm from html2po to get blocks and translate them individually
# rather than using replace
for inputunit in inputstore.units:
if inputunit.isheader():
continue
msgid = inputunit.source
msgstr = None
if includefuzzy or not inputunit.isfuzzy():
msgstr = self.wrapmessage(inputunit.target)
else:
msgstr = self.wrapmessage(inputunit.source)
if msgstr.strip():
# TODO: "msgid" is already html-encoded ("&" -> "&amp;"), while
# "msgstr" is not encoded -> thus the replace fails
# see test_po2html.py in line 67
htmlresult = htmlresult.replace(msgid, msgstr, 1)
htmlresult = htmlresult.encode('utf-8')
if self.tidy:
htmlresult = str(tidy.parseString(htmlresult))
return htmlresult
def converthtml(inputfile, outputfile, templatefile, wrap=None, includefuzzy=False, usetidy=True):
"""reads in stdin using fromfileclass, converts using convertorclass, writes to stdout"""
inputstore = po.pofile(inputfile)
convertor = po2html(wrap=wrap, usetidy=usetidy)
if templatefile is None:
outputstring = convertor.convertstore(inputstore, includefuzzy)
else:
templatestring = templatefile.read()
outputstring = convertor.mergestore(inputstore, templatestring, includefuzzy)
outputfilepos = outputfile.tell()
outputfile.write(outputstring)
return 1
def main(argv=None):
from translate.convert import convert
from translate.misc import stdiotell
import sys
sys.stdout = stdiotell.StdIOWrapper(sys.stdout)
formats = {("po", "htm"):("htm", converthtml), ("po", "html"):("html", converthtml), ("po", "xhtml"):("xhtml", converthtml), ("po"):("html", converthtml)}
parser = convert.ConvertOptionParser(formats, usetemplates=True, description=__doc__)
if textwrap is not None:
parser.add_option("-w", "--wrap", dest="wrap", default=None, type="int",
help="set number of columns to wrap html at", metavar="WRAP")
parser.passthrough.append("wrap")
if tidy is not None:
parser.add_option("", "--notidy", dest="usetidy", default=True,
help="disables the use of HTML tidy", action="store_false")
parser.passthrough.append("usetidy")
parser.add_fuzzy_option()
parser.run(argv)
if __name__ == '__main__':
main()
|
dparlevliet/zelenka-report-storage
|
refs/heads/master
|
server-db/twisted/manhole/ui/gtk2manhole.py
|
37
|
# -*- test-case-name: twisted.manhole.ui.test.test_gtk2manhole -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Manhole client with a GTK v2.x front-end.
"""
__version__ = '$Revision: 1.9 $'[11:-2]
from twisted import copyright
from twisted.internet import reactor
from twisted.python import components, failure, log, util
from twisted.python.reflect import prefixedMethodNames
from twisted.spread import pb
from twisted.spread.ui import gtk2util
from twisted.manhole.service import IManholeClient
from zope.interface import implements
# The pygtk.require for version 2.0 has already been done by the reactor.
import gtk
import code, types, inspect
# TODO:
# Make wrap-mode a run-time option.
# Explorer.
# Code doesn't cleanly handle opening a second connection. Fix that.
# Make some acknowledgement of when a command has completed, even if
# it has no return value so it doesn't print anything to the console.
class OfflineError(Exception):
pass
class ManholeWindow(components.Componentized, gtk2util.GladeKeeper):
gladefile = util.sibpath(__file__, "gtk2manhole.glade")
_widgets = ('input','output','manholeWindow')
def __init__(self):
self.defaults = {}
gtk2util.GladeKeeper.__init__(self)
components.Componentized.__init__(self)
self.input = ConsoleInput(self._input)
self.input.toplevel = self
self.output = ConsoleOutput(self._output)
# Ugh. GladeKeeper actually isn't so good for composite objects.
# I want this connected to the ConsoleInput's handler, not something
# on this class.
self._input.connect("key_press_event", self.input._on_key_press_event)
def setDefaults(self, defaults):
self.defaults = defaults
def login(self):
client = self.getComponent(IManholeClient)
d = gtk2util.login(client, **self.defaults)
d.addCallback(self._cbLogin)
d.addCallback(client._cbLogin)
d.addErrback(self._ebLogin)
def _cbDisconnected(self, perspective):
self.output.append("%s went away. :(\n" % (perspective,), "local")
self._manholeWindow.set_title("Manhole")
def _cbLogin(self, perspective):
peer = perspective.broker.transport.getPeer()
self.output.append("Connected to %s\n" % (peer,), "local")
perspective.notifyOnDisconnect(self._cbDisconnected)
self._manholeWindow.set_title("Manhole - %s" % (peer))
return perspective
def _ebLogin(self, reason):
self.output.append("Login FAILED %s\n" % (reason.value,), "exception")
def _on_aboutMenuItem_activate(self, widget, *unused):
import sys
from os import path
self.output.append("""\
a Twisted Manhole client
Versions:
%(twistedVer)s
Python %(pythonVer)s on %(platform)s
GTK %(gtkVer)s / PyGTK %(pygtkVer)s
%(module)s %(modVer)s
http://twistedmatrix.com/
""" % {'twistedVer': copyright.longversion,
'pythonVer': sys.version.replace('\n', '\n '),
'platform': sys.platform,
'gtkVer': ".".join(map(str, gtk.gtk_version)),
'pygtkVer': ".".join(map(str, gtk.pygtk_version)),
'module': path.basename(__file__),
'modVer': __version__,
}, "local")
def _on_openMenuItem_activate(self, widget, userdata=None):
self.login()
def _on_manholeWindow_delete_event(self, widget, *unused):
reactor.stop()
def _on_quitMenuItem_activate(self, widget, *unused):
reactor.stop()
def on_reload_self_activate(self, *unused):
from twisted.python import rebuild
rebuild.rebuild(inspect.getmodule(self.__class__))
tagdefs = {
'default': {"family": "monospace"},
# These are message types we get from the server.
'stdout': {"foreground": "black"},
'stderr': {"foreground": "#AA8000"},
'result': {"foreground": "blue"},
'exception': {"foreground": "red"},
# Messages generate locally.
'local': {"foreground": "#008000"},
'log': {"foreground": "#000080"},
'command': {"foreground": "#666666"},
}
# TODO: Factor Python console stuff back out to pywidgets.
class ConsoleOutput:
_willScroll = None
def __init__(self, textView):
self.textView = textView
self.buffer = textView.get_buffer()
# TODO: Make this a singleton tag table.
for name, props in tagdefs.iteritems():
tag = self.buffer.create_tag(name)
# This can be done in the constructor in newer pygtk (post 1.99.14)
for k, v in props.iteritems():
tag.set_property(k, v)
self.buffer.tag_table.lookup("default").set_priority(0)
self._captureLocalLog()
def _captureLocalLog(self):
return log.startLogging(_Notafile(self, "log"), setStdout=False)
def append(self, text, kind=None):
# XXX: It seems weird to have to do this thing with always applying
# a 'default' tag. Can't we change the fundamental look instead?
tags = ["default"]
if kind is not None:
tags.append(kind)
self.buffer.insert_with_tags_by_name(self.buffer.get_end_iter(),
text, *tags)
# Silly things, the TextView needs to update itself before it knows
# where the bottom is.
if self._willScroll is None:
self._willScroll = gtk.idle_add(self._scrollDown)
def _scrollDown(self, *unused):
self.textView.scroll_to_iter(self.buffer.get_end_iter(), 0,
True, 1.0, 1.0)
self._willScroll = None
return False
class History:
def __init__(self, maxhist=10000):
self.ringbuffer = ['']
self.maxhist = maxhist
self.histCursor = 0
def append(self, htext):
self.ringbuffer.insert(-1, htext)
if len(self.ringbuffer) > self.maxhist:
self.ringbuffer.pop(0)
self.histCursor = len(self.ringbuffer) - 1
self.ringbuffer[-1] = ''
def move(self, prevnext=1):
'''
Return next/previous item in the history, stopping at top/bottom.
'''
hcpn = self.histCursor + prevnext
if hcpn >= 0 and hcpn < len(self.ringbuffer):
self.histCursor = hcpn
return self.ringbuffer[hcpn]
else:
return None
def histup(self, textbuffer):
if self.histCursor == len(self.ringbuffer) - 1:
si, ei = textbuffer.get_start_iter(), textbuffer.get_end_iter()
self.ringbuffer[-1] = textbuffer.get_text(si,ei)
newtext = self.move(-1)
if newtext is None:
return
textbuffer.set_text(newtext)
def histdown(self, textbuffer):
newtext = self.move(1)
if newtext is None:
return
textbuffer.set_text(newtext)
class ConsoleInput:
toplevel, rkeymap = None, None
__debug = False
def __init__(self, textView):
self.textView=textView
self.rkeymap = {}
self.history = History()
for name in prefixedMethodNames(self.__class__, "key_"):
keysymName = name.split("_")[-1]
self.rkeymap[getattr(gtk.keysyms, keysymName)] = keysymName
def _on_key_press_event(self, entry, event):
stopSignal = False
ksym = self.rkeymap.get(event.keyval, None)
mods = []
for prefix, mask in [('ctrl', gtk.gdk.CONTROL_MASK), ('shift', gtk.gdk.SHIFT_MASK)]:
if event.state & mask:
mods.append(prefix)
if mods:
ksym = '_'.join(mods + [ksym])
if ksym:
rvalue = getattr(
self, 'key_%s' % ksym, lambda *a, **kw: None)(entry, event)
if self.__debug:
print ksym
return rvalue
def getText(self):
buffer = self.textView.get_buffer()
iter1, iter2 = buffer.get_bounds()
text = buffer.get_text(iter1, iter2, False)
return text
def setText(self, text):
self.textView.get_buffer().set_text(text)
def key_Return(self, entry, event):
text = self.getText()
# Figure out if that Return meant "next line" or "execute."
try:
c = code.compile_command(text)
except SyntaxError, e:
# This could conceivably piss you off if the client's python
# doesn't accept keywords that are known to the manhole's
# python.
point = buffer.get_iter_at_line_offset(e.lineno, e.offset)
buffer.place(point)
# TODO: Componentize!
self.toplevel.output.append(str(e), "exception")
except (OverflowError, ValueError), e:
self.toplevel.output.append(str(e), "exception")
else:
if c is not None:
self.sendMessage()
# Don't insert Return as a newline in the buffer.
self.history.append(text)
self.clear()
# entry.emit_stop_by_name("key_press_event")
return True
else:
# not a complete code block
return False
return False
def key_Up(self, entry, event):
# if I'm at the top, previous history item.
textbuffer = self.textView.get_buffer()
if textbuffer.get_iter_at_mark(textbuffer.get_insert()).get_line() == 0:
self.history.histup(textbuffer)
return True
return False
def key_Down(self, entry, event):
textbuffer = self.textView.get_buffer()
if textbuffer.get_iter_at_mark(textbuffer.get_insert()).get_line() == (
textbuffer.get_line_count() - 1):
self.history.histdown(textbuffer)
return True
return False
key_ctrl_p = key_Up
key_ctrl_n = key_Down
def key_ctrl_shift_F9(self, entry, event):
if self.__debug:
import pdb; pdb.set_trace()
def clear(self):
buffer = self.textView.get_buffer()
buffer.delete(*buffer.get_bounds())
def sendMessage(self):
buffer = self.textView.get_buffer()
iter1, iter2 = buffer.get_bounds()
text = buffer.get_text(iter1, iter2, False)
self.toplevel.output.append(pythonify(text), 'command')
# TODO: Componentize better!
try:
return self.toplevel.getComponent(IManholeClient).do(text)
except OfflineError:
self.toplevel.output.append("Not connected, command not sent.\n",
"exception")
def pythonify(text):
'''
Make some text appear as though it was typed in at a Python prompt.
'''
lines = text.split('\n')
lines[0] = '>>> ' + lines[0]
return '\n... '.join(lines) + '\n'
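# Illustrative example (added for clarity; the snippet below is made up and not
# part of the original module):
#
#   pythonify("print 'hi'")   # returns ">>> print 'hi'\n"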
class _Notafile:
"""Curry to make failure.printTraceback work with the output widget."""
def __init__(self, output, kind):
self.output = output
self.kind = kind
def write(self, txt):
self.output.append(txt, self.kind)
def flush(self):
pass
class ManholeClient(components.Adapter, pb.Referenceable):
implements(IManholeClient)
capabilities = {
# "Explorer": 'Set',
"Failure": 'Set'
}
def _cbLogin(self, perspective):
self.perspective = perspective
perspective.notifyOnDisconnect(self._cbDisconnected)
return perspective
def remote_console(self, messages):
for kind, content in messages:
if isinstance(content, types.StringTypes):
self.original.output.append(content, kind)
elif (kind == "exception") and isinstance(content, failure.Failure):
content.printTraceback(_Notafile(self.original.output,
"exception"))
else:
self.original.output.append(str(content), kind)
def remote_receiveExplorer(self, xplorer):
pass
def remote_listCapabilities(self):
return self.capabilities
def _cbDisconnected(self, perspective):
self.perspective = None
def do(self, text):
if self.perspective is None:
raise OfflineError
return self.perspective.callRemote("do", text)
components.registerAdapter(ManholeClient, ManholeWindow, IManholeClient)
|
hiezust/teask
|
refs/heads/master
|
website/migrations/0004_users_county.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-04 15:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    dependencies = [
        ('website', '0003_users'),
    ]
    operations = [
        migrations.AddField(
            model_name='users',
            name='county',
            field=models.CharField(default=django.utils.timezone.now, max_length=50),
            preserve_default=False,
        ),
    ]
|
Chunjie/xsconsole
|
refs/heads/master
|
plugins-base/XSFeatureHostCommon.py
|
4
|
# Copyright (c) 2008-2009 Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
if __name__ == "__main__":
raise Exception("This script is a plugin for xsconsole and cannot run independently")
from XSConsoleStandard import *
class HostUtils:
operationNames = {
'evacuate' : Struct(name = Lang("Evacuate Host"), priority = 10),
'enable' : Struct(name = Lang("Enable"), priority = 20),
'designate_new_master' : Struct(name = Lang("Designate as new Pool Master"), priority = 30)
}
@classmethod
def AllowedOperations(cls):
return cls.operationNames.keys()
@classmethod
def OtherConfigRemove(cls, inHostHandle, inName):
Task.Sync(lambda x: x.xenapi.host.remove_from_other_config(inHostHandle.OpaqueRef(), inName))
@classmethod
def OtherConfigReplace(cls, inHostHandle, inName, inValue):
cls.OtherConfigRemove(inHostHandle, inName)
Task.Sync(lambda x: x.xenapi.host.add_to_other_config(inHostHandle.OpaqueRef(), inName, inValue))
@classmethod
def AsyncOperation(cls, inOperation, inHostHandle, *inParams):
if inOperation == 'evacuate':
# Gather the list of VMs to restart on exit of maintenance mode
runningVMs = [ vm.HotOpaqueRef().OpaqueRef() for vm in HotAccessor().local_host.resident_VMs if not vm.is_control_domain() ]
task = Task.New(lambda x: x.xenapi.Async.host.evacuate(inHostHandle.OpaqueRef()))
cls.OtherConfigReplace(inHostHandle, 'MAINTENANCE_MODE_EVACUATED_VMS', ','.join(runningVMs))
cls.OtherConfigReplace(inHostHandle, 'MAINTENANCE_MODE', 'true')
elif inOperation == 'disable':
task = Task.New(lambda x: x.xenapi.Async.host.disable(inHostHandle.OpaqueRef()))
elif inOperation == 'enable':
cls.OtherConfigRemove(inHostHandle, 'MAINTENANCE_MODE')
cls.OtherConfigRemove(inHostHandle, 'MAINTENANCE_MODE_EVACUATED_VMS')
task = Task.New(lambda x: x.xenapi.Async.host.enable(inHostHandle.OpaqueRef()))
elif inOperation == 'designate_new_master':
task = Task.New(lambda x: x.xenapi.Async.pool.designate_new_master(inHostHandle.OpaqueRef()))
elif inOperation == 'join':
task = Task.New(lambda x: x.xenapi.Async.pool.join(*inParams))
elif inOperation == 'join_force':
task = Task.New(lambda x: x.xenapi.Async.pool.join_force(*inParams))
elif inOperation == 'eject':
task = Task.New(lambda x: x.xenapi.Async.pool.eject(inHostHandle.OpaqueRef()))
else:
raise Exception("Unknown Host operation "+str(inOperation))
return task
@classmethod
def DoOperation(cls, inOperation, inHostHandle):
task = cls.AsyncOperation(inOperation, inHostHandle)
if task is not None:
while task.IsPending():
time.sleep(0.1)
task.RaiseIfFailed()
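# Illustrative usage (a sketch; the handle name is hypothetical):
#   HostUtils.DoOperation('enable', hostHandle)
# removes the MAINTENANCE_MODE other-config keys, starts the asynchronous
# host.enable task and blocks until it finishes, raising if the task failed.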
@classmethod
def OperationStruct(cls, inOperation):
retVal = cls.operationNames.get(inOperation, None)
if retVal is None:
raise Exception("Unknown Host operation "+str(inOperation))
return retVal
@classmethod
def OperationName(cls, inOperation):
return cls.OperationStruct(inOperation).name
@classmethod
def OperationPriority(cls, inOperation):
return cls.OperationStruct(inOperation).priority
class XSFeatureHostCommon:
def Register(self):
Importer.RegisterResource(
self,
'HOST_COMMON', # Name of this item for replacement, etc.
{
'HostUtils' : HostUtils
}
)
# Register this plugin when module is imported
XSFeatureHostCommon().Register()
|
pgmillon/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/iosxr/iosxr_logging.py
|
11
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_logging
version_added: "2.4"
author:
- "Trishna Guha (@trishnaguha)"
- "Kedar Kekan (@kedarX)"
short_description: Configuration management of system logging services on network devices
description:
- This module provides declarative management of system logging (syslog)
configuration on Cisco IOS XR devices.
notes:
- Tested against IOS XRv 6.1.2
options:
dest:
description:
- Destination for system logging (syslog) messages.
choices: ['host', 'console', 'monitor', 'buffered', 'file']
name:
description:
- When C(dest) = I(file) name indicates file-name
- When C(dest) = I(host) name indicates the host-name or ip-address of syslog server.
vrf:
description:
- vrf name when syslog server is configured, C(dest) = C(host)
default: default
version_added: 2.5
size:
description:
- Size of buffer when C(dest) = C(buffered). The acceptable value is in the range I(307200 to 125000000 bytes). Default 307200
- Size of file when C(dest) = C(file). The acceptable value is in the range I(1 to 2097152)KB. Default 2 GB
facility:
description:
- To configure the type of syslog facility in which system logging (syslog) messages are sent to syslog servers
Optional config for C(dest) = C(host)
default: local7
hostnameprefix:
description:
- To append a hostname prefix to system logging (syslog) messages logged to syslog servers.
Optional config for C(dest) = C(host)
version_added: 2.5
level:
description:
- Specifies the severity level for the logging.
default: debugging
aliases: ['severity']
aggregate:
description: List of syslog logging configuration definitions.
state:
description:
- Existential state of the logging configuration on the node.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: iosxr
"""
EXAMPLES = """
- name: configure logging for syslog server host
iosxr_logging:
dest: host
name: 10.10.10.1
level: critical
state: present
- name: add hostnameprefix configuration
iosxr_logging:
hostnameprefix: host1
state: absent
- name: add facility configuration
iosxr_logging:
facility: local1
state: present
- name: configure console logging level
iosxr_logging:
dest: console
level: debugging
state: present
- name: configure monitor logging level
iosxr_logging:
dest: monitor
level: errors
state: present
- name: configure syslog to a file
iosxr_logging:
dest: file
name: file_name
size: 2048
level: errors
state: present
- name: configure buffered logging with size
iosxr_logging:
dest: buffered
size: 5100000
- name: Configure logging using aggregate
iosxr_logging:
aggregate:
- { dest: console, level: warning }
- { dest: buffered, size: 4800000 }
- { dest: file, name: file3, size: 2048}
- { dest: host, name: host3, level: critical}
- name: Delete logging using aggregate
iosxr_logging:
aggregate:
- { dest: console, level: warning }
- { dest: buffered, size: 4800000 }
- { dest: file, name: file3, size: 2048}
- { dest: host, name: host3, level: critical}
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always (empty list when no commands to send)
type: list
sample:
- logging 10.10.10.1 vrf default severity debugging
- logging facility local7
- logging hostnameprefix host1
- logging console critical
- logging buffered 2097153
- logging buffered warnings
- logging monitor errors
- logging file log_file maxfilesize 1024 severity info
xml:
description: NetConf rpc xml sent to device with transport C(netconf)
returned: always (empty list when no xml rpc to send)
type: list
version_added: 2.5
sample:
- '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<syslog xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-infra-syslog-cfg">
<files>
<file xc:operation="delete">
<file-name>file1</file-name>
<file-log-attributes>
<max-file-size>2097152</max-file-size>
<severity>2</severity>
</file-log-attributes>
</file>
</files>
</syslog>
</config>'
"""
import re
import collections
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.iosxr.iosxr import get_config, load_config, build_xml
from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec, etree_findall
from ansible.module_utils.network.iosxr.iosxr import is_netconf, is_cliconf, etree_find
from ansible.module_utils.network.common.utils import remove_default_spec
severity_level = {'emergency': '0',
'alert': '1',
'critical': '2',
'error': '3',
'warning': '4',
'notice': '5',
'info': '6',
'debug': '7',
'disable': '15'}
severity_transpose = {'emergencies': 'emergency',
'alerts': 'alert',
'critical': 'critical',
'errors': 'error',
'warning': 'warning',
'notifications': 'notice',
'informational': 'info',
'debugging': 'debug'}
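# Illustrative note (a sketch based on the two tables above): a task argument
# of level: errors is transposed to the keyword "error" for the NETCONF
# payload (severity_transpose) and, for file and host destinations, further
# mapped to the numeric string "3" (severity_level) when the XML
# configuration is built.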
class ConfigBase(object):
def __init__(self, module):
self._flag = None
self._module = module
self._result = {'changed': False, 'warnings': []}
self._want = list()
self._have = list()
def validate_size(self, value, type=None):
if value:
if type == 'buffer':
if value and not int(307200) <= value <= int(125000000):
self._module.fail_json(msg='buffer size must be between 307200 and 125000000')
elif type == 'file':
if value and not int(1) <= value <= int(2097152):
self._module.fail_json(msg='file size must be between 1 and 2097152')
return value
def map_params_to_obj(self, required_if=None):
aggregate = self._module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = self._module.params[key]
d = item.copy()
if d['dest'] not in ('host', 'file'):
d['name'] = None
if d['dest'] == 'buffered':
if d['size'] is not None:
d['size'] = str(self.validate_size(d['size'], 'buffer'))
else:
d['size'] = str(307200)
elif d['dest'] == 'file':
if d['size'] is not None:
d['size'] = str(self.validate_size(d['size'], 'file'))
else:
d['size'] = str(2097152)
else:
d['size'] = None
if self._flag == 'NC':
d['level'] = severity_transpose[d['level']]
self._want.append(d)
else:
params = self._module.params
if params['dest'] not in ('host', 'file'):
params['name'] = None
if params['dest'] == 'buffered':
if params['size'] is not None:
params['size'] = str(self.validate_size(params['size'], 'buffer'))
else:
params['size'] = str(307200)
elif params['dest'] == 'file':
if params['size'] is not None:
params['size'] = str(self.validate_size(params['size'], 'file'))
else:
params['size'] = str(2097152)
else:
params['size'] = None
if self._flag == 'NC':
params['level'] = severity_transpose[params['level']]
self._want.append({
'dest': params['dest'],
'name': params['name'],
'vrf': params['vrf'],
'size': params['size'],
'facility': params['facility'],
'level': params['level'],
'hostnameprefix': params['hostnameprefix'],
'state': params['state']
})
class CliConfiguration(ConfigBase):
def __init__(self, module):
super(CliConfiguration, self).__init__(module)
self._file_list = set()
self._host_list = set()
def map_obj_to_commands(self):
commands = list()
for want_item in self._want:
dest = want_item['dest']
name = want_item['name']
size = want_item['size']
facility = want_item['facility']
level = want_item['level']
vrf = want_item['vrf']
hostnameprefix = want_item['hostnameprefix']
state = want_item['state']
del want_item['state']
have_size = None
have_console_level = None
have_monitor_level = None
have_prefix = None
have_facility = None
for item in self._have:
if item['dest'] == 'buffered':
have_size = item['size']
if item['dest'] == 'console':
have_console_level = item['level']
if item['dest'] == 'monitor':
have_monitor_level = item['level']
if item['dest'] is None and item['hostnameprefix'] is not None:
have_prefix = item['hostnameprefix']
if item['dest'] is None and item['hostnameprefix'] is None and item['facility'] is not None:
have_facility = item['facility']
if state == 'absent':
if dest == 'host' and name in self._host_list:
commands.append('no logging {0} vrf {1}'.format(name, vrf))
elif dest == 'file' and name in self._file_list:
commands.append('no logging file {0}'.format(name))
elif dest == 'console' and have_console_level is not None:
commands.append('no logging {0}'.format(dest))
elif dest == 'monitor' and have_monitor_level:
commands.append('no logging {0}'.format(dest))
elif dest == 'buffered' and have_size:
commands.append('no logging {0}'.format(dest))
if dest is None and hostnameprefix is not None and have_prefix == hostnameprefix:
commands.append('no logging hostnameprefix {0}'.format(hostnameprefix))
if dest is None and facility is not None and have_facility == facility:
commands.append('no logging facility {0}'.format(facility))
if state == 'present':
if dest == 'host' and name not in self._host_list:
if level == 'errors' or level == 'informational':
level = severity_transpose[level]
commands.append('logging {0} vrf {1} severity {2}'.format(name, vrf, level))
elif dest == 'file' and name not in self._file_list:
if level == 'errors' or level == 'informational':
level = severity_transpose[level]
commands.append('logging file {0} maxfilesize {1} severity {2}'.format(name, size, level))
elif dest == 'buffered' and (have_size is None or (have_size is not None and size != have_size)):
commands.append('logging buffered {0}'.format(size))
elif dest == 'console' and (have_console_level is None or
(have_console_level is not None and have_console_level != level)):
commands.append('logging console {0}'.format(level))
elif dest == 'monitor' and (have_monitor_level is None or
(have_monitor_level is not None and have_monitor_level != level)):
commands.append('logging monitor {0}'.format(level))
if dest is None and hostnameprefix is not None and (have_prefix is None or
(have_prefix is not None and hostnameprefix != have_prefix)):
commands.append('logging hostnameprefix {0}'.format(hostnameprefix))
if dest is None and hostnameprefix is None and facility != have_facility:
commands.append('logging facility {0}'.format(facility))
self._result['commands'] = commands
if commands:
commit = not self._module.check_mode
diff = load_config(self._module, commands, commit=commit)
if diff:
self._result['diff'] = dict(prepared=diff)
self._result['changed'] = True
def parse_facility(self, line):
match = re.search(r'logging facility (\S+)', line, re.M)
facility = None
if match:
facility = match.group(1)
return facility
def parse_size(self, line, dest):
size = None
if dest == 'buffered':
match = re.search(r'logging buffered (\S+)', line, re.M)
if match:
try:
int_size = int(match.group(1))
except ValueError:
int_size = None
if int_size is not None:
size = match.group(1)
return size
def parse_hostnameprefix(self, line):
prefix = None
match = re.search(r'logging hostnameprefix (\S+)', line, re.M)
if match:
prefix = match.group(1)
return prefix
def parse_name(self, line, dest):
name = None
if dest == 'file':
match = re.search(r'logging file (\S+)', line, re.M)
if match:
name = match.group(1)
elif dest == 'host':
match = re.search(r'logging (\S+)', line, re.M)
if match:
name = match.group(1)
return name
def parse_level(self, line, dest):
level_group = ('emergencies', 'alerts', 'critical', 'errors', 'warning',
'notifications', 'informational', 'debugging')
level = None
match = re.search(r'logging {0} (\S+)'.format(dest), line, re.M)
if match:
if match.group(1) in level_group:
level = match.group(1)
return level
def parse_dest(self, line, group):
dest_group = ('console', 'monitor', 'buffered', 'file')
dest = None
if group in dest_group:
dest = group
elif 'vrf' in line:
dest = 'host'
return dest
def parse_vrf(self, line, dest):
vrf = None
if dest == 'host':
match = re.search(r'logging (\S+) vrf (\S+)', line, re.M)
if match:
vrf = match.group(2)
return vrf
def map_config_to_obj(self):
data = get_config(self._module, config_filter='logging')
lines = data.split("\n")
for line in lines:
match = re.search(r'logging (\S+)', line, re.M)
if match:
dest = self.parse_dest(line, match.group(1))
name = self.parse_name(line, dest)
if dest == 'host' and name is not None:
self._host_list.add(name)
if dest == 'file' and name is not None:
self._file_list.add(name)
self._have.append({
'dest': dest,
'name': name,
'size': self.parse_size(line, dest),
'facility': self.parse_facility(line),
'level': self.parse_level(line, dest),
'vrf': self.parse_vrf(line, dest),
'hostnameprefix': self.parse_hostnameprefix(line),
})
def run(self):
self.map_params_to_obj()
self.map_config_to_obj()
self.map_obj_to_commands()
return self._result
class NCConfiguration(ConfigBase):
def __init__(self, module):
super(NCConfiguration, self).__init__(module)
self._flag = 'NC'
self._log_file_meta = collections.OrderedDict()
self._log_host_meta = collections.OrderedDict()
self._log_console_meta = collections.OrderedDict()
self._log_monitor_meta = collections.OrderedDict()
self._log_buffered_size_meta = collections.OrderedDict()
self._log_buffered_level_meta = collections.OrderedDict()
self._log_facility_meta = collections.OrderedDict()
self._log_prefix_meta = collections.OrderedDict()
def map_obj_to_xml_rpc(self):
self._log_file_meta.update([
('files', {'xpath': 'syslog/files', 'tag': True, 'operation': 'edit'}),
('file', {'xpath': 'syslog/files/file', 'tag': True, 'operation': 'edit', 'attrib': "operation"}),
('a:name', {'xpath': 'syslog/files/file/file-name', 'operation': 'edit'}),
('file-attrib', {'xpath': 'syslog/files/file/file-log-attributes', 'tag': True, 'operation': 'edit'}),
('a:size', {'xpath': 'syslog/files/file/file-log-attributes/max-file-size', 'operation': 'edit'}),
('a:level', {'xpath': 'syslog/files/file/file-log-attributes/severity', 'operation': 'edit'}),
])
self._log_host_meta.update([
('host-server', {'xpath': 'syslog/host-server', 'tag': True, 'operation': 'edit'}),
('vrfs', {'xpath': 'syslog/host-server/vrfs', 'tag': True, 'operation': 'edit'}),
('vrf', {'xpath': 'syslog/host-server/vrfs/vrf', 'tag': True, 'operation': 'edit'}),
('a:vrf', {'xpath': 'syslog/host-server/vrfs/vrf/vrf-name', 'operation': 'edit'}),
('ipv4s', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s', 'tag': True, 'operation': 'edit'}),
('ipv4', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4', 'tag': True, 'operation': 'edit', 'attrib': "operation"}),
('a:name', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4/address', 'operation': 'edit'}),
('ipv4-sev', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4/ipv4-severity-port', 'tag': True, 'operation': 'edit'}),
('a:level', {'xpath': 'syslog/host-server/vrfs/vrf/ipv4s/ipv4/ipv4-severity-port/severity', 'operation': 'edit'}),
])
self._log_console_meta.update([
('a:enable-console', {'xpath': 'syslog/enable-console-logging', 'operation': 'edit', 'attrib': "operation"}),
('console', {'xpath': 'syslog/console-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}),
('a:console-level', {'xpath': 'syslog/console-logging/logging-level', 'operation': 'edit'}),
])
self._log_monitor_meta.update([
('monitor', {'xpath': 'syslog/monitor-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}),
('a:monitor-level', {'xpath': 'syslog/monitor-logging/logging-level', 'operation': 'edit'}),
])
self._log_buffered_size_meta.update([
('buffered', {'xpath': 'syslog/buffered-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}),
('a:size', {'xpath': 'syslog/buffered-logging/buffer-size', 'operation': 'edit'}),
])
self._log_buffered_level_meta.update([
('buffered', {'xpath': 'syslog/buffered-logging', 'tag': True, 'operation': 'edit', 'attrib': "operation"}),
('a:level', {'xpath': 'syslog/buffered-logging/logging-level', 'operation': 'edit'}),
])
self._log_facility_meta.update([
('facility', {'xpath': 'syslog/logging-facilities', 'tag': True, 'operation': 'edit', 'attrib': "operation"}),
('a:facility', {'xpath': 'syslog/logging-facilities/facility-level', 'operation': 'edit'}),
])
self._log_prefix_meta.update([
('a:hostnameprefix', {'xpath': 'syslog/host-name-prefix', 'operation': 'edit', 'attrib': "operation"}),
])
state = self._module.params['state']
_get_filter = build_xml('syslog', opcode="filter")
running = get_config(self._module, source='running', config_filter=_get_filter)
file_ele = etree_findall(running, 'file')
file_list = list()
if len(file_ele):
for file in file_ele:
file_name = etree_find(file, 'file-name')
file_list.append(file_name.text if file_name is not None else None)
vrf_ele = etree_findall(running, 'vrf')
host_list = list()
for vrf in vrf_ele:
host_ele = etree_findall(vrf, 'ipv4')
for host in host_ele:
host_name = etree_find(host, 'address')
host_list.append(host_name.text if host_name is not None else None)
console_ele = etree_find(running, 'console-logging')
console_level = etree_find(console_ele, 'logging-level') if console_ele is not None else None
have_console = console_level.text if console_level is not None else None
monitor_ele = etree_find(running, 'monitor-logging')
monitor_level = etree_find(monitor_ele, 'logging-level') if monitor_ele is not None else None
have_monitor = monitor_level.text if monitor_level is not None else None
buffered_ele = etree_find(running, 'buffered-logging')
buffered_size = etree_find(buffered_ele, 'buffer-size') if buffered_ele is not None else None
have_buffered = buffered_size.text if buffered_size is not None else None
facility_ele = etree_find(running, 'logging-facilities')
facility_level = etree_find(facility_ele, 'facility-level') if facility_ele is not None else None
have_facility = facility_level.text if facility_level is not None else None
prefix_ele = etree_find(running, 'host-name-prefix')
have_prefix = prefix_ele.text if prefix_ele is not None else None
console_enable_ele = etree_find(running, 'enable-console-logging')
have_console_enable = console_enable_ele.text if console_enable_ele is not None else None
file_params = list()
host_params = list()
console_params = dict()
monitor_params = dict()
buffered_params = dict()
facility_params = dict()
prefix_params = dict()
opcode = None
if state == 'absent':
opcode = "delete"
for item in self._want:
if item['dest'] == 'file' and item['name'] in file_list:
item['level'] = severity_level[item['level']]
file_params.append(item)
elif item['dest'] == 'host' and item['name'] in host_list:
item['level'] = severity_level[item['level']]
host_params.append(item)
elif item['dest'] == 'console' and have_console:
console_params.update({'console-level': item['level']})
elif item['dest'] == 'monitor' and have_monitor:
monitor_params.update({'monitor-level': item['level']})
elif item['dest'] == 'buffered' and have_buffered:
buffered_params['size'] = str(item['size']) if item['size'] else None
buffered_params['level'] = item['level'] if item['level'] else None
elif item['dest'] is None and item['hostnameprefix'] is None and \
item['facility'] is not None and have_facility:
facility_params.update({'facility': item['facility']})
elif item['dest'] is None and item['hostnameprefix'] is not None and have_prefix:
prefix_params.update({'hostnameprefix': item['hostnameprefix']})
elif state == 'present':
opcode = 'merge'
for item in self._want:
if item['dest'] == 'file':
item['level'] = severity_level[item['level']]
file_params.append(item)
elif item['dest'] == 'host':
item['level'] = severity_level[item['level']]
host_params.append(item)
elif item['dest'] == 'console':
console_params.update({'console-level': item['level']})
elif item['dest'] == 'monitor':
monitor_params.update({'monitor-level': item['level']})
elif item['dest'] == 'buffered':
buffered_params['size'] = str(item['size']) if item['size'] else None
buffered_params['level'] = item['level'] if item['level'] else None
elif item['dest'] is None and item['hostnameprefix'] is None and \
item['facility'] is not None:
facility_params.update({'facility': item['facility']})
elif item['dest'] is None and item['hostnameprefix'] is not None:
prefix_params.update({'hostnameprefix': item['hostnameprefix']})
self._result['xml'] = []
_edit_filter_list = list()
if opcode:
if len(file_params):
_edit_filter_list.append(build_xml('syslog', xmap=self._log_file_meta,
params=file_params, opcode=opcode))
if len(host_params):
_edit_filter_list.append(build_xml('syslog', xmap=self._log_host_meta,
params=host_params, opcode=opcode))
if len(console_params):
_edit_filter_list.append(build_xml('syslog', xmap=self._log_console_meta,
params=console_params, opcode=opcode))
if len(monitor_params):
_edit_filter_list.append(build_xml('syslog', xmap=self._log_monitor_meta,
params=monitor_params, opcode=opcode))
if len(buffered_params):
_edit_filter_list.append(build_xml('syslog', xmap=self._log_buffered_size_meta,
params=buffered_params, opcode=opcode))
_edit_filter_list.append(build_xml('syslog', xmap=self._log_buffered_level_meta,
params=buffered_params, opcode=opcode))
if len(facility_params):
_edit_filter_list.append(build_xml('syslog', xmap=self._log_facility_meta,
params=facility_params, opcode=opcode))
if len(prefix_params):
_edit_filter_list.append(build_xml('syslog', xmap=self._log_prefix_meta,
params=prefix_params, opcode=opcode))
diff = None
if len(_edit_filter_list):
commit = not self._module.check_mode
diff = load_config(self._module, _edit_filter_list, commit=commit, running=running,
nc_get_filter=_get_filter)
if diff:
if self._module._diff:
self._result['diff'] = dict(prepared=diff)
self._result['xml'] = _edit_filter_list
self._result['changed'] = True
def run(self):
self.map_params_to_obj()
self.map_obj_to_xml_rpc()
return self._result
def main():
""" main entry point for module execution
"""
element_spec = dict(
dest=dict(type='str', choices=['host', 'console', 'monitor', 'buffered', 'file']),
name=dict(type='str'),
size=dict(type='int'),
vrf=dict(type='str', default='default'),
facility=dict(type='str', default='local7'),
hostnameprefix=dict(type='str'),
level=dict(type='str', default='informational', aliases=['severity'],
choices=['emergencies', 'alerts', 'critical', 'errors', 'warning',
'notifications', 'informational', 'debugging']),
state=dict(default='present', choices=['present', 'absent']),
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
mutually_exclusive = [('dest', 'facility', 'hostnameprefix')]
required_if = [('dest', 'host', ['name']),
('dest', 'file', ['name']),
('dest', 'buffered', ['size']),
('dest', 'console', ['level']),
('dest', 'monitor', ['level'])]
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec,
mutually_exclusive=mutually_exclusive, required_if=required_if),
)
argument_spec.update(element_spec)
argument_spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
config_object = None
if is_cliconf(module):
# Commenting out the cliconf deprecation call below for Ansible 2.9, as cli transport will continue to be supported
# module.deprecate("cli support for 'iosxr_logging' is deprecated. Use transport netconf instead",
# version='2.9')
config_object = CliConfiguration(module)
elif is_netconf(module):
config_object = NCConfiguration(module)
if config_object:
result = config_object.run()
module.exit_json(**result)
if __name__ == '__main__':
main()
|
lrntc/touch_hdr
|
refs/heads/master
|
touch_hdr_variables.py
|
1
|
EV_dict = { 0 : "-5000",
1 : "-4666",
2 : "-4333",
3 : "-4000",
4 : "-3666",
5 : "-3333",
6 : "-3000",
7 : "-2666",
8 : "-2333",
9 : "-2000",
10 : "-1666",
11 : "-1333",
12 : "-1000",
13 : "-666",
14 : "-333",
15 : "0",
16 : "333",
17 : "666",
18 : "1000",
19 : "1333",
20 : "1666",
21 : "2000",
22 : "2333",
23 : "2666",
24 : "3000",
25 : "3333",
26 : "3666",
27 : "4000",
28 : "4333",
29 : "4666",
30 : "5000"
}
EV = ["-5000", "-4666", "-4333", "-4000", "-3666",
"-3333", "-3000", "-2666", "-2333", "-2000",
"-1666", "-1333", "-1000", "-666", "-333",
"0", "333", "666", "1000", "1333", "1666",
"2000", "2333", "2666", "3000", "3333",
"3666", "4000", "4333", "4666", "5000"]
|
alex/sqlalchemy
|
refs/heads/master
|
test/orm/inheritance/test_poly_persistence.py
|
28
|
"""tests basic polymorphic mapper loading/saving, minimal relationships"""
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import exc as sa_exc
from sqlalchemy.testing.schema import Column
from sqlalchemy import testing
from sqlalchemy.testing.util import function_named
from test.orm import _fixtures
from sqlalchemy.testing import fixtures
class Person(fixtures.ComparableEntity):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Boss(Manager):
pass
class Company(fixtures.ComparableEntity):
pass
class PolymorphTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global companies, people, engineers, managers, boss
companies = Table('companies', metadata,
Column('company_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)))
people = Table('people', metadata,
Column('person_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('company_id', Integer, ForeignKey('companies.company_id'),
nullable=False),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)),
Column('engineer_name', String(50)),
Column('primary_language', String(50)),
)
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
Column('status', String(30)),
Column('manager_name', String(50))
)
boss = Table('boss', metadata,
Column('boss_id', Integer, ForeignKey('managers.person_id'),
primary_key=True),
Column('golf_swing', String(30)),
)
metadata.create_all()
class InsertOrderTest(PolymorphTest):
def test_insert_order(self):
"""test that classes of multiple types mix up mapper inserts
so that insert order of individual tables is maintained"""
person_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
'person':people.select(people.c.type=='person'),
}, None, 'pjoin')
person_mapper = mapper(Person, people,
with_polymorphic=('*', person_join),
polymorphic_on=person_join.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers, inherits=person_mapper,
polymorphic_identity='engineer')
mapper(Manager, managers, inherits=person_mapper,
polymorphic_identity='manager')
mapper(Company, companies, properties={
'employees': relationship(Person,
backref='company',
order_by=person_join.c.person_id)
})
session = create_session()
c = Company(name='company1')
c.employees.append(Manager(status='AAB', manager_name='manager1'
, name='pointy haired boss'))
c.employees.append(Engineer(status='BBA',
engineer_name='engineer1',
primary_language='java', name='dilbert'))
c.employees.append(Person(status='HHH', name='joesmith'))
c.employees.append(Engineer(status='CGG',
engineer_name='engineer2',
primary_language='python', name='wally'))
c.employees.append(Manager(status='ABA', manager_name='manager2'
, name='jsmith'))
session.add(c)
session.flush()
session.expunge_all()
eq_(session.query(Company).get(c.company_id), c)
class RoundTripTest(PolymorphTest):
pass
def _generate_round_trip_test(include_base, lazy_relationship,
redefine_colprop, with_polymorphic):
"""generates a round trip test.
include_base - whether or not to include the base 'person' type in
the union.
lazy_relationship - whether or not the Company relationship to
People is lazy or eager.
redefine_colprop - whether to redefine the 'name' column as
'person_name' on the base Person class
with_polymorphic - the with_polymorphic strategy to use: 'unions', 'joins', 'auto' or 'none'
"""
def test_roundtrip(self):
if with_polymorphic == 'unions':
if include_base:
person_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
'person':people.select(people.c.type=='person'),
}, None, 'pjoin')
else:
person_join = polymorphic_union(
{
'engineer':people.join(engineers),
'manager':people.join(managers),
}, None, 'pjoin')
manager_join = people.join(managers).outerjoin(boss)
person_with_polymorphic = ['*', person_join]
manager_with_polymorphic = ['*', manager_join]
elif with_polymorphic == 'joins':
person_join = people.outerjoin(engineers).outerjoin(managers).\
outerjoin(boss)
manager_join = people.join(managers).outerjoin(boss)
person_with_polymorphic = ['*', person_join]
manager_with_polymorphic = ['*', manager_join]
elif with_polymorphic == 'auto':
person_with_polymorphic = '*'
manager_with_polymorphic = '*'
else:
person_with_polymorphic = None
manager_with_polymorphic = None
if redefine_colprop:
person_mapper = mapper(Person, people,
with_polymorphic=person_with_polymorphic,
polymorphic_on=people.c.type,
polymorphic_identity='person',
properties= {'person_name':people.c.name})
else:
person_mapper = mapper(Person, people,
with_polymorphic=person_with_polymorphic,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers, inherits=person_mapper,
polymorphic_identity='engineer')
mapper(Manager, managers, inherits=person_mapper,
with_polymorphic=manager_with_polymorphic,
polymorphic_identity='manager')
mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss')
mapper(Company, companies, properties={
'employees': relationship(Person, lazy=lazy_relationship,
cascade="all, delete-orphan",
backref="company", order_by=people.c.person_id
)
})
if redefine_colprop:
person_attribute_name = 'person_name'
else:
person_attribute_name = 'name'
employees = [
Manager(status='AAB', manager_name='manager1',
**{person_attribute_name:'pointy haired boss'}),
Engineer(status='BBA', engineer_name='engineer1',
primary_language='java',
**{person_attribute_name:'dilbert'}),
]
if include_base:
employees.append(Person(**{person_attribute_name:'joesmith'}))
employees += [
Engineer(status='CGG', engineer_name='engineer2',
primary_language='python',
**{person_attribute_name:'wally'}),
Manager(status='ABA', manager_name='manager2',
**{person_attribute_name:'jsmith'})
]
pointy = employees[0]
jsmith = employees[-1]
dilbert = employees[1]
session = create_session()
c = Company(name='company1')
c.employees = employees
session.add(c)
session.flush()
session.expunge_all()
eq_(session.query(Person).get(dilbert.person_id), dilbert)
session.expunge_all()
eq_(session.query(Person).filter(
Person.person_id==dilbert.person_id).one(),
dilbert)
session.expunge_all()
def go():
cc = session.query(Company).get(c.company_id)
eq_(cc.employees, employees)
if not lazy_relationship:
if with_polymorphic != 'none':
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 5)
else:
if with_polymorphic != 'none':
self.assert_sql_count(testing.db, go, 2)
else:
self.assert_sql_count(testing.db, go, 6)
# test selecting from the query, using the base
# mapped table (people) as the selection criterion.
# in the case of the polymorphic Person query,
# the "people" selectable should be adapted to be "person_join"
eq_(
session.query(Person).filter(
getattr(Person, person_attribute_name)=='dilbert'
).first(),
dilbert
)
assert session.query(Person).filter(
getattr(Person, person_attribute_name)=='dilbert'
).first().person_id
eq_(
session.query(Engineer).filter(
getattr(Person, person_attribute_name)=='dilbert'
).first(),
dilbert
)
# test selecting from the query, joining against
# an alias of the base "people" table. test that
# the "palias" alias does *not* get sucked up
# into the "person_join" conversion.
palias = people.alias("palias")
dilbert = session.query(Person).get(dilbert.person_id)
assert dilbert is session.query(Person).filter(
(palias.c.name=='dilbert') & \
(palias.c.person_id==Person.person_id)).first()
assert dilbert is session.query(Engineer).filter(
(palias.c.name=='dilbert') & \
(palias.c.person_id==Person.person_id)).first()
assert dilbert is session.query(Person).filter(
(Engineer.engineer_name=="engineer1") & \
(engineers.c.person_id==people.c.person_id)
).first()
assert dilbert is session.query(Engineer).\
filter(Engineer.engineer_name=="engineer1")[0]
session.flush()
session.expunge_all()
def go():
session.query(Person).filter(getattr(Person,
person_attribute_name)=='dilbert').first()
self.assert_sql_count(testing.db, go, 1)
session.expunge_all()
dilbert = session.query(Person).filter(getattr(Person,
person_attribute_name)=='dilbert').first()
def go():
# assert that only the primary table is queried for an
# object that is already present in the session
d = session.query(Person).filter(getattr(Person,
person_attribute_name)=='dilbert').first()
self.assert_sql_count(testing.db, go, 1)
# test standalone orphans
daboss = Boss(status='BBB',
manager_name='boss',
golf_swing='fore',
**{person_attribute_name:'daboss'})
session.add(daboss)
assert_raises(sa_exc.DBAPIError, session.flush)
c = session.query(Company).first()
daboss.company = c
manager_list = [e for e in c.employees
if isinstance(e, Manager)]
session.flush()
session.expunge_all()
eq_(session.query(Manager).order_by(Manager.person_id).all(),
manager_list)
c = session.query(Company).first()
session.delete(c)
session.flush()
eq_(people.count().scalar(), 0)
test_roundtrip = function_named(
test_roundtrip, "test_%s%s%s_%s" % (
(lazy_relationship and "lazy" or "eager"),
(include_base and "_inclbase" or ""),
(redefine_colprop and "_redefcol" or ""),
with_polymorphic))
setattr(RoundTripTest, test_roundtrip.__name__, test_roundtrip)
for lazy_relationship in [True, False]:
for redefine_colprop in [True, False]:
for with_polymorphic in ['unions', 'joins', 'auto', 'none']:
if with_polymorphic == 'unions':
for include_base in [True, False]:
_generate_round_trip_test(include_base,
lazy_relationship,
redefine_colprop, with_polymorphic)
else:
_generate_round_trip_test(False,
lazy_relationship,
redefine_colprop, with_polymorphic)
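# Illustrative note (a sketch): the loops above attach generated methods such
# as test_lazy_inclbase_redefcol_unions or test_eager_none to RoundTripTest,
# one per combination of laziness, base inclusion, column redefinition and
# with_polymorphic strategy.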
|
piffey/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/fortimanager/fortimanager.py
|
64
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
class AnsibleFortiManager(object):
def __init__(self, module, ip=None, username=None, passwd=None, use_ssl=True, verify_ssl=False, timeout=300):
self.ip = ip
self.username = username
self.passwd = passwd
self.use_ssl = use_ssl
self.verify_ssl = verify_ssl
self.timeout = timeout
self.fmgr_instance = None
if not HAS_PYFMGR:
module.fail_json(msg='Could not import the python library pyFMG required by this module')
self.module = module
def login(self):
if self.ip is not None:
self.fmgr_instance = FortiManager(self.ip, self.username, self.passwd, use_ssl=self.use_ssl,
verify_ssl=self.verify_ssl, timeout=self.timeout, debug=False,
disable_request_warnings=True)
return self.fmgr_instance.login()
def logout(self):
if self.fmgr_instance.sid is not None:
self.fmgr_instance.logout()
def get(self, url, data):
return self.fmgr_instance.get(url, **data)
def set(self, url, data):
return self.fmgr_instance.set(url, **data)
def update(self, url, data):
return self.fmgr_instance.update(url, **data)
def delete(self, url, data):
return self.fmgr_instance.delete(url, **data)
def add(self, url, data):
return self.fmgr_instance.add(url, **data)
def execute(self, url, data):
return self.fmgr_instance.execute(url, **data)
def move(self, url, data):
return self.fmgr_instance.move(url, **data)
def clone(self, url, data):
return self.fmgr_instance.clone(url, **data)
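# Illustrative usage (a sketch; the address, credentials and URL are
# hypothetical and only the wrapper methods defined above are used):
#   fmgr = AnsibleFortiManager(module, ip='192.0.2.10', username='admin', passwd='secret')
#   fmgr.login()
#   result = fmgr.get('/dvmdb/adom/root/device', {})
#   fmgr.logout()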
|
hojel/calibre
|
refs/heads/master
|
setup/install.py
|
6
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, textwrap, subprocess, shutil, tempfile, atexit, glob
from setup import (Command, islinux, isbsd, basenames, modules, functions,
__appname__, __version__)
HEADER = '''\
#!/usr/bin/env python2
"""
This is the standard runscript for all of calibre's tools.
Do not modify it unless you know what you are doing.
"""
import sys, os
path = os.environ.get('CALIBRE_PYTHON_PATH', {path!r})
if path not in sys.path:
sys.path.insert(0, path)
sys.resources_location = os.environ.get('CALIBRE_RESOURCES_PATH', {resources!r})
sys.extensions_location = os.environ.get('CALIBRE_EXTENSIONS_PATH', {extensions!r})
sys.executables_location = os.environ.get('CALIBRE_EXECUTABLES_PATH', {executables!r})
'''
TEMPLATE = HEADER+'''
from {module} import {func!s}
sys.exit({func!s}())
'''
COMPLETE_TEMPLATE = HEADER+'''
sys.path.insert(0, os.path.join(path, 'calibre', 'utils'))
import complete
sys.path = sys.path[1:]
sys.exit(complete.main())
'''
class Develop(Command):
description = textwrap.dedent('''\
Setup a development environment for calibre.
This allows you to run calibre directly from the source tree.
Binaries will be installed in <prefix>/bin where <prefix> is
the prefix of your python installation. This can be controlled
via the --prefix option.
''')
short_description = 'Setup a development environment for calibre'
MODE = 0o755
sub_commands = ['build', 'resources', 'iso639', 'iso3166', 'gui',]
def add_postinstall_options(self, parser):
parser.add_option('--make-errors-fatal', action='store_true', default=False,
dest='fatal_errors', help='If set die on post install errors.')
parser.add_option('--no-postinstall', action='store_false',
dest='postinstall', default=True,
help='Don\'t run post install actions like creating MAN pages, setting'+
' up desktop integration and so on')
def add_options(self, parser):
parser.add_option('--prefix',
help='Binaries will be installed in <prefix>/bin')
self.add_postinstall_options(parser)
def consolidate_paths(self):
opts = self.opts
if not opts.prefix:
opts.prefix = sys.prefix
for x in ('prefix', 'libdir', 'bindir', 'sharedir', 'staging_root',
'staging_libdir', 'staging_bindir', 'staging_sharedir'):
o = getattr(opts, x, None)
if o:
setattr(opts, x, os.path.abspath(o))
self.libdir = getattr(opts, 'libdir', None)
if self.libdir is None:
self.libdir = self.j(opts.prefix, 'lib')
self.bindir = getattr(opts, 'bindir', None)
if self.bindir is None:
self.bindir = self.j(opts.prefix, 'bin')
self.sharedir = getattr(opts, 'sharedir', None)
if self.sharedir is None:
self.sharedir = self.j(opts.prefix, 'share')
if not getattr(opts, 'staging_root', None):
opts.staging_root = opts.prefix
self.staging_libdir = getattr(opts, 'staging_libdir', None)
if self.staging_libdir is None:
self.staging_libdir = opts.staging_libdir = self.j(opts.staging_root, 'lib')
self.staging_bindir = getattr(opts, 'staging_bindir', None)
if self.staging_bindir is None:
self.staging_bindir = opts.staging_bindir = self.j(opts.staging_root, 'bin')
self.staging_sharedir = getattr(opts, 'staging_sharedir', None)
if self.staging_sharedir is None:
self.staging_sharedir = opts.staging_sharedir = self.j(opts.staging_root, 'share')
self.staging_libdir = opts.staging_libdir = self.j(self.staging_libdir, 'calibre')
self.staging_sharedir = opts.staging_sharedir = self.j(self.staging_sharedir, 'calibre')
if self.__class__.__name__ == 'Develop':
self.libdir = self.SRC
self.sharedir = self.RESOURCES
else:
self.libdir = self.j(self.libdir, 'calibre')
self.sharedir = self.j(self.sharedir, 'calibre')
self.info('INSTALL paths:')
self.info('\tLIB:', self.staging_libdir)
self.info('\tSHARE:', self.staging_sharedir)
def pre_sub_commands(self, opts):
if not (islinux or isbsd):
self.info('\nSetting up a source based development environment is only '
'supported on linux. On other platforms, see the User Manual'
' for help with setting up a development environment.')
raise SystemExit(1)
if os.geteuid() == 0:
self.drop_privileges()
# Ensure any calibre config files are created as correct user
import calibre.utils.config as c
c
def run(self, opts):
self.manifest = []
self.opts = opts
self.regain_privileges()
self.consolidate_paths()
self.write_templates()
self.install_files()
self.run_postinstall()
self.install_env_module()
self.success()
def install_env_module(self):
import distutils.sysconfig as s
libdir = s.get_python_lib(prefix=self.opts.staging_root)
try:
if not os.path.exists(libdir):
os.makedirs(libdir)
except EnvironmentError:
self.warn('Cannot install calibre environment module to: '+libdir)
else:
path = os.path.join(libdir, 'init_calibre.py')
self.info('Installing calibre environment module: '+path)
with open(path, 'wb') as f:
f.write(HEADER.format(**self.template_args()))
def install_files(self):
pass
def run_postinstall(self):
if self.opts.postinstall:
from calibre.linux import PostInstall
PostInstall(self.opts, info=self.info, warn=self.warn,
manifest=self.manifest)
def success(self):
self.info('\nDevelopment environment successfully setup')
def write_templates(self):
for typ in ('console', 'gui'):
for name, mod, func in zip(basenames[typ], modules[typ],
functions[typ]):
self.write_template(name, mod, func)
def template_args(self):
return {
'path':self.libdir,
'resources':self.sharedir,
'executables':self.bindir,
'extensions':self.j(self.libdir, 'calibre', 'plugins')
}
def write_template(self, name, mod, func):
template = COMPLETE_TEMPLATE if name == 'calibre-complete' else TEMPLATE
args = self.template_args()
args['module'] = mod
args['func'] = func
script = template.format(**args)
path = self.j(self.staging_bindir, name)
if not os.path.exists(self.staging_bindir):
os.makedirs(self.staging_bindir)
self.info('Installing binary:', path)
if os.path.lexists(path) and not os.path.exists(path):
os.remove(path)
open(path, 'wb').write(script)
os.chmod(path, self.MODE)
self.manifest.append(path)
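# Illustrative result (a sketch; the names below are hypothetical): for a
# (name, module, func) triple such as ('some-tool', 'calibre.some.module', 'main'),
# write_template() renders TEMPLATE into a launcher script ending with
#   from calibre.some.module import main
#   sys.exit(main())
# and installs it as an executable at <staging_bindir>/some-tool.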
class Install(Develop):
description = textwrap.dedent('''\
Install calibre to your system. By default, calibre
is installed to <prefix>/bin, <prefix>/lib/calibre,
<prefix>/share/calibre. These can all be controlled via options.
The default <prefix> is the prefix of your python installation.
The .desktop, .mime and icon files are installed using XDG. The
location they are installed to can be controlled by setting
the environment variables:
XDG_DATA_DIRS=/usr/share equivalent
XDG_UTILS_INSTALL_MODE=system
''')
short_description = 'Install calibre from source'
sub_commands = ['build', 'gui']
def add_options(self, parser):
parser.add_option('--prefix', help='Installation prefix.')
parser.add_option('--libdir',
help='Where to put calibre library files. Default is <prefix>/lib')
parser.add_option('--bindir',
help='Where to put the calibre binaries. Default is <prefix>/bin')
parser.add_option('--sharedir',
help='Where to put the calibre data files. Default is <prefix>/share')
parser.add_option('--staging-root', '--root', default=None,
help=('Use a different installation root (mainly for packaging).'
' The prefix option controls the paths written into '
'the launcher scripts. This option controls the prefix '
'to which the install will actually copy files. By default '
'it is set to the value of --prefix.'))
parser.add_option('--staging-libdir',
help='Where to put calibre library files. Default is <root>/lib')
parser.add_option('--staging-bindir',
help='Where to put the calibre binaries. Default is <root>/bin')
parser.add_option('--staging-sharedir',
help='Where to put the calibre data files. Default is <root>/share')
self.add_postinstall_options(parser)
def install_files(self):
dest = self.staging_libdir
if os.path.exists(dest):
shutil.rmtree(dest)
self.info('Installing code to', dest)
self.manifest.append(dest)
for x in os.walk(self.SRC):
reldir = os.path.relpath(x[0], self.SRC)
destdir = os.path.join(dest, reldir)
for f in x[-1]:
if os.path.splitext(f)[1] in ('.py', '.so'):
if not os.path.exists(destdir):
os.makedirs(destdir)
shutil.copy2(self.j(x[0], f), destdir)
dest = self.staging_sharedir
if os.path.exists(dest):
shutil.rmtree(dest)
self.info('Installing resources to', dest)
shutil.copytree(self.RESOURCES, dest)
self.manifest.append(dest)
def success(self):
self.info('\n\ncalibre successfully installed. You can start'
' it by running the command calibre')
class Sdist(Command):
description = 'Create a source distribution'
DEST = os.path.join('dist', '%s-%s.tar.xz'%(__appname__, __version__))
def run(self, opts):
if not self.e(self.d(self.DEST)):
os.makedirs(self.d(self.DEST))
tdir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tdir)
tdir = self.j(tdir, 'calibre-%s' % __version__)
self.info('\tRunning git export...')
os.mkdir(tdir)
subprocess.check_call('git archive HEAD | tar -x -C ' + tdir, shell=True)
for x in open('.gitignore').readlines():
if not x.startswith('resources/'):
continue
p = x.strip().replace('/', os.sep)
for p in glob.glob(p):
d = self.j(tdir, os.path.dirname(p))
if not self.e(d):
os.makedirs(d)
if os.path.isdir(p):
shutil.copytree(p, self.j(tdir, p))
else:
shutil.copy2(p, d)
for x in os.walk(os.path.join(self.SRC, 'calibre')):
for f in x[-1]:
if not f.endswith('_ui.py'):
continue
f = os.path.join(x[0], f)
f = os.path.relpath(f)
dest = os.path.join(tdir, self.d(f))
shutil.copy2(f, dest)
tbase = self.j(self.d(self.SRC), 'translations')
for x in ('iso_639', 'calibre'):
destdir = self.j(tdir, 'translations', x)
if not os.path.exists(destdir):
os.makedirs(destdir)
for y in glob.glob(self.j(tbase, x, '*.po')) + glob.glob(self.j(tbase, x, '*.pot')):
dest = self.j(destdir, self.b(y))
if not os.path.exists(dest):
shutil.copy2(y, dest)
shutil.copytree(self.j(tbase, 'manual'), self.j(tdir, 'translations', 'manual'))
self.info('\tCreating tarfile...')
dest = self.DEST.rpartition('.')[0]
subprocess.check_call(['tar', '-cf', self.a(dest), 'calibre-%s' % __version__], cwd=self.d(tdir))
self.info('\tCompressing tarfile...')
if os.path.exists(self.a(self.DEST)):
os.remove(self.a(self.DEST))
subprocess.check_call(['xz', '-9', self.a(dest)])
def clean(self):
if os.path.exists(self.DEST):
os.remove(self.DEST)
class Bootstrap(Command):
description = 'Bootstrap a fresh checkout of calibre from git to a state where it can be installed. Requires various development tools/libraries/headers'
TRANSLATIONS_REPO = 'https://github.com/kovidgoyal/calibre-translations.git'
sub_commands = 'build iso639 iso3166 translations gui resources'.split()
def pre_sub_commands(self, opts):
tdir = self.j(self.d(self.SRC), 'translations')
if os.path.exists(tdir):
subprocess.check_call(['git', 'pull'], cwd=tdir)
else:
subprocess.check_call(['git', 'clone', self.TRANSLATIONS_REPO, 'translations'], cwd=self.d(self.SRC))
def run(self, opts):
self.info('\n\nAll done! You should now be able to run "%s setup.py install" to install calibre' % sys.executable)
|
dimven/ClockworkForDynamo
|
refs/heads/master
|
nodes/0.9.x/python/View.Phase.py
|
8
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
views = UnwrapElement(IN[0])
elementlist = list()
for view in views:
try:
elementlist.append(view.Document.GetElement(view.get_Parameter(BuiltInParameter.VIEW_PHASE).AsElementId()))
except:
elementlist.append(list())
OUT = elementlist
|
Eficent/odoomrp-wip
|
refs/heads/8.0
|
machine_purchase/__openerp__.py
|
27
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
'name': 'Machine Purchase',
'version': '1.0',
'description': """
This module allows machines to be generated through purchase invoices.
""",
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
'website': 'http://www.odoomrp.com',
"depends": ['machine_manager', 'stock_account', 'purchase'],
"category": "Generic Modules",
"data": [],
"installable": True,
"auto_install": True,
}
|
kernc/pelican-plugins
|
refs/heads/master
|
summary/test_summary.py
|
339
|
# -*- coding: utf-8 -*-
import unittest
from jinja2.utils import generate_lorem_ipsum
# generate one paragraph, enclosed with <p>
TEST_CONTENT = str(generate_lorem_ipsum(n=1))
TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False)
from pelican.contents import Page
import summary
class TestSummary(unittest.TestCase):
def setUp(self):
super(TestSummary, self).setUp()
summary.register()
summary.initialized(None)
self.page_kwargs = {
'content': TEST_CONTENT,
'context': {
'localsiteurl': '',
},
'metadata': {
'summary': TEST_SUMMARY,
'title': 'foo bar',
'author': 'Blogger',
},
}
def _copy_page_kwargs(self):
# make a deep copy of page_kwargs
page_kwargs = dict([(key, self.page_kwargs[key]) for key in
self.page_kwargs])
for key in page_kwargs:
if not isinstance(page_kwargs[key], dict):
break
page_kwargs[key] = dict([(subkey, page_kwargs[key][subkey])
for subkey in page_kwargs[key]])
return page_kwargs
def test_end_summary(self):
page_kwargs = self._copy_page_kwargs()
del page_kwargs['metadata']['summary']
page_kwargs['content'] = (
TEST_SUMMARY + '<!-- PELICAN_END_SUMMARY -->' + TEST_CONTENT)
page = Page(**page_kwargs)
# test both the summary and the marker removal
self.assertEqual(page.summary, TEST_SUMMARY)
self.assertEqual(page.content, TEST_SUMMARY + TEST_CONTENT)
def test_begin_summary(self):
page_kwargs = self._copy_page_kwargs()
del page_kwargs['metadata']['summary']
page_kwargs['content'] = (
'FOOBAR<!-- PELICAN_BEGIN_SUMMARY -->' + TEST_CONTENT)
page = Page(**page_kwargs)
# test both the summary and the marker removal
self.assertEqual(page.summary, TEST_CONTENT)
self.assertEqual(page.content, 'FOOBAR' + TEST_CONTENT)
def test_begin_end_summary(self):
page_kwargs = self._copy_page_kwargs()
del page_kwargs['metadata']['summary']
page_kwargs['content'] = (
'FOOBAR<!-- PELICAN_BEGIN_SUMMARY -->' + TEST_SUMMARY +
'<!-- PELICAN_END_SUMMARY -->' + TEST_CONTENT)
page = Page(**page_kwargs)
# test both the summary and the marker removal
self.assertEqual(page.summary, TEST_SUMMARY)
self.assertEqual(page.content, 'FOOBAR' + TEST_SUMMARY + TEST_CONTENT)
|
tmroeder/cloudproxy
|
refs/heads/master
|
src/third_party/googlemock/gtest/xcode/Scripts/versiongenerate.py
|
3088
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with Autoconf itself):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following version_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
|
djungelorm/sphinx-tabs
|
refs/heads/master
|
sphinx_tabs/__init__.py
|
1
|
__version__ = "2.1.0"
__import__("pkg_resources").declare_namespace(__name__)
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/ads/googleads/v5/googleads-py/tests/unit/gapic/googleads.v5/services/test_user_interest_service.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v5.common.types import criterion_category_availability
from google.ads.googleads.v5.enums.types import user_interest_taxonomy_type
from google.ads.googleads.v5.resources.types import user_interest
from google.ads.googleads.v5.services.services.user_interest_service import UserInterestServiceClient
from google.ads.googleads.v5.services.services.user_interest_service import transports
from google.ads.googleads.v5.services.types import user_interest_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import wrappers_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert UserInterestServiceClient._get_default_mtls_endpoint(None) is None
assert UserInterestServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert UserInterestServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert UserInterestServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert UserInterestServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert UserInterestServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_user_interest_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = UserInterestServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_user_interest_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = UserInterestServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = UserInterestServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_user_interest_service_client_get_transport_class():
transport = UserInterestServiceClient.get_transport_class()
assert transport == transports.UserInterestServiceGrpcTransport
transport = UserInterestServiceClient.get_transport_class("grpc")
assert transport == transports.UserInterestServiceGrpcTransport
@mock.patch.object(UserInterestServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(UserInterestServiceClient))
def test_user_interest_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.UserInterestServiceClient.get_transport_class') as gtc:
transport = transports.UserInterestServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = UserInterestServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.UserInterestServiceClient.get_transport_class') as gtc:
client = UserInterestServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = UserInterestServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = UserInterestServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = UserInterestServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = UserInterestServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = UserInterestServiceClient()
@mock.patch.object(UserInterestServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(UserInterestServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_user_interest_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = UserInterestServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = UserInterestServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = UserInterestServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_user_interest_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = UserInterestServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_user_interest(transport: str = 'grpc', request_type=user_interest_service.GetUserInterestRequest):
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_user_interest),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = user_interest.UserInterest(
resource_name='resource_name_value',
taxonomy_type=user_interest_taxonomy_type.UserInterestTaxonomyTypeEnum.UserInterestTaxonomyType.UNKNOWN,
)
response = client.get_user_interest(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == user_interest_service.GetUserInterestRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, user_interest.UserInterest)
assert response.resource_name == 'resource_name_value'
assert response.taxonomy_type == user_interest_taxonomy_type.UserInterestTaxonomyTypeEnum.UserInterestTaxonomyType.UNKNOWN
def test_get_user_interest_from_dict():
test_get_user_interest(request_type=dict)
def test_get_user_interest_field_headers():
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = user_interest_service.GetUserInterestRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_user_interest),
'__call__') as call:
call.return_value = user_interest.UserInterest()
client.get_user_interest(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_user_interest_flattened():
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_user_interest),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = user_interest.UserInterest()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_user_interest(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_user_interest_flattened_error():
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_user_interest(
user_interest_service.GetUserInterestRequest(),
resource_name='resource_name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.UserInterestServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.UserInterestServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = UserInterestServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.UserInterestServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.UserInterestServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.UserInterestServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_user_interest_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.UserInterestServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_user_interest',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_user_interest_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v5.services.services.user_interest_service.transports.UserInterestServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.UserInterestServiceTransport()
adc.assert_called_once()
def test_user_interest_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
UserInterestServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_user_interest_service_transport_auth_adc():
    # If credentials are not provided, the transport class should use
    # ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.UserInterestServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_user_interest_service_host_no_port():
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_user_interest_service_host_with_port():
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_user_interest_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.UserInterestServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.UserInterestServiceGrpcTransport])
def test_user_interest_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.UserInterestServiceGrpcTransport,])
def test_user_interest_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_user_interest_path():
customer = "squid"
user_interest = "clam"
expected = "customers/{customer}/userInterests/{user_interest}".format(customer=customer, user_interest=user_interest, )
actual = UserInterestServiceClient.user_interest_path(customer, user_interest)
assert expected == actual
def test_parse_user_interest_path():
expected = {
"customer": "whelk",
"user_interest": "octopus",
}
path = UserInterestServiceClient.user_interest_path(**expected)
# Check that the path construction is reversible.
actual = UserInterestServiceClient.parse_user_interest_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = UserInterestServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = UserInterestServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = UserInterestServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = UserInterestServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = UserInterestServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = UserInterestServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = UserInterestServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = UserInterestServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = UserInterestServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = UserInterestServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = UserInterestServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = UserInterestServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = UserInterestServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = UserInterestServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = UserInterestServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.UserInterestServiceTransport, '_prep_wrapped_messages') as prep:
client = UserInterestServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.UserInterestServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = UserInterestServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
|
mszewczy/odoo
|
refs/heads/8.0
|
addons/sales_team/res_config.py
|
366
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class sales_team_configuration(osv.TransientModel):
_name = 'sale.config.settings'
_inherit = ['sale.config.settings']
def set_group_multi_salesteams(self, cr, uid, ids, context=None):
""" This method is automatically called by res_config as it begins
with set. It is used to implement the 'one group or another'
behavior. We have to perform some group manipulation by hand
because in res_config.execute(), set_* methods are called
            after group_*; therefore writing on a hidden res_config file
            would not work.
            If group_multi_salesteams is checked: remove group_mono_salesteams
            from group_user and remove its users. Otherwise, just add
            group_mono_salesteams to group_user.
The inverse logic about group_multi_salesteams is managed by the
normal behavior of 'group_multi_salesteams' field.
"""
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return self.pool['ir.model.data'].get_object(cr, uid, mod, xml, context)
for obj in self.browse(cr, uid, ids, context=context):
config_group = ref('base.group_mono_salesteams')
base_group = ref('base.group_user')
if obj.group_multi_salesteams:
base_group.write({'implied_ids': [(3, config_group.id)]})
config_group.write({'users': [(3, u.id) for u in base_group.users]})
else:
base_group.write({'implied_ids': [(4, config_group.id)]})
return True
_columns = {
'group_multi_salesteams': fields.boolean("Organize Sales activities into multiple Sales Teams",
implied_group='base.group_multi_salesteams',
help="""Allows you to use Sales Teams to manage your leads and opportunities."""),
}
|
thurt/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/build/gyp/test/cxxflags/gyptest-cxxflags.py
|
142
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies the build of an executable with C++ flags specified via the CXXFLAGS
environment variable, and the use of the environment during regeneration when
the gyp file changes.
"""
import os
import TestGyp
env_stack = []
def PushEnv():
env_copy = os.environ.copy()
env_stack.append(env_copy)
def PopEnv():
  # Restore the environment saved by PushEnv().
  os.environ.clear()
  os.environ.update(env_stack.pop())
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
try:
PushEnv()
os.environ['CXXFLAGS'] = '-O0'
test.run_gyp('cxxflags.gyp')
finally:
  # We restore the environment after calling gyp. When the auto-regeneration
  # happens, the same flags should be reused anyway.
PopEnv()
test.build('cxxflags.gyp')
expect = """\
Using no optimization flag
"""
test.run_built_executable('cxxflags', stdout=expect)
test.sleep()
try:
PushEnv()
os.environ['CXXFLAGS'] = '-O2'
test.run_gyp('cxxflags.gyp')
finally:
  # We restore the environment after calling gyp. When the auto-regeneration
  # happens, the same flags should be reused anyway.
PopEnv()
test.build('cxxflags.gyp')
expect = """\
Using an optimization flag
"""
test.run_built_executable('cxxflags', stdout=expect)
test.pass_test()
|
slint/zenodo
|
refs/heads/master
|
tests/unit/records/test_schemas_datacite.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016-2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo Dublin Core mapping test."""
from __future__ import absolute_import, print_function
import json
from datetime import datetime, timedelta
import pytest
from zenodo.modules.records.serializers import datacite_v31, datacite_v41
def today():
"""Get todays UTC date."""
return datetime.utcnow().date()
def test_minimal(db, minimal_record_model, recid_pid):
"""Test minimal."""
minimal_record_model['doi'] = '10.5072/foo'
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj == {
'identifier': {'identifier': '10.5072/foo', 'identifierType': 'DOI'},
'creators': [{'creatorName': 'Test', 'nameIdentifier': {}}],
'titles': [{'title': 'Test'}],
'publisher': 'Zenodo',
'publicationYear': str(today().year),
'dates': [{'dateType': 'Issued', 'date': today().isoformat()}],
'subjects': [],
'contributors': [],
'resourceType': {
'resourceType': None, 'resourceTypeGeneral': 'Software'},
'alternateIdentifiers': [{
'alternateIdentifier': 'http://localhost/record/123',
'alternateIdentifierType': 'url',
}],
'relatedIdentifiers': [],
'rightsList': [
{'rights': 'Open Access',
'rightsURI': 'info:eu-repo/semantics/openAccess'}],
'descriptions': [
{'description': 'My description', 'descriptionType': 'Abstract'}]
}
def test_non_local_doi(db, minimal_record_model, recid_pid):
"""Test non-local DOI."""
minimal_record_model['doi'] = '10.1234/foo'
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['identifier'] == {'identifier': 'http://localhost/record/123',
'identifierType': 'URL'}
assert obj['relatedIdentifiers'] == [{
'relatedIdentifier': '10.1234/foo',
'relatedIdentifierType': 'DOI',
'relationType': 'IsIdenticalTo',
}]
def test_full(db, record_with_bucket, recid_pid):
"""Test full record metadata."""
_, full_record_model = record_with_bucket
full_record_model['doi'] = '10.5072/foo'
obj = datacite_v31.transform_record(recid_pid, full_record_model)
expected = {
"alternateIdentifiers": [
{
"alternateIdentifier": "urn:lsid:ubio.org:namebank:11815",
"alternateIdentifierType": "lsid"
},
{
"alternateIdentifier": "2011ApJS..192...18K",
"alternateIdentifierType": "ads"
},
{
'alternateIdentifier': '0317-8471',
'alternateIdentifierType': 'issn',
},
{
"alternateIdentifier": "10.1234/alternate.doi",
"alternateIdentifierType": "doi"
},
{
"alternateIdentifier": "http://localhost/record/12345",
"alternateIdentifierType": "url"
},
],
"contributors": [
{
"affiliation": "CERN",
"contributorName": "Smith, Other",
"contributorType": "Other",
"nameIdentifier": {
"nameIdentifier": "0000-0002-1825-0097",
"nameIdentifierScheme": "ORCID",
"schemeURI": "http://orcid.org/"
}
},
{
"affiliation": "",
"contributorName": "Hansen, Viggo",
"contributorType": "Other",
"nameIdentifier": {}
},
{
"affiliation": "CERN",
"contributorName": "Kowalski, Manager",
"contributorType": "DataManager",
"nameIdentifier": {}
}
],
"creators": [
{
"affiliation": "CERN",
"creatorName": "Doe, John",
"nameIdentifier": {
"nameIdentifier": "0000-0002-1694-233X",
"nameIdentifierScheme": "ORCID",
"schemeURI": "http://orcid.org/"
}
},
{
"affiliation": "CERN",
"creatorName": "Doe, Jane",
"nameIdentifier": {
"nameIdentifier": "0000-0002-1825-0097",
"nameIdentifierScheme": "ORCID",
"schemeURI": "http://orcid.org/"
}
},
{
"affiliation": "CERN",
"creatorName": "Smith, John",
"nameIdentifier": {}
},
{
"affiliation": "CERN",
"creatorName": "Nowak, Jack",
"nameIdentifier": {
"nameIdentifier": "170118215",
"nameIdentifierScheme": "GND"
}
}
],
"dates": [
{"date": "2014-02-27", "dateType": "Issued"},
{"date": "2019-01-01/", "dateType": "Valid"},
# NOTE: "Withdrawn" is not in the DataCite v3.1 dateType vocabulary
# {"date": "2019-01-01", "dateType": "Withdrawn"},
{"date": "/2019-01-01", "dateType": "Collected"},
{"date": "2019-01-01/2019-02-01", "dateType": "Collected"},
],
"descriptions": [
{
"description": "Test Description",
"descriptionType": "Abstract"
},
{
"description": "notes",
"descriptionType": "Other"
},
{
"description": (
"{\"references\": [\"Doe, John et al (2012). "
"Some title. Zenodo. 10.5281/zenodo.12\", \"Smith, "
"Jane et al (2012). Some title. Zenodo. "
"10.5281/zenodo.34\"]}"
),
"descriptionType": "Other"
},
{'description': 'microscopic supersampling',
'descriptionType': 'Methods'}
],
"identifier": {"identifier": "10.5072/foo", "identifierType": "DOI"},
"language": "en",
"geoLocations": [{
"geoLocationPlace": "my place",
"geoLocationPoint": "2.35 1.534"
}, {
'geoLocationPlace': 'New York'
}],
"publicationYear": "2014",
"publisher": "Zenodo",
"relatedIdentifiers": [
{
"relationType": "Cites",
"relatedIdentifier": "10.1234/foo.bar",
"relatedIdentifierType": "DOI"
},
{
"relationType": "IsIdenticalTo",
"relatedIdentifier": "1234.4325",
"relatedIdentifierType": "arXiv"
},
{
"relationType": "Cites",
"relatedIdentifier": "1234.4321",
"relatedIdentifierType": "arXiv"
},
{
"relationType": "References",
"relatedIdentifier": "1234.4328",
"relatedIdentifierType": "arXiv"
},
{
"relationType": "IsPartOf",
"relatedIdentifier": "10.1234/zenodo.4321",
"relatedIdentifierType": "DOI",
"resourceTypeGeneral": "Software"
},
{
"relationType": "HasPart",
"relatedIdentifier": "10.1234/zenodo.1234",
"relatedIdentifierType": "DOI",
"resourceTypeGeneral": "Text"
},
{
"relationType": "IsPartOf",
"relatedIdentifier": "http://localhost/communities/zenodo",
"relatedIdentifierType": "URL"
}
],
"resourceType": {
"resourceType": "Book",
"resourceTypeGeneral": "Text"
},
"rightsList": [
{
"rights": "Creative Commons Attribution 4.0",
"rightsURI": "https://creativecommons.org/licenses/by/4.0/"
},
{
"rights": "Open Access",
"rightsURI": "info:eu-repo/semantics/openAccess"
}
],
"subjects": [
{"subject": "kw1"},
{"subject": "kw2"},
{"subject": "kw3"},
{
"subject": "http://id.loc.gov/authorities/subjects/sh85009003",
"subjectScheme": "url"
}
],
"titles": [{"title": "Test title"}],
"version": "1.2.5"
}
assert obj == expected
obj = datacite_v41.transform_record(recid_pid, full_record_model)
expected['creators'] = [
{
'affiliations': ['CERN'],
'creatorName': 'Doe, John',
'familyName': 'Doe',
'givenName': 'John',
'nameIdentifiers': [
{
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/',
'nameIdentifier': '0000-0002-1694-233X'
},
{
'nameIdentifierScheme': 'GND',
'nameIdentifier': '170118215'
}
],
},
{
'affiliations': ['CERN'],
'creatorName': 'Doe, Jane',
'familyName': 'Doe',
'givenName': 'Jane',
'nameIdentifiers': [
{
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/',
'nameIdentifier': '0000-0002-1825-0097'
}
],
},
{
'affiliations': ['CERN'],
'creatorName': 'Smith, John',
'familyName': 'Smith',
'givenName': 'John',
'nameIdentifiers': [],
},
{
'affiliations': ['CERN'],
'creatorName': 'Nowak, Jack',
'familyName': 'Nowak',
'givenName': 'Jack',
'nameIdentifiers': [
{
'nameIdentifierScheme': 'GND',
'nameIdentifier': '170118215'
}
],
}
]
expected['contributors'] = [
{
'affiliations': ['CERN'],
'nameIdentifiers': [
{
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/',
'nameIdentifier': '0000-0002-1825-0097'
}
],
'contributorName': 'Smith, Other',
'familyName': 'Smith',
'givenName': 'Other',
'contributorType': 'Other',
},
{
'affiliations': [''],
'nameIdentifiers': [],
'contributorName': 'Hansen, Viggo',
'familyName': 'Hansen',
'givenName': 'Viggo',
'contributorType': 'Other',
},
{
'affiliations': ['CERN'],
'nameIdentifiers': [],
'contributorName': 'Kowalski, Manager',
'familyName': 'Kowalski',
'givenName': 'Manager',
'contributorType': 'DataManager',
},
{
'contributorName': 'Smith, Professor',
'familyName': 'Smith',
'givenName': 'Professor',
'nameIdentifiers': [],
'contributorType': 'Supervisor',
}
]
expected['fundingReferences'] = []
expected["dates"] = [
{"date": "2014-02-27", "dateType": "Issued"},
{"date": "2019-01-01/", "dateType": "Valid",
"dateInformation": "Bongo"},
{"date": "/2019-01-01", "dateType": "Collected"},
{"date": "2019-01-01", "dateType": "Withdrawn"},
{"date": "2019-01-01/2019-02-01", "dateType": "Collected"},
]
expected['geoLocations'] = [{
"geoLocationPlace": "my place",
"geoLocationPoint": {
"pointLatitude": 2.35,
"pointLongitude": 1.534
}
}, {
'geoLocationPlace': 'New York'
}]
assert obj == expected
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_identifier(db, minimal_record_model, recid_pid, serializer):
"""Test identifier."""
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['identifier'] == {
'identifier': '10.5072/zenodo.123',
'identifierType': 'DOI',
}
def test_creators(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'creators': [
{'name': 'A', 'affiliation': 'AA', 'gnd': '1234'},
{'name': 'B', 'affiliation': 'BA', 'orcid': '0000-0000-0000-0000',
'gnd': '4321'},
]})
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['creators'] == [
{'affiliation': 'AA', 'creatorName': 'A', 'nameIdentifier': {
'nameIdentifier': '1234', 'nameIdentifierScheme': 'GND'}},
{'affiliation': 'BA', 'creatorName': 'B', 'nameIdentifier': {
'nameIdentifier': '0000-0000-0000-0000',
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'}}
]
def test_creators_v4(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'creators': [
{'name': 'A, B', 'affiliation': 'AA', 'gnd': '1234'},
{
'name': 'B',
'affiliation': 'BA',
'orcid': '0000-0000-0000-0000',
'gnd': '4321'
},
]})
obj = datacite_v41.transform_record(recid_pid, minimal_record_model)
assert obj['creators'] == [{
'affiliations': ['AA'],
'creatorName': 'A, B',
'givenName': 'B',
'familyName': 'A',
'nameIdentifiers': [{
'nameIdentifier': '1234',
'nameIdentifierScheme': 'GND'
}]},
{
'affiliations': ['BA'],
'creatorName': 'B',
'givenName': '',
'familyName': '',
'nameIdentifiers': [{
'nameIdentifier': '0000-0000-0000-0000',
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'
}, {
'nameIdentifier': '4321',
'nameIdentifierScheme': 'GND'
}]
}
]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_embargo_date(db, minimal_record_model, recid_pid, serializer):
"""Test embargo date."""
dt = (today() + timedelta(days=1)).isoformat()
minimal_record_model.update({
'embargo_date': dt,
'access_right': 'embargoed',
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['dates'] == [
{'dateType': 'Available', 'date': dt},
{'dateType': 'Accepted', 'date': today().isoformat()},
]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_subjects(db, minimal_record_model, recid_pid, serializer):
"""Test subjects date."""
minimal_record_model.update({
'keywords': ['kw1'],
'subjects': [{'term': 'test', 'identifier': 'id', 'scheme': 'loc'}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['subjects'] == [
{'subject': 'kw1'},
{'subject': 'id', 'subjectScheme': 'loc'},
]
def test_contributors(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'contributors': [{
'name': 'A',
'affiliation': 'AA',
'gnd': '1234',
'type': 'Researcher'
}, ],
'thesis_supervisors': [{
'name': 'B',
'affiliation': 'BA',
'type': 'Supervisor'
}, ],
'grants': [{
'funder': {
'name': 'European Commission',
},
'identifiers': {
'eurepo': 'info:eu-repo/grantAgreement/EC/FP7/244909'
},
}],
})
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['contributors'] == [
{
'affiliation': 'AA',
'contributorName': 'A',
'contributorType': 'Researcher',
'nameIdentifier': {
'nameIdentifier': '1234',
'nameIdentifierScheme': 'GND'}
},
{
'affiliation': 'BA',
'contributorName': 'B',
'contributorType': 'Supervisor',
'nameIdentifier': {},
},
{
'contributorName': 'European Commission',
'contributorType': 'Funder',
'nameIdentifier': {
'nameIdentifier': 'info:eu-repo/grantAgreement/EC/FP7/244909',
'nameIdentifierScheme': 'info'}
},
]
def test_contributors_v4(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'contributors': [{
'name': 'A, B',
'affiliation': 'AA',
'gnd': '1234',
'orcid': '0000-0000-0000-0000',
'type': 'Researcher'
}, ],
'thesis': {
'supervisors': [{
'name': 'B',
'affiliation': 'BA',
'type': 'Supervisor'
}]
}
})
obj = datacite_v41.transform_record(recid_pid, minimal_record_model)
assert obj['contributors'] == [
{
'affiliations': ['AA'],
'contributorName': 'A, B',
'givenName': 'B',
'familyName': 'A',
'contributorType': 'Researcher',
'nameIdentifiers': [
{
'nameIdentifier': '0000-0000-0000-0000',
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'},
{
'nameIdentifier': '1234',
'nameIdentifierScheme': 'GND'},
]
},
{
'affiliations': ['BA'],
'contributorName': 'B',
'givenName': '',
'familyName': '',
'contributorType': 'Supervisor',
'nameIdentifiers': [],
},
]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_language(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
assert 'language' not in minimal_record_model
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert 'language' not in obj
minimal_record_model['language'] = 'eng'
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['language'] == 'en' # DataCite supports ISO 639-1 (2-letter)
minimal_record_model['language'] = 'twa' # No ISO 639-1 code
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert 'language' not in obj
# This should never happen, but in case of dirty data
minimal_record_model['language'] = 'Esperanto'
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert 'language' not in obj
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_resource_type(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
minimal_record_model['resource_type'] = {'type': 'poster'}
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['resourceType'] == {
'resourceTypeGeneral': 'Text',
'resourceType': 'Poster',
}
# If the record is not in 'c1', OpenAIRE subtype should not be serialized
minimal_record_model['resource_type'] = {'type': 'software',
'openaire_subtype': 'foo:t1'}
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['resourceType'] == {
'resourceTypeGeneral': 'Software',
'resourceType': None
}
# Add 'c1' to communities. 'foo:t1' should be serialized as a type
minimal_record_model['communities'] = ['c1', ]
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['resourceType'] == {
'resourceTypeGeneral': 'Software',
'resourceType': 'openaire:foo:t1',
}
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_alt_ids(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
minimal_record_model.update({
'alternate_identifiers': [{
'identifier': '10.1234/foo.bar',
'scheme': 'doi'
}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['alternateIdentifiers'] == [{
'alternateIdentifier': '10.1234/foo.bar',
'alternateIdentifierType': 'doi',
}, {
'alternateIdentifier': 'http://localhost/record/123',
'alternateIdentifierType': 'url',
}]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_related_identifiers(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
tests = [
('handle', 'Handle'),
('arxiv', 'arXiv'),
('ads', 'bibcode'),
('doi', 'DOI'),
]
for t, dc_t in tests:
minimal_record_model.update({
'related_identifiers': [{
'identifier': '1234',
'scheme': t,
'relation': 'isCitedBy',
'resource_type': {
'type': 'publication',
'subtype': 'section'
}
}, {
'identifier': '1234',
'scheme': 'invalid',
'relation': 'isCitedBy',
}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
expected_result = [{
'relatedIdentifier': '1234',
'relatedIdentifierType': dc_t,
'relationType': 'IsCitedBy',
'resourceTypeGeneral': 'Text'
}]
assert obj['relatedIdentifiers'] == expected_result
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_communities_rel_ids(db, minimal_record_model, recid_pid, serializer):
"""Test communities in related identifiers."""
for communities in (['zenodo'], ['c1', 'c2', 'c3']):
minimal_record_model['communities'] = communities
obj = serializer.transform_record(recid_pid, minimal_record_model)
for comm in communities:
assert {
'relatedIdentifier':
'http://localhost/communities/{}'.format(comm),
'relatedIdentifierType': 'URL',
'relationType': 'IsPartOf',
} in obj['relatedIdentifiers']
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_rights(db, minimal_record_model, recid_pid, serializer):
"""Test language."""
minimal_record_model.update({
'license': {
'identifier': 'cc-by-sa',
'title': 'Creative Commons Attribution Share-Alike',
'source': 'opendefinition.org',
'url': 'http://www.opendefinition.org/licenses/cc-by-sa'
}
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['rightsList'] == [{
'rights': 'Creative Commons Attribution Share-Alike',
'rightsURI': 'http://www.opendefinition.org/licenses/cc-by-sa',
}, {
'rights': 'Open Access',
'rightsURI': 'info:eu-repo/semantics/openAccess',
}]
@pytest.mark.parametrize("serializer", [
datacite_v31,
datacite_v41,
])
def test_descriptions(db, minimal_record_model, recid_pid, serializer):
"""Test descriptions."""
minimal_record_model.update({
'description': 'test',
'notes': 'again',
'references': [{'raw_reference': 'A'}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert obj['descriptions'] == [{
'description': 'test',
'descriptionType': 'Abstract',
}, {
'description': 'again',
'descriptionType': 'Other',
}, {
'description': json.dumps({'references': ['A']}),
'descriptionType': 'Other',
}]
minimal_record_model.update({
'description': (20000 * 'A') + 'BBB',
'notes': (20000 * 'A') + 'BBB',
'references': [{'raw_reference': (20000 * 'A') + 'BBB'}],
})
obj = serializer.transform_record(recid_pid, minimal_record_model)
assert all(len(d['description']) == 20000 and 'B' not in d['description']
for d in obj['descriptions'])
def test_funding_ref_v4(db, minimal_record_model, recid_pid):
"""Test creators."""
minimal_record_model.update({
'grants': [
{'title': 'Grant Title',
'code': '1234',
'identifiers': {'eurepo': 'eurepo 1'},
'internal_id': '10.1234/foo::1234',
'funder': {'name': 'EC', 'doi': '10.1234/foo'}},
{'title': 'Title Grant',
'code': '4321',
'identifiers': {'eurepo': 'eurepo 2'},
'internal_id': '10.1234/foo::4321',
'funder': {'name': 'EC', 'doi': '10.1234/foo'}},
]})
obj = datacite_v41.transform_record(recid_pid, minimal_record_model)
assert obj['fundingReferences'] == [
{
'funderName': 'EC',
'funderIdentifier': {
'funderIdentifier': '10.1234/foo',
'funderIdentifierType': 'Crossref Funder ID',
},
'awardNumber': {
'awardNumber': '1234',
'awardURI': 'eurepo 1'
},
'awardTitle': 'Grant Title'
},
{
'funderName': 'EC',
'funderIdentifier': {
'funderIdentifier': '10.1234/foo',
'funderIdentifierType': 'Crossref Funder ID',
},
'awardNumber': {
'awardNumber': '4321',
'awardURI': 'eurepo 2'
},
'awardTitle': 'Title Grant'
}
]
def test_titles(db, minimal_record_model, recid_pid):
"""Test title."""
# NOTE: There used to be a bug which was modifying the case of the title
minimal_record_model['title'] = 'a lower-case title'
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['titles'] == [{'title': 'a lower-case title'}]
minimal_record_model['title'] = 'Mixed-caSe titLE'
obj = datacite_v31.transform_record(recid_pid, minimal_record_model)
assert obj['titles'] == [{'title': 'Mixed-caSe titLE'}]
|
mahendra-r/edx-platform
|
refs/heads/master
|
cms/envs/aws.py
|
15
|
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import json
from .common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is used, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
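# Illustrative example (assuming SERVICE_VARIANT is set to "cms"): CONFIG_PREFIX
# becomes "cms." and the env file opened below is CONFIG_ROOT / "cms.env.json".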
############### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
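# Illustrative example (assuming SERVICE_VARIANT is set to "cms", so
# QUEUE_VARIANT is "cms."): the exchange is named 'edx.cms.core' and the queues
# are 'edx.cms.core.high', 'edx.cms.core.default' and 'edx.cms.core.low'.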
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
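    # Illustrative example (hypothetical URL): STATIC_URL_BASE
    # 'https://cdn.example.org/static' yields STATIC_URL
    # 'https://cdn.example.org/static/<EDX_PLATFORM_REVISION>/'.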
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
SITE_NAME = ENV_TOKENS['SITE_NAME']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)
#Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
#Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', ENV_TOKENS.get('MITX_FEATURES', {}))
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
debug=False,
service_variant=SERVICE_VARIANT)
#theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
STUDIO_NAME = ENV_TOKENS.get('STUDIO_NAME', 'edX Studio')
STUDIO_SHORT_NAME = ENV_TOKENS.get('STUDIO_SHORT_NAME', 'Studio')
TENDER_DOMAIN = ENV_TOKENS.get('TENDER_DOMAIN', TENDER_DOMAIN)
TENDER_SUBDOMAIN = ENV_TOKENS.get('TENDER_SUBDOMAIN', TENDER_SUBDOMAIN)
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
if 'url_root' in DJFS:
DJFS['url_root'] = DJFS['url_root'].format(platform_revision=EDX_PLATFORM_REVISION)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)
# If Segment.io key specified, load it and turn on Segment.io if the feature flag is set
# Note that this is the Studio key. There is a separate key for the LMS.
SEGMENT_IO_KEY = AUTH_TOKENS.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = ENV_TOKENS.get('SEGMENT_IO', False)
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
DATABASES = AUTH_TOKENS['DATABASES']
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
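# Illustrative example (hypothetical values): transport 'amqp', user 'celery',
# host 'rabbit.example.org' and vhost 'edxapp' produce
# 'amqp://celery:<password>@rabbit.example.org/edxapp'.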
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get('ADVANCED_COMPONENT_TYPES', ADVANCED_COMPONENT_TYPES)
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)
DEPRECATED_ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get(
'DEPRECATED_ADVANCED_COMPONENT_TYPES', DEPRECATED_ADVANCED_COMPONENT_TYPES
)
################ VIDEO UPLOAD PIPELINE ###############
VIDEO_UPLOAD_PIPELINE = ENV_TOKENS.get('VIDEO_UPLOAD_PIPELINE', VIDEO_UPLOAD_PIPELINE)
################ PUSH NOTIFICATIONS ###############
PARSE_KEYS = AUTH_TOKENS.get("PARSE_KEYS", {})
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
if FEATURES['ENABLE_COURSEWARE_INDEX'] or FEATURES['ENABLE_LIBRARY_INDEX']:
# Use ElasticSearch for the search engine
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
XBLOCK_SETTINGS.setdefault("VideoModule", {})['YOUTUBE_API_KEY'] = AUTH_TOKENS.get('YOUTUBE_API_KEY', YOUTUBE_API_KEY)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = AUTH_TOKENS.get("PROCTORING_BACKEND_PROVIDER", PROCTORING_BACKEND_PROVIDER)
PROCTORING_SETTINGS = ENV_TOKENS.get("PROCTORING_SETTINGS", PROCTORING_SETTINGS)
|
anbangleo/NlsdeWeb
|
refs/heads/master
|
Python-3.6.0/Lib/idlelib/delegator.py
|
17
|
class Delegator:
def __init__(self, delegate=None):
self.delegate = delegate
self.__cache = set()
# Cache is used to only remove added attributes
# when changing the delegate.
def __getattr__(self, name):
attr = getattr(self.delegate, name) # May raise AttributeError
setattr(self, name, attr)
self.__cache.add(name)
return attr
def resetcache(self):
"Removes added attributes while leaving original attributes."
        # Function is really about resetting the delegator dict
        # to its original state.  The cache is just a means to that end.
for key in self.__cache:
try:
delattr(self, key)
except AttributeError:
pass
self.__cache.clear()
def setdelegate(self, delegate):
"Reset attributes and change delegate."
self.resetcache()
self.delegate = delegate
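# Illustrative usage (not part of the original module):
#     d = Delegator(delegate=[])
#     d.append(1)        # 'append' is looked up on the delegate and cached on d
#     d.setdelegate({})  # cached attributes are removed, delegate is swapped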
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_delegator', verbosity=2)
|
surajssd/kuma
|
refs/heads/master
|
kuma/attachments/migrations/__init__.py
|
12133432
| |
yubang/message
|
refs/heads/master
|
message/__init__.py
|
12133432
| |
kieslee/c_amz
|
refs/heads/master
|
c_amz/spiders/amz_1.py
|
1
|
# -*- coding: utf-8 -*-
import scrapy
import re
import sys
import json
import pdb
import redis
from retrying import retry
r_host = '127.0.0.1'
r_port = 6379
r_db = 1
pool = None
r = None
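# The helpers below lazily create one shared Redis connection pool and expose
# thin keys/get/set wrappers; AmazonSpider2 reads category URLs from this
# cache, keyed by category name.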
def get_cache():
global pool, r
if pool is None:
pool = redis.ConnectionPool(host=r_host, port=r_port, db=r_db, socket_timeout=10)
r = redis.Redis(connection_pool=pool)
return r
def get_keys():
global r
if r is None:
r = get_cache()
return r.keys()
def get_value(k):
global r
if r is None:
r = get_cache()
return r.get(k)
def set_value(k, v):
global r
if r is None:
r = get_cache()
r.set(k, v)
class AmazonSpider(scrapy.Spider):
name = "amazon_collect_all_tags"
allowed_domains = ["amazon.com", "baidu.com"]
start_urls = (
'https://www.amazon.com/Best-Sellers/zgbs/ref=zg_mg_tab',
)
def parse(self, response):
lis = response.xpath('/html/body/div[4]/div[2]/div/div[1]/div/div[2]/div/div[2]/ul/ul/li')
for li in lis:
url = response.urljoin(li.xpath('./a/@href').extract_first())
class_name = li.xpath('./a/text()').extract_first()
r = scrapy.Request(url, callback=self.parse_class_pages)
r.meta['class_name'] = class_name
yield r
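    # Walks the category sidebar (zg_browseRoot) level by level until it finds
    # the node marked 'zg_selected'; returns the <ul> of its sub-categories,
    # or None when there is no deeper level.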
def tag_selected_parse(self, response):
#pdb.set_trace()
ul = response.xpath('//*[@id="zg_browseRoot"]')
while True:
try:
li = ul.xpath('./li')
if len(li) == 0:
return None
ul = ul.xpath('./ul')
span_class_name = li.xpath('./span/@class').extract_first()
if span_class_name == 'zg_selected':
return ul
            except Exception:
return None
return None
def parse_class_pages(self, response):
class_name = response.meta['class_name']
yield {'class_name': class_name, 'url': response.url}
ul = self.tag_selected_parse(response)
if ul is not None:
lis = ul.xpath('./li')
for li in lis:
url = li.xpath('./a/@href').extract_first()
sub_class_name = li.xpath('./a/text()').extract_first()
r = scrapy.Request(url, callback=self.parse_class_pages)
r.meta['class_name'] = class_name + ":" + sub_class_name
yield r
class AmazonSpider2(scrapy.Spider):
name = "amazon_collect_all_products"
allowed_domains = ["amazon.com"]
'''
start_urls = (
'https://www.amazon.com/Best-Sellers/zgbs/ref=zg_mg_tab',
)
'''
def start_requests(self):
key_list = get_keys()
for k in key_list:
url = get_value(k)
class_name = k
r = scrapy.Request(url, callback=self.parse)
r.meta['class_name'] = class_name
yield r
def parse(self, response):
class_name = response.meta['class_name']
divs = response.xpath('//*[@id="zg_centerListWrapper"]/div[contains(@class, "zg_itemImmersion")]')
for div in divs:
url = div.xpath('./div/div[2]/a/@href').extract_first().strip('\n')
rank = div.xpath('./div/span/text()').extract_first().strip('.')
#r = scrapy.Request(url, callback=self.parse_products)
#r.meta['class_name'] = class_name
#r.meta['rank'] = rank
yield {'class_name': class_name, 'rank': rank, 'url': url}
page2 = response.xpath('//*[@id="zg_page2"]/a/@href').extract_first()
r = scrapy.Request(page2, callback=self.parse_class_pages)
r.meta['class_name'] = class_name
yield r
page3 = response.xpath('//*[@id="zg_page3"]/a/@href').extract_first()
r = scrapy.Request(page3, callback=self.parse_class_pages)
r.meta['class_name'] = class_name
yield r
page4 = response.xpath('//*[@id="zg_page4"]/a/@href').extract_first()
r = scrapy.Request(page4, callback=self.parse_class_pages)
r.meta['class_name'] = class_name
yield r
page5 = response.xpath('//*[@id="zg_page5"]/a/@href').extract_first()
r = scrapy.Request(page5, callback=self.parse_class_pages)
r.meta['class_name'] = class_name
yield r
def parse_class_pages(self, response):
class_name = response.meta['class_name']
divs = response.xpath('//*[@id="zg_centerListWrapper"]/div[contains(@class, "zg_itemImmersion")]')
for div in divs:
url = div.xpath('./div/div[2]/a/@href').extract_first().strip('\n')
rank = div.xpath('./div/span/text()').extract_first().strip('.')
#r = scrapy.Request(url, callback=self.parse_products, dont_filter=True)
#r.meta['class_name'] = class_name
#r.meta['rank'] = rank
yield {'class_name': class_name, 'rank': rank, 'url': url}
|
IndonesiaX/edx-platform
|
refs/heads/master
|
common/test/acceptance/tests/test_cohorted_courseware.py
|
123
|
"""
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
import json
from nose.plugins.attrib import attr
from studio.base_studio_test import ContainerBase
from ..pages.studio.settings_group_configurations import GroupConfigurationsPage
from ..pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ..fixtures.course import XBlockFixtureDesc
from ..fixtures import LMS_BASE_URL
from ..pages.studio.component_editor import ComponentVisibilityEditorView
from ..pages.lms.instructor_dashboard import InstructorDashboardPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from ..tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from bok_choy.promise import EmptyPromise
@attr('shard_5')
class EndToEndCohortedCoursewareTest(ContainerBase):
def setUp(self, is_staff=True):
super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.content_group_a = "Content Group A"
self.content_group_b = "Content Group B"
# Create a student who will be in "Cohort A"
self.cohort_a_student_username = "cohort_a_student"
self.cohort_a_student_email = "cohort_a_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
).visit()
# Create a student who will be in "Cohort B"
self.cohort_b_student_username = "cohort_b_student"
self.cohort_b_student_email = "cohort_b_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
).visit()
# Create a student who will end up in the default cohort group
self.cohort_default_student_username = "cohort_default_student"
self.cohort_default_student_email = "cohort_default_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_default_student_username,
email=self.cohort_default_student_email, no_login=True
).visit()
# Start logged in as the staff user.
StudioAutoAuthPage(
self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
).visit()
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.group_a_problem = 'GROUP A CONTENT'
self.group_b_problem = 'GROUP B CONTENT'
self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
)
)
)
)
def enable_cohorting(self, course_fixture):
"""
Enables cohorting for the current course.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings' # pylint: disable=protected-access
data = json.dumps({'is_cohorted': True})
response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to enable cohorts")
def create_content_groups(self):
"""
Creates two content groups in Studio Group Configurations Settings.
"""
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
group_configurations_page.create_first_content_group()
config = group_configurations_page.content_groups[0]
config.name = self.content_group_a
config.save()
group_configurations_page.add_content_group()
config = group_configurations_page.content_groups[1]
config.name = self.content_group_b
config.save()
def link_problems_to_content_groups_and_publish(self):
"""
Updates 3 of the 4 existing problems to limit their visibility by content group.
Publishes the modified units.
"""
container_page = self.go_to_unit_page()
def set_visibility(problem_index, content_group, second_content_group=None):
problem = container_page.xblocks[problem_index]
problem.edit_visibility()
if second_content_group:
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(
second_content_group, save=False
)
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(content_group)
set_visibility(1, self.content_group_a)
set_visibility(2, self.content_group_b)
set_visibility(3, self.content_group_a, self.content_group_b)
container_page.publish_action.click()
def create_cohorts_and_assign_students(self):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one student.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
def add_cohort_with_student(cohort_name, content_group, student):
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
).fulfill()
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
def view_cohorted_content_as_different_users(self):
"""
View content as staff, student in Cohort A, student in Cohort B, and student in Default Cohort.
"""
courseware_page = CoursewarePage(self.browser, self.course_id)
def login_and_verify_visible_problems(username, email, expected_problems):
LmsAutoAuthPage(
self.browser, username=username, email=email, course_id=self.course_id
).visit()
courseware_page.visit()
verify_expected_problem_visibility(self, courseware_page, expected_problems)
login_and_verify_visible_problems(
self.staff_user["username"], self.staff_user["email"],
[self.group_a_problem, self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_a_student_username, self.cohort_a_student_email,
[self.group_a_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_b_student_username, self.cohort_b_student_email,
[self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_default_student_username, self.cohort_default_student_email,
[self.visible_to_all_problem]
)
def test_cohorted_courseware(self):
"""
Scenario: Can create content that is only visible to students in particular cohorts
Given that I have course with 4 problems, 1 staff member, and 3 students
When I enable cohorts in the course
And I create two content groups, Content Group A, and Content Group B, in the course
And I link one problem to Content Group A
And I link one problem to Content Group B
And I link one problem to both Content Group A and Content Group B
And one problem remains unlinked to any Content Group
And I create two manual cohorts, Cohort A and Cohort B,
linked to Content Group A and Content Group B, respectively
And I assign one student to each manual cohort
And one student remains in the default cohort
Then the staff member can see all 4 problems
And the student in Cohort A can see all the problems except the one linked to Content Group B
And the student in Cohort B can see all the problems except the one linked to Content Group A
        And the student in the default cohort can only see the problem that is unlinked to any Content Group
"""
self.enable_cohorting(self.course_fixture)
self.create_content_groups()
self.link_problems_to_content_groups_and_publish()
self.create_cohorts_and_assign_students()
self.view_cohorted_content_as_different_users()
|
alaunay/bigtop
|
refs/heads/master
|
bigtop-packages/src/charm/zeppelin/layer-zeppelin/reactive/zeppelin.py
|
8
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from charms.reactive import is_state, remove_state, set_state, when, when_not
from charmhelpers.core import hookenv, unitdata
from charms.layer.apache_bigtop_base import Bigtop, get_package_version
from charms.layer.bigtop_zeppelin import Zeppelin
from charms.reactive.helpers import data_changed
@when('zeppelin.installed')
def update_status():
hadoop_joined = is_state('hadoop.joined')
hadoop_ready = is_state('hadoop.ready')
hive_joined = is_state('hive.joined')
hive_ready = is_state('hive.ready')
spark_joined = is_state('spark.joined')
spark_ready = is_state('spark.ready')
spark_blocked = is_state('spark.master.unusable')
# handle blockers first; then report what's ready/waiting
if spark_blocked:
hookenv.status_set('blocked',
'remote spark must be in standalone mode')
else:
waiting_apps = []
ready_apps = []
# Check status of the hadoop plugin
if hadoop_joined and not hadoop_ready:
waiting_apps.append('hadoop')
elif hadoop_ready:
ready_apps.append('hadoop')
# Check status of Hive
if hive_joined and not hive_ready:
waiting_apps.append('hive')
elif hive_ready:
ready_apps.append('hive')
# Check status of Spark
if spark_joined and not spark_ready:
waiting_apps.append('spark')
elif spark_ready:
ready_apps.append('spark')
# Set appropriate status
repo_ver = unitdata.kv().get('zeppelin.version.repo', False)
if repo_ver:
            # Pending upgrade takes precedence over other status messages
msg = "install version {} with the 'reinstall' action".format(repo_ver)
hookenv.status_set('active', msg)
elif waiting_apps:
            # Waiting takes precedence over active status messages
msg = "waiting for: {}".format(' & '.join(waiting_apps))
hookenv.status_set('waiting', msg)
elif ready_apps:
msg = "ready with: {}".format(' & '.join(ready_apps))
hookenv.status_set('active', msg)
else:
hookenv.status_set('active', 'ready')
@when('bigtop.available')
@when_not('zeppelin.installed')
def initial_setup():
hookenv.status_set('maintenance', 'installing zeppelin')
zeppelin = Zeppelin()
zeppelin.install()
zeppelin.open_ports()
set_state('zeppelin.installed')
update_status()
# set app version string for juju status output
zeppelin_version = get_package_version('zeppelin') or 'unknown'
hookenv.application_version_set(zeppelin_version)
@when('zeppelin.installed', 'bigtop.version.changed')
def check_repo_version():
"""
Configure a bigtop site.yaml if a new version of zeppelin is available.
This method will set unitdata if a different version of zeppelin is
available in the newly configured bigtop repo. This unitdata allows us to
configure site.yaml while gating the actual puppet apply. The user must do
the puppet apply by calling the 'reinstall' action.
"""
repo_ver = Bigtop().check_bigtop_repo_package('zeppelin')
if repo_ver:
unitdata.kv().set('zeppelin.version.repo', repo_ver)
unitdata.kv().flush(True)
zeppelin = Zeppelin()
zeppelin.trigger_bigtop()
else:
unitdata.kv().unset('zeppelin.version.repo')
update_status()
@when('zeppelin.installed', 'hadoop.ready')
@when_not('zeppelin.hadoop.configured')
def configure_hadoop(hadoop):
zeppelin = Zeppelin()
zeppelin.configure_hadoop()
zeppelin.register_hadoop_notebooks()
set_state('zeppelin.hadoop.configured')
@when('zeppelin.installed', 'zeppelin.hadoop.configured')
@when_not('hadoop.ready')
def unconfigure_hadoop():
zeppelin = Zeppelin()
zeppelin.remove_hadoop_notebooks()
remove_state('zeppelin.hadoop.configured')
@when('zeppelin.installed', 'hive.ready')
def configure_hive(hive):
hive_ip = hive.get_private_ip()
hive_port = hive.get_port()
hive_url = 'jdbc:hive2://%s:%s' % (hive_ip, hive_port)
if data_changed('hive.connect', hive_url):
hookenv.status_set('maintenance', 'configuring hive')
zeppelin = Zeppelin()
zeppelin.configure_hive(hive_url)
set_state('zeppelin.hive.configured')
update_status()
@when('zeppelin.installed', 'zeppelin.hive.configured')
@when_not('hive.ready')
def unconfigure_hive():
hookenv.status_set('maintenance', 'removing hive relation')
zeppelin = Zeppelin()
zeppelin.configure_hive('jdbc:hive2://:')
remove_state('zeppelin.hive.configured')
update_status()
@when('zeppelin.installed', 'spark.ready')
def configure_spark(spark):
'''
Configure Zeppelin to use remote Spark resources.
'''
# NB: Use the master_url string if it already starts with spark://.
# Otherwise, it means the remote spark is in local or yarn mode -- that's
# bad because using 'local' or 'yarn' here would cause zepp's spark-submit
# to use the builtin spark, hence ignoring the remote spark. In this case,
# set a state so we can inform the user that the remote spark is unusable.
master_url = spark.get_master_url()
if master_url.startswith('spark'):
remove_state('spark.master.unusable')
# Only (re)configure if our master url has changed.
if data_changed('spark.master', master_url):
hookenv.status_set('maintenance', 'configuring spark')
zeppelin = Zeppelin()
zeppelin.configure_spark(master_url)
set_state('zeppelin.spark.configured')
else:
remove_state('zeppelin.spark.configured')
set_state('spark.master.unusable')
update_status()
@when('zeppelin.installed', 'zeppelin.spark.configured')
@when_not('spark.ready')
def unconfigure_spark():
'''
Remove remote Spark; reconfigure Zeppelin to use embedded Spark.
'''
hookenv.status_set('maintenance', 'removing spark relation')
zeppelin = Zeppelin()
# Zepp includes the spark-client role, so reconfigure our built-in spark
# if our related spark has gone away.
if is_state('zeppelin.hadoop.configured'):
local_master = 'yarn-client'
else:
local_master = 'local[*]'
zeppelin.configure_spark(local_master)
data_changed('spark.master', local_master) # ensure updated if re-added
remove_state('zeppelin.spark.configured')
update_status()
@when('zeppelin.installed', 'client.notebook.registered')
def register_notebook(client):
zeppelin = Zeppelin()
for notebook in client.unregistered_notebooks():
notebook_md5 = hashlib.md5(notebook.encode('utf8')).hexdigest()
if zeppelin.register_notebook(notebook_md5, notebook):
client.accept_notebook(notebook)
else:
client.reject_notebook(notebook)
@when('zeppelin.installed', 'client.notebook.removed')
def remove_notebook(client):
zeppelin = Zeppelin()
for notebook in client.unremoved_notebooks():
notebook_md5 = hashlib.md5(notebook.encode('utf8')).hexdigest()
zeppelin.remove_notebook(notebook_md5)
client.remove_notebook(notebook)
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/conf/locale/zh_TW/formats.py
|
1293
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
tedelhourani/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_datacenter.py
|
56
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_datacenter
short_description: Module to manage data centers in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage data centers in oVirt/RHV"
options:
name:
description:
- "Name of the data center to manage."
required: true
state:
description:
- "Should the data center be present or absent"
choices: ['present', 'absent']
default: present
description:
description:
- "Description of the data center."
comment:
description:
- "Comment of the data center."
local:
description:
- "I(True) if the data center should be local, I(False) if should be shared."
- "Default value is set by engine."
compatibility_version:
description:
- "Compatibility version of the data center."
quota_mode:
description:
- "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)"
choices: ['disabled', 'audit', 'enabled']
mac_pool:
description:
- "MAC pool to be used by this datacenter."
- "IMPORTANT: This option is deprecated in oVirt/RHV 4.1. You should
use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are
set per cluster since 4.1."
force:
description:
- "This parameter can be used only when removing a data center.
If I(True) data center will be forcibly removed, even though it
contains some clusters. Default value is I(False), which means
that only empty data center can be removed."
version_added: "2.5"
default: False
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create datacenter
- ovirt_datacenter:
name: mydatacenter
local: True
compatibility_version: 4.0
quota_mode: enabled
# Remove datacenter
- ovirt_datacenter:
state: absent
name: mydatacenter
'''
RETURN = '''
id:
description: "ID of the managed datacenter"
returned: "On success if datacenter is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
data_center:
description: "Dictionary of all the datacenter attributes. Datacenter attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/datacenter."
returned: "On success if datacenter is found."
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class DatacentersModule(BaseModule):
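    # The version helpers below accept either an otypes.Version object or a
    # plain 'major.minor' string (e.g. '4.0') and return the numeric parts.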
def __get_major(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.major
return int(full_version.split('.')[0])
def __get_minor(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.minor
return int(full_version.split('.')[1])
def _get_mac_pool(self):
mac_pool = None
if self._module.params.get('mac_pool'):
mac_pool = search_by_name(
self._connection.system_service().mac_pools_service(),
self._module.params.get('mac_pool'),
)
return mac_pool
def build_entity(self):
return otypes.DataCenter(
name=self._module.params['name'],
comment=self._module.params['comment'],
description=self._module.params['description'],
mac_pool=otypes.MacPool(
id=getattr(self._get_mac_pool(), 'id', None),
) if self._module.params.get('mac_pool') else None,
quota_mode=otypes.QuotaModeType(
self._module.params['quota_mode']
) if self._module.params['quota_mode'] else None,
local=self._module.params['local'],
version=otypes.Version(
major=self.__get_major(self._module.params['compatibility_version']),
minor=self.__get_minor(self._module.params['compatibility_version']),
) if self._module.params['compatibility_version'] else None,
)
def update_check(self, entity):
minor = self.__get_minor(self._module.params.get('compatibility_version'))
major = self.__get_major(self._module.params.get('compatibility_version'))
return (
equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and
equal(self._module.params.get('local'), entity.local) and
equal(minor, self.__get_minor(entity.version)) and
equal(major, self.__get_major(entity.version))
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True),
description=dict(default=None),
local=dict(type='bool'),
compatibility_version=dict(default=None),
quota_mode=dict(choices=['disabled', 'audit', 'enabled']),
comment=dict(default=None),
mac_pool=dict(default=None),
force=dict(default=None, type='bool'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if module._name == 'ovirt_datacenters':
module.deprecate("The 'ovirt_datacenters' module is being renamed 'ovirt_datacenter'", version=2.8)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
data_centers_service = connection.system_service().data_centers_service()
clusters_module = DatacentersModule(
connection=connection,
module=module,
service=data_centers_service,
)
state = module.params['state']
if state == 'present':
ret = clusters_module.create()
elif state == 'absent':
ret = clusters_module.remove(force=module.params['force'])
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
warped-rudi/xbmc
|
refs/heads/Gotham-13.1-AE-fixes
|
lib/libUPnP/Neptune/Extras/Scripts/GenTrustAnchorsTables.py
|
264
|
#! /usr/bin/env python
###
### Generate trust anchor tables from a text file
### like, for example, TLS-Trust-Anchors-base.crt
### and TLS-Trust-Anchors-extended.crt located under Extras/Data
###
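### Example invocation (illustrative):
###   python GenTrustAnchorsTables.py TLS-Trust-Anchors-base.crt Base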
### imports
import sys
import base64
### generate a C file with built-in TLS trust anchors
FILE_HEADER = """/*****************************************************************
|
| Neptune - Trust Anchors
|
| This file is automatically generated by a script, do not edit!
|
| Copyright (c) 2002-2010, Axiomatic Systems, LLC.
| All rights reserved.
|
| Redistribution and use in source and binary forms, with or without
| modification, are permitted provided that the following conditions are met:
| * Redistributions of source code must retain the above copyright
| notice, this list of conditions and the following disclaimer.
| * Redistributions in binary form must reproduce the above copyright
| notice, this list of conditions and the following disclaimer in the
| documentation and/or other materials provided with the distribution.
| * Neither the name of Axiomatic Systems nor the
| names of its contributors may be used to endorse or promote products
| derived from this software without specific prior written permission.
|
| THIS SOFTWARE IS PROVIDED BY AXIOMATIC SYSTEMS ''AS IS'' AND ANY
| EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
| WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
| DISCLAIMED. IN NO EVENT SHALL AXIOMATIC SYSTEMS BE LIABLE FOR ANY
| DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
| (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
| LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
| ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
| SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
****************************************************************/
"""
if len(sys.argv) != 3:
print "usage: GenTrustAnchosTable.py <input-file> <category>"
print " where category may be 'Base', 'Extended', or other"
sys.exit(1)
INPUT_FILE = sys.argv[1]
CERT_CATEGORY = sys.argv[2]
digest_oid_pattern = "\x2a\x86\x48\x86\xf7\x0d\x01\x01"
in_cert = False
prev = ''
prev_prev = ''
index = 0
Certs = []
CertNames = []
CertComments = []
for line in open(sys.argv[1]).readlines():
if line.startswith('-----BEGIN CERTIFICATE-----'):
in_cert = True
b64 = ''
continue;
if line.startswith('-----END CERTIFICATE-----'):
cert = base64.decodestring(b64);
if not digest_oid_pattern in cert:
sys.stderr.write("-------- skipping cert (digest not supported) -------\n")
continue
Certs.append(cert)
cert_name = 'NptTlsTrustAnchor_%s_%04d' % (CERT_CATEGORY, index)
#cert_comment = eval('"'+prev_prev.rstrip('\r\n')+'"')
cert_comment = prev_prev.rstrip('\r\n')
CertNames.append(cert_name)
CertComments.append(cert_comment)
out = open(CERT_CATEGORY+'/'+cert_name+'.cpp', 'w+b')
out.write(FILE_HEADER)
out.write('/* %s */\n' % (cert_comment))
out.write('const unsigned char %s_Data[%d] = {\n' % (cert_name, len(cert)))
counter = 0
sep = ''
for byte in cert:
out.write('%s0x%02x' % (sep, ord(byte)))
counter += 1
sep = ','
if counter == 8:
out.write('\n')
counter = 0
in_cert = False
out.write('};\n')
out.write('const unsigned int %s_Size = %d;\n' % (cert_name, len(cert)))
index += 1
out.close()
continue
if in_cert:
b64 += line.rstrip('\r\n')
else:
prev_prev = prev
prev = line
out = open('NptTlsDefaultTrustAnchors'+CERT_CATEGORY+'.cpp', 'w+b')
out.write(FILE_HEADER)
out.write("/* This file is automatically generated by GenTrustAnchorsTables.py, do not edit */\n\n")
out.write('#include "NptTls.h"\n')
total_size = 0
for i in xrange(0, len(CertNames)):
out.write('#include "'+CERT_CATEGORY+'/'+CertNames[i]+'.cpp" /* '+CertComments[i]+' */\n')
total_size += len(Certs[i])
out.write("/* total anchors size ="+ str(total_size)+" */\n\n")
out.write('const NPT_TlsTrustAnchorData NptTlsDefaultTrustAnchors%s[%s] = {\r\n' % (CERT_CATEGORY, 1+len(Certs)))
sep = ' '
for i in xrange(0, len(Certs)):
out.write('%s{ %s_Data, %s_Size} /* %s */' % (sep, CertNames[i], CertNames[i], CertComments[i]))
sep = ',\r\n '
out.write(sep+'{0, 0} /* sentinel */\n')
out.write('};\n')
out.close()
out = open('NptTlsDefaultTrustAnchors'+CERT_CATEGORY+'.h', 'w+b')
out.write(FILE_HEADER)
out.write("/* This file is automatically generated by GenTrustAnchorsTables.py, do not edit */\n\n")
out.write('#include "NptTls.h"\n\n')
out.write('extern const NPT_TlsTrustAnchorData NptTlsDefaultTrustAnchors%s[%d];\n\n' % (CERT_CATEGORY, 1+len(Certs)))
for i in xrange(0, len(CertNames)):
out.write('/* '+CertComments[i]+' */\n')
out.write('extern const unsigned int %s_Size;\n' % (CertNames[i]))
out.write('extern const unsigned char %s_Data[];\n\n' % (CertNames[i]))
out.close()
|
PnCevennes/porteconnaissance_pq
|
refs/heads/master
|
app/pq/models.py
|
1
|
#coding: utf8
from server import db
from geoalchemy2 import Geometry
from shapely.wkb import loads
from shapely.geometry import asShape
from geoalchemy2.shape import to_shape, from_shape
from geojson import Feature
class PqData(db.Model):
__tablename__ = 'v_perimetres_quietude'
__table_args__ = {'schema':'pq'}
r = db.Column(db.Integer, primary_key=True)
code_sp = db.Column(db.Unicode)
code_etat = db.Column(db.Unicode)
max_etat_annee = db.Column(db.Integer)
zone_pnc = db.Column(db.Unicode)
massifs = db.Column(db.Unicode)
id_secteur = db.Column(db.Unicode)
qtd_nom = db.Column(db.Unicode)
geom = db.Column('geom', Geometry('MULTIPOLYGON', srid=4326))
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
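    # as_geofeature converts the PostGIS geometry to a Shapely shape and wraps
    # every non-geometry column as a property of a GeoJSON Feature.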
def as_geofeature(self):
geometry = to_shape(self.geom)
feature = Feature(
id=self.r,
geometry=geometry,
properties= {c.name: getattr(self, c.name) for c in self.__table__.columns if c.name!='geom'}
)
return feature
class Communes(db.Model):
__tablename__ = 'communes'
__table_args__ = {'schema':'lim_admin'}
code_insee= db.Column(db.Integer, primary_key=True)
geom = db.Column('geom_4326', Geometry('MULTIPOLYGON', srid=4326))
geom_buffer = db.Column('geom_buffer', Geometry('MULTIPOLYGON', srid=4326))
nom_com = db.Column(db.Unicode)
def as_dict(self):
return {
c.name: getattr(self, c.name)
for c in self.__table__.columns
}
def as_geofeature(self):
geometry = to_shape(self.geom)
feature = Feature(
id=self.code_insee,
geometry=geometry
)
return feature
class CommunesEmprises(db.Model):
__tablename__ = 'v_communes_emprise'
__table_args__ = {'schema':'lim_admin'}
code_insee= db.Column(db.Integer, primary_key=True)
label = db.Column(db.Unicode)
st_xmax = db.Column(db.Unicode)
st_xmin= db.Column(db.Unicode)
st_ymax = db.Column(db.Unicode)
st_ymin= db.Column(db.Unicode)
def as_dict(self):
return {
c.name: getattr(self, c.name)
for c in self.__table__.columns
}
class ContactMassifs(db.Model):
__tablename__ = 'contact_massifs'
__table_args__ = {'schema':'pq'}
id= db.Column(db.Integer, primary_key=True)
nom_massif = db.Column(db.Unicode)
nom_agent = db.Column(db.Unicode)
tel_portable = db.Column(db.Unicode)
tel_fixe = db.Column(db.Unicode)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class ContactDt(db.Model):
__tablename__ = 'contact_dt'
__table_args__ = {'schema':'pq'}
id= db.Column(db.Integer, primary_key=True)
nom_massif = db.Column(db.Unicode)
nom_dt = db.Column(db.Unicode)
tel_portable = db.Column(db.Unicode)
tel_fixe = db.Column(db.Unicode)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class ContactSecteurs(db.Model):
__tablename__ = 'contact_secteurs'
__table_args__ = {'schema':'pq'}
id= db.Column(db.Integer, primary_key=True)
id_secteur = db.Column(db.Integer)
nom_agent = db.Column(db.Unicode)
tel_portable = db.Column(db.Unicode)
tel_fixe = db.Column(db.Unicode)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
|
tanmaythakur/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/migrations/__init__.py
|
12133432
| |
MichaelNedzelsky/intellij-community
|
refs/heads/master
|
python/testData/formatter/parenthesisAroundGeneratorExpression.py
|
79
|
gen = ( for c in 'abrakadabra' )
|
PatrickKennedy/Sybil
|
refs/heads/master
|
console/app/console.py
|
1
|
# The WSGI entry-point for App Engine Console
#
# Copyright 2008 Proven Corporation Co., Ltd., Thailand
#
# This file is part of App Engine Console.
#
# App Engine Console is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# App Engine Console is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with App Engine Console; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import re
import sys
import cgi
import code
import logging
from os.path import join, dirname
sys.path.insert(0, dirname(__file__))
sys.path.insert(0, dirname(dirname(__file__)))
import util
import controller
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
debug = util.is_dev()
logging.info('WSGI debugging: %s' % debug)
application = webapp.WSGIApplication([
('/' , controller.Root),
('/console/dashboard/', controller.Dashboard),
('/console/help.*' , controller.Help),
('/console/statement' , controller.Statement),
('/console/banner' , controller.Banner),
('/console.*' , controller.Console),
], debug=debug)
def main():
logging.getLogger().setLevel(logging.DEBUG)
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
ryanahall/django
|
refs/heads/master
|
django/contrib/gis/gdal/error.py
|
535
|
"""
This module houses the GDAL & SRS Exception objects, and the
check_err() routine which checks the status code returned by
GDAL/OGR methods.
"""
# #### GDAL & SRS Exceptions ####
class GDALException(Exception):
pass
# Legacy name
OGRException = GDALException
class SRSException(Exception):
pass
class OGRIndexError(GDALException, KeyError):
"""
This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
django's templates proceed to use the next lookup type gracefully when
an Exception is raised. Fixes ticket #4740.
"""
silent_variable_failure = True
# #### GDAL/OGR error checking codes and routine ####
# OGR Error Codes
OGRERR_DICT = {
1: (GDALException, 'Not enough data.'),
2: (GDALException, 'Not enough memory.'),
3: (GDALException, 'Unsupported geometry type.'),
4: (GDALException, 'Unsupported operation.'),
5: (GDALException, 'Corrupt data.'),
6: (GDALException, 'OGR failure.'),
7: (SRSException, 'Unsupported SRS.'),
8: (GDALException, 'Invalid handle.'),
}
# CPL Error Codes
# http://www.gdal.org/cpl__error_8h.html
CPLERR_DICT = {
1: (GDALException, 'AppDefined'),
2: (GDALException, 'OutOfMemory'),
3: (GDALException, 'FileIO'),
4: (GDALException, 'OpenFailed'),
5: (GDALException, 'IllegalArg'),
6: (GDALException, 'NotSupported'),
7: (GDALException, 'AssertionFailed'),
8: (GDALException, 'NoWriteAccess'),
9: (GDALException, 'UserInterrupt'),
10: (GDALException, 'ObjectNull'),
}
ERR_NONE = 0
def check_err(code, cpl=False):
"""
Checks the given CPL/OGRERR, and raises an exception where appropriate.
"""
err_dict = CPLERR_DICT if cpl else OGRERR_DICT
if code == ERR_NONE:
return
elif code in err_dict:
e, msg = err_dict[code]
raise e(msg)
else:
raise GDALException('Unknown error code: "%s"' % code)
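# Illustrative example: check_err(6) raises GDALException('OGR failure.'),
# while check_err(0) returns silently.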
|
cosenal/osf.io
|
refs/heads/develop
|
website/files/models/dataverse.py
|
39
|
from framework.auth.core import _get_current_user
from website.files.models.base import File, Folder, FileNode, FileVersion
__all__ = ('DataverseFile', 'DataverseFolder', 'DataverseFileNode')
class DataverseFileNode(FileNode):
provider = 'dataverse'
class DataverseFolder(DataverseFileNode, Folder):
pass
class DataverseFile(DataverseFileNode, File):
version_identifier = 'version'
def update(self, revision, data, user=None):
"""Note: Dataverse only has psuedo versions, don't save them
Dataverse requires a user for the weird check below
and Django dies when _get_current_user is called
"""
self.name = data['name']
self.materialized_path = data['materialized']
self.save()
version = FileVersion(identifier=revision)
version.update_metadata(data, save=False)
user = user or _get_current_user()
if not user or not self.node.can_edit(user=user):
try:
# Users without edit permission can only see published files
if not data['extra']['hasPublishedVersion']:
# Blank out name and path for the render
                    # Don't save because there's no reason to persist the change
self.name = ''
self.materialized_path = ''
return (version, '<div class="alert alert-info" role="alert">This file does not exist.</div>')
except (KeyError, IndexError):
pass
return version
|
DylannCordel/djangocms-text-ckeditor
|
refs/heads/master
|
djangocms_text_ckeditor/tests/test_html.py
|
4
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from .. import html
from .. import settings
class HtmlSanitizerAdditionalProtocolsTests(TestCase):
def tearDown(self):
settings.TEXT_ADDITIONAL_PROTOCOLS = []
def test_default_protocol_escaping(self):
settings.TEXT_ADDITIONAL_PROTOCOLS = []
parser = html._get_default_parser()
text = html.clean_html('''<source src="rtmp://testurl.com/">''',
full=False,
parser=parser)
self.assertEqual('<source>', text)
def test_custom_protocol_enabled(self):
settings.TEXT_ADDITIONAL_PROTOCOLS = ('rtmp',)
parser = html._get_default_parser()
text = html.clean_html('''<source src="rtmp://testurl.com/">''',
full=False,
parser=parser)
self.assertEqual('''<source src="rtmp://testurl.com/">''', text)
|
nikolas/readthedocs.org
|
refs/heads/master
|
readthedocs/projects/signals.py
|
8
|
import logging
import json
import django.dispatch
from django.conf import settings
from django.contrib import messages
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from builds import utils as build_utils
from oauth import utils as oauth_utils
before_vcs = django.dispatch.Signal(providing_args=["version"])
after_vcs = django.dispatch.Signal(providing_args=["version"])
before_build = django.dispatch.Signal(providing_args=["version"])
after_build = django.dispatch.Signal(providing_args=["version"])
project_import = django.dispatch.Signal(providing_args=["project"])
log = logging.getLogger(__name__)
@receiver(project_import)
def handle_project_import(sender, **kwargs):
"""
Add post-commit hook on project import.
"""
project = sender
request = kwargs.get('request')
for provider in ['github', 'bitbucket']:
if provider in project.repo:
session = oauth_utils.get_oauth_session(user=request.user, provider=provider)
if not session:
break
if provider == 'github':
try:
owner, repo = build_utils.get_github_username_repo(version=None, repo_url=project.repo)
data = json.dumps({
'name': 'readthedocs',
'active': True,
'config': {'url': 'https://{domain}/github'.format(domain=settings.PRODUCTION_DOMAIN)}
})
resp = session.post(
'https://api.github.com/repos/{owner}/{repo}/hooks'.format(owner=owner, repo=repo),
data=data,
headers={'content-type': 'application/json'}
)
log.info("Creating GitHub webhook response code: {code}".format(code=resp.status_code))
if resp.status_code == 201:
messages.success(request, _('GitHub webhook activated'))
except:
log.exception('GitHub Hook creation failed', exc_info=True)
elif provider == 'bitbucket':
try:
owner, repo = build_utils.get_bitbucket_username_repo(version=None, repo_url=project.repo)
data = {
'type': 'POST',
'url': 'https://{domain}/bitbucket'.format(domain=settings.PRODUCTION_DOMAIN),
}
resp = session.post(
'https://api.bitbucket.org/1.0/repositories/{owner}/{repo}/services'.format(owner=owner, repo=repo),
data=data,
)
log.info("Creating BitBucket webhook response code: {code}".format(code=resp.status_code))
if resp.status_code == 200:
messages.success(request, _('BitBucket webhook activated'))
except:
log.exception('BitBucket Hook creation failed', exc_info=True)
|
cast051/ardupilot_cast
|
refs/heads/master
|
mk/PX4/Tools/genmsg/test/test_genmsg_base.py
|
216
|
def test_log():
from genmsg.base import log
log("hello", "there")
def test_plog():
class Foo(object):
pass
from genmsg.base import plog
plog("hello", Foo())
def test_exceptions():
from genmsg import InvalidMsgSpec
try:
raise InvalidMsgSpec('hello')
except InvalidMsgSpec:
pass
|
gimite/personfinder
|
refs/heads/master
|
app/vendors/xlrd/sheet.py
|
27
|
# -*- coding: cp1252 -*-
##
# <p> Portions copyright © 2005-2013 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# 2010-04-25 SJM fix zoom factors cooking logic
# 2010-04-15 CW r4253 fix zoom factors cooking logic
# 2010-04-09 CW r4248 add a flag so xlutils knows whether or not to write a PANE record
# 2010-03-29 SJM Fixed bug in adding new empty rows in put_cell_ragged
# 2010-03-28 SJM Tailored put_cell method for each of ragged_rows=False (fixed speed regression) and =True (faster)
# 2010-03-25 CW r4236 Slight refactoring to remove method calls
# 2010-03-25 CW r4235 Collapse expand_cells into put_cell and enhance the raggedness. This should save even more memory!
# 2010-03-25 CW r4234 remove duplicate chunks for extend_cells; refactor to remove put_number_cell and put_blank_cell which essentially duplicated the code of put_cell
# 2010-03-10 SJM r4222 Added reading of the PANE record.
# 2010-03-10 SJM r4221 Preliminary work on "cooked" mag factors; use at own peril
# 2010-03-01 SJM Reading SCL record
# 2010-03-01 SJM Added ragged_rows functionality
# 2009-08-23 SJM Reduced CPU time taken by parsing MULBLANK records.
# 2009-08-18 SJM Used __slots__ and sharing to reduce memory consumed by Rowinfo instances
# 2009-05-31 SJM Fixed problem with no CODEPAGE record on extremely minimal BIFF2.x 3rd-party file
# 2009-04-27 SJM Integrated on_demand patch by Armando Serrano Lombillo
# 2008-02-09 SJM Excel 2.0: build XFs on the fly from cell attributes
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-10-11 SJM Added missing entry for blank cell type to ctype_text
# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file
# 2007-04-22 SJM Remove experimental "trimming" facility.
from __future__ import print_function
from array import array
from struct import unpack, calcsize
from .biffh import *
from .timemachine import *
from .formula import dump_formula, decompile_formula, rangename2d, FMLA_TYPE_CELL, FMLA_TYPE_SHARED
from .formatting import nearest_colour_index, Format
DEBUG = 0
OBJ_MSO_DEBUG = 0
_WINDOW2_options = (
# Attribute names and initial values to use in case
# a WINDOW2 record is not written.
("show_formulas", 0),
("show_grid_lines", 1),
("show_sheet_headers", 1),
("panes_are_frozen", 0),
("show_zero_values", 1),
("automatic_grid_line_colour", 1),
("columns_from_right_to_left", 0),
("show_outline_symbols", 1),
("remove_splits_if_pane_freeze_is_removed", 0),
# Multiple sheets can be selected, but only one can be active
# (hold down Ctrl and click multiple tabs in the file in OOo)
("sheet_selected", 0),
# "sheet_visible" should really be called "sheet_active"
# and is 1 when this sheet is the sheet displayed when the file
# is open. More than likely only one sheet should ever be set as
# visible.
# This would correspond to the Book's sheet_active attribute, but
# that doesn't exist as WINDOW1 records aren't currently processed.
# The real thing is the visibility attribute from the BOUNDSHEET record.
("sheet_visible", 0),
("show_in_page_break_preview", 0),
)
##
# <p>Contains the data for one worksheet.</p>
#
# <p>In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a
# column index, counting from zero.
# Negative values for row/column indexes and slice positions are supported in the expected fashion.</p>
#
# <p>For information about cell types and cell values, refer to the documentation of the {@link #Cell} class.</p>
#
# <p>WARNING: You don't call this class yourself. You access Sheet objects via the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
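#
# <p>A minimal usage sketch (assuming "myfile.xls" exists and contains at least one sheet):</p>
# <pre>
# import xlrd
# book = xlrd.open_workbook("myfile.xls")
# thesheet = book.sheet_by_index(0)
# for rowx in xrange(thesheet.nrows):
#     print(thesheet.row_values(rowx))
# </pre>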
class Sheet(BaseObject):
##
# Name of sheet.
name = ''
##
# A reference to the Book object to which this sheet belongs.
# Example usage: some_sheet.book.datemode
book = None
##
# Number of rows in sheet. A row index is in range(thesheet.nrows).
nrows = 0
##
# Nominal number of columns in sheet. It is 1 + the maximum column index
# found, ignoring trailing empty cells. See also open_workbook(ragged_rows=?)
# and Sheet.{@link #Sheet.row_len}(row_index).
ncols = 0
##
# The map from a column index to a {@link #Colinfo} object. Often there is an entry
# in COLINFO records for all column indexes in range(257).
# Note that xlrd ignores the entry for the non-existent
# 257th column. On the other hand, there may be no entry for unused columns.
# <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
colinfo_map = {}
##
# The map from a row index to a {@link #Rowinfo} object. Note that it is possible
# to have missing entries -- at least one source of XLS files doesn't
# bother writing ROW records.
# <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
rowinfo_map = {}
##
# List of address ranges of cells containing column labels.
# These are set up in Excel by Insert > Name > Labels > Columns.
# <br> -- New in version 0.6.0
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.col_label_ranges:
# rlo, rhi, clo, chi = crange
# for rx in xrange(rlo, rhi):
# for cx in xrange(clo, chi):
# print "Column label at (rowx=%d, colx=%d) is %r" \
# (rx, cx, thesheet.cell_value(rx, cx))
# </pre>
col_label_ranges = []
##
# List of address ranges of cells containing row labels.
# For more details, see <i>col_label_ranges</i> above.
# <br> -- New in version 0.6.0
row_label_ranges = []
##
# List of address ranges of cells which have been merged.
# These are set up in Excel by Format > Cells > Alignment, then ticking
# the "Merge cells" box.
# <br> Note that the upper limits are exclusive: i.e. <tt>[2, 3, 7, 9]</tt> only
# spans two cells.
# <br> -- New in version 0.6.1. Extracted only if open_workbook(formatting_info=True).
# <br>How to deconstruct the list:
# <pre>
# for crange in thesheet.merged_cells:
# rlo, rhi, clo, chi = crange
# for rowx in xrange(rlo, rhi):
# for colx in xrange(clo, chi):
# # cell (rlo, clo) (the top left one) will carry the data
# # and formatting info; the remainder will be recorded as
# # blank cells, but a renderer will apply the formatting info
# # for the top left cell (e.g. border, pattern) to all cells in
# # the range.
# </pre>
merged_cells = []
##
# Mapping of (rowx, colx) to list of (offset, font_index) tuples. The offset
# defines where in the string the font begins to be used.
# Offsets are expected to be in ascending order.
# If the first offset is not zero, the meaning is that the cell's XF's font should
# be used from offset 0.
# <br /> This is a sparse mapping. There is no entry for cells that are not formatted with
# rich text.
# <br>How to use:
# <pre>
# runlist = thesheet.rich_text_runlist_map.get((rowx, colx))
# if runlist:
# for offset, font_index in runlist:
# # do work here.
# pass
# </pre>
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2.
# <br />
rich_text_runlist_map = {}
##
# Default column width from DEFCOLWIDTH record, else None.
# From the OOo docs:<br />
# """Column width in characters, using the width of the zero character
# from default font (first FONT record in the file). Excel adds some
# extra space to the default width, depending on the default font and
# default font size. The algorithm how to exactly calculate the resulting
# column width is not known.<br />
# Example: The default width of 8 set in this record results in a column
# width of 8.43 using Arial font with a size of 10 points."""<br />
# For the default hierarchy, refer to the {@link #Colinfo} class.
# <br /> -- New in version 0.6.1
defcolwidth = None
##
# Default column width from STANDARDWIDTH record, else None.
# From the OOo docs:<br />
# """Default width of the columns in 1/256 of the width of the zero
# character, using default font (first FONT record in the file)."""<br />
# For the default hierarchy, refer to the {@link #Colinfo} class.
# <br /> -- New in version 0.6.1
standardwidth = None
##
# Default value to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height = None
##
# Default "height mismatch" flag (row height differs from the default font
# height) to be used for a row if there is no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_height_mismatch = None
##
# Default "hidden" flag to be used for a row if there is
# no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_row_hidden = None
##
# Default "additional space above the row" flag to be used for a row
# if there is no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_above = None
##
# Default "additional space below the row" flag to be used for a row
# if there is no ROW record for that row.
# From the <i>optional</i> DEFAULTROWHEIGHT record.
default_additional_space_below = None
##
# Visibility of the sheet. 0 = visible, 1 = hidden (can be unhidden
# by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden
# only by VBA macro).
visibility = 0
##
# A 256-element tuple corresponding to the contents of the GCW record for this sheet.
# If no such record, treat as all bits zero.
# Applies to BIFF4-7 only. See docs of the {@link #Colinfo} class for discussion.
gcw = (0, ) * 256
##
# <p>A list of {@link #Hyperlink} objects corresponding to HLINK records found
# in the worksheet.<br />-- New in version 0.7.2 </p>
hyperlink_list = []
##
# <p>A sparse mapping from (rowx, colx) to an item in {@link #Sheet.hyperlink_list}.
# Cells not covered by a hyperlink are not mapped.
# It is possible using the Excel UI to set up a hyperlink that
# covers a larger-than-1x1 rectangle of cells.
# Hyperlink rectangles may overlap (Excel doesn't check).
# When a multiply-covered cell is clicked on, the hyperlink that is activated
# (and the one that is mapped here) is the last in hyperlink_list.
# <br />-- New in version 0.7.2 </p>
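# <br>How to use (a minimal sketch; meaningful only for files that contain hyperlinks):
# <pre>
# h = thesheet.hyperlink_map.get((rowx, colx))
# if h:
#     print(h.type, h.url_or_path)
# </pre>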
hyperlink_map = {}
##
# <p>A sparse mapping from (rowx, colx) to a {@link #Note} object.
# Cells not containing a note ("comment") are not mapped.
# <br />-- New in version 0.7.2 </p>
cell_note_map = {}
##
# Number of columns in left pane (frozen panes; for split panes, see comments below in code)
vert_split_pos = 0
##
# Number of rows in top pane (frozen panes; for split panes, see comments below in code)
horz_split_pos = 0
##
# Index of first visible row in bottom frozen/split pane
horz_split_first_visible = 0
##
# Index of first visible column in right frozen/split pane
vert_split_first_visible = 0
##
# Frozen panes: ignore it. Split panes: explanation and diagrams in OOo docs.
split_active_pane = 0
##
# Boolean specifying if a PANE record was present, ignore unless you're xlutils.copy
has_pane_record = 0
##
# A list of the horizontal page breaks in this sheet.
# Breaks are tuples in the form (index of row after break, start col index, end col index).
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2
horizontal_page_breaks = []
##
# A list of the vertical page breaks in this sheet.
# Breaks are tuples in the form (index of col after break, start row index, end row index).
# Populated only if open_workbook(formatting_info=True).
# <br /> -- New in version 0.7.2
vertical_page_breaks = []
def __init__(self, book, position, name, number):
self.book = book
self.biff_version = book.biff_version
self._position = position
self.logfile = book.logfile
self.bt = array('B', [XL_CELL_EMPTY])
self.bf = array('h', [-1])
self.name = name
self.number = number
self.verbosity = book.verbosity
self.formatting_info = book.formatting_info
self.ragged_rows = book.ragged_rows
if self.ragged_rows:
self.put_cell = self.put_cell_ragged
else:
self.put_cell = self.put_cell_unragged
self._xf_index_to_xl_type_map = book._xf_index_to_xl_type_map
self.nrows = 0 # actual, including possibly empty cells
self.ncols = 0
self._maxdatarowx = -1 # highest rowx containing a non-empty cell
self._maxdatacolx = -1 # highest colx containing a non-empty cell
self._dimnrows = 0 # as per DIMENSIONS record
self._dimncols = 0
self._cell_values = []
self._cell_types = []
self._cell_xf_indexes = []
self.defcolwidth = None
self.standardwidth = None
self.default_row_height = None
self.default_row_height_mismatch = 0
self.default_row_hidden = 0
self.default_additional_space_above = 0
self.default_additional_space_below = 0
self.colinfo_map = {}
self.rowinfo_map = {}
self.col_label_ranges = []
self.row_label_ranges = []
self.merged_cells = []
self.rich_text_runlist_map = {}
self.horizontal_page_breaks = []
self.vertical_page_breaks = []
self._xf_index_stats = [0, 0, 0, 0]
self.visibility = book._sheet_visibility[number] # from BOUNDSHEET record
for attr, defval in _WINDOW2_options:
setattr(self, attr, defval)
self.first_visible_rowx = 0
self.first_visible_colx = 0
self.gridline_colour_index = 0x40
self.gridline_colour_rgb = None # pre-BIFF8
self.hyperlink_list = []
self.hyperlink_map = {}
self.cell_note_map = {}
# Values calculated by xlrd to predict the mag factors that
# will actually be used by Excel to display your worksheet.
# Pass these values to xlwt when writing XLS files.
# Warning 1: Behaviour of OOo Calc and Gnumeric has been observed to differ from Excel's.
# Warning 2: A value of zero means almost exactly what it says. Your sheet will be
# displayed as a very tiny speck on the screen. xlwt will reject attempts to set
# a mag_factor that is not (10 <= mag_factor <= 400).
self.cooked_page_break_preview_mag_factor = 60
self.cooked_normal_view_mag_factor = 100
# Values (if any) actually stored on the XLS file
self.cached_page_break_preview_mag_factor = 0 # default (60%), from WINDOW2 record
self.cached_normal_view_mag_factor = 0 # default (100%), from WINDOW2 record
self.scl_mag_factor = None # from SCL record
self._ixfe = None # BIFF2 only
self._cell_attr_to_xfx = {} # BIFF2.0 only
#### Don't initialise this here, use class attribute initialisation.
#### self.gcw = (0, ) * 256 ####
if self.biff_version >= 80:
self.utter_max_rows = 65536
else:
self.utter_max_rows = 16384
self.utter_max_cols = 256
self._first_full_rowx = -1
# self._put_cell_exceptions = 0
# self._put_cell_row_widenings = 0
# self._put_cell_rows_appended = 0
# self._put_cell_cells_appended = 0
##
# {@link #Cell} object in the given row and column.
def cell(self, rowx, colx):
if self.formatting_info:
xfx = self.cell_xf_index(rowx, colx)
else:
xfx = None
return Cell(
self._cell_types[rowx][colx],
self._cell_values[rowx][colx],
xfx,
)
##
# Value of the cell in the given row and column.
def cell_value(self, rowx, colx):
return self._cell_values[rowx][colx]
##
# Type of the cell in the given row and column.
# Refer to the documentation of the {@link #Cell} class.
def cell_type(self, rowx, colx):
return self._cell_types[rowx][colx]
##
# XF index of the cell in the given row and column.
# This is an index into Book.{@link #Book.xf_list}.
# <br /> -- New in version 0.6.1
def cell_xf_index(self, rowx, colx):
self.req_fmt_info()
xfx = self._cell_xf_indexes[rowx][colx]
if xfx > -1:
self._xf_index_stats[0] += 1
return xfx
# Check for a row xf_index
try:
xfx = self.rowinfo_map[rowx].xf_index
if xfx > -1:
self._xf_index_stats[1] += 1
return xfx
except KeyError:
pass
# Check for a column xf_index
try:
xfx = self.colinfo_map[colx].xf_index
if xfx == -1: xfx = 15
self._xf_index_stats[2] += 1
return xfx
except KeyError:
# If all else fails, 15 is used as hardwired global default xf_index.
self._xf_index_stats[3] += 1
return 15
##
# Returns the effective number of cells in the given row. For use with
# open_workbook(ragged_rows=True) which is likely to produce rows
# with fewer than {@link #Sheet.ncols} cells.
# <br /> -- New in version 0.7.2
def row_len(self, rowx):
return len(self._cell_values[rowx])
##
# Returns a sequence of the {@link #Cell} objects in the given row.
def row(self, rowx):
return [
self.cell(rowx, colx)
for colx in xrange(len(self._cell_values[rowx]))
]
##
# Returns a generator for iterating through each row.
def get_rows(self):
return (self.row(index) for index in range(self.nrows))
##
# Returns a slice of the types
# of the cells in the given row.
def row_types(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_types[rowx][start_colx:]
return self._cell_types[rowx][start_colx:end_colx]
##
# Returns a slice of the values
# of the cells in the given row.
def row_values(self, rowx, start_colx=0, end_colx=None):
if end_colx is None:
return self._cell_values[rowx][start_colx:]
return self._cell_values[rowx][start_colx:end_colx]
##
# Returns a slice of the {@link #Cell} objects in the given row.
def row_slice(self, rowx, start_colx=0, end_colx=None):
nc = len(self._cell_values[rowx])
if start_colx < 0:
start_colx += nc
if start_colx < 0:
start_colx = 0
if end_colx is None or end_colx > nc:
end_colx = nc
elif end_colx < 0:
end_colx += nc
return [
self.cell(rowx, colx)
for colx in xrange(start_colx, end_colx)
]
##
# Returns a slice of the {@link #Cell} objects in the given column.
def col_slice(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self.cell(rowx, colx)
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the values of the cells in the given column.
def col_values(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_values[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a slice of the types of the cells in the given column.
def col_types(self, colx, start_rowx=0, end_rowx=None):
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_types[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
##
# Returns a sequence of the {@link #Cell} objects in the given column.
def col(self, colx):
return self.col_slice(colx)
# Above two lines just for the docs. Here's the real McCoy:
col = col_slice
# === Following methods are used in building the worksheet.
# === They are not part of the API.
def tidy_dimensions(self):
if self.verbosity >= 3:
fprintf(self.logfile,
"tidy_dimensions: nrows=%d ncols=%d \n",
self.nrows, self.ncols,
)
if 1 and self.merged_cells:
nr = nc = 0
umaxrows = self.utter_max_rows
umaxcols = self.utter_max_cols
for crange in self.merged_cells:
rlo, rhi, clo, chi = crange
if not (0 <= rlo < rhi <= umaxrows) \
or not (0 <= clo < chi <= umaxcols):
fprintf(self.logfile,
"*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n",
self.number, self.name, crange)
if rhi > nr: nr = rhi
if chi > nc: nc = chi
if nc > self.ncols:
self.ncols = nc
self._first_full_rowx = -2
if nr > self.nrows:
# we put one empty cell at (nr-1,0) to make sure
# we have the right number of rows. The ragged rows
# will sort out the rest if needed.
self.put_cell(nr-1, 0, XL_CELL_EMPTY, UNICODE_LITERAL(''), -1)
if self.verbosity >= 1 \
and (self.nrows != self._dimnrows or self.ncols != self._dimncols):
fprintf(self.logfile,
"NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n",
self.number,
self.name,
self._dimnrows,
self._dimncols,
self.nrows,
self.ncols,
)
if not self.ragged_rows:
# fix ragged rows
ncols = self.ncols
s_cell_types = self._cell_types
s_cell_values = self._cell_values
s_cell_xf_indexes = self._cell_xf_indexes
s_fmt_info = self.formatting_info
# for rowx in xrange(self.nrows):
if self._first_full_rowx == -2:
ubound = self.nrows
else:
ubound = self._first_full_rowx
for rowx in xrange(ubound):
trow = s_cell_types[rowx]
rlen = len(trow)
nextra = ncols - rlen
if nextra > 0:
s_cell_values[rowx][rlen:] = [UNICODE_LITERAL('')] * nextra
trow[rlen:] = self.bt * nextra
if s_fmt_info:
s_cell_xf_indexes[rowx][rlen:] = self.bf * nextra
def put_cell_ragged(self, rowx, colx, ctype, value, xf_index):
if ctype is None:
# we have a number, so look up the cell type
ctype = self._xf_index_to_xl_type_map[xf_index]
assert 0 <= colx < self.utter_max_cols
assert 0 <= rowx < self.utter_max_rows
fmt_info = self.formatting_info
try:
nr = rowx + 1
if self.nrows < nr:
scta = self._cell_types.append
scva = self._cell_values.append
scxa = self._cell_xf_indexes.append
bt = self.bt
bf = self.bf
for _unused in xrange(self.nrows, nr):
scta(bt * 0)
scva([])
if fmt_info:
scxa(bf * 0)
self.nrows = nr
types_row = self._cell_types[rowx]
values_row = self._cell_values[rowx]
if fmt_info:
fmt_row = self._cell_xf_indexes[rowx]
ltr = len(types_row)
if colx >= self.ncols:
self.ncols = colx + 1
num_empty = colx - ltr
if not num_empty:
# most common case: colx == previous colx + 1
# self._put_cell_cells_appended += 1
types_row.append(ctype)
values_row.append(value)
if fmt_info:
fmt_row.append(xf_index)
return
if num_empty > 0:
num_empty += 1
# self._put_cell_row_widenings += 1
# types_row.extend(self.bt * num_empty)
# values_row.extend([UNICODE_LITERAL('')] * num_empty)
# if fmt_info:
# fmt_row.extend(self.bf * num_empty)
types_row[ltr:] = self.bt * num_empty
values_row[ltr:] = [UNICODE_LITERAL('')] * num_empty
if fmt_info:
fmt_row[ltr:] = self.bf * num_empty
types_row[colx] = ctype
values_row[colx] = value
if fmt_info:
fmt_row[colx] = xf_index
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
def put_cell_unragged(self, rowx, colx, ctype, value, xf_index):
if ctype is None:
# we have a number, so look up the cell type
ctype = self._xf_index_to_xl_type_map[xf_index]
# assert 0 <= colx < self.utter_max_cols
# assert 0 <= rowx < self.utter_max_rows
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except IndexError:
# print >> self.logfile, "put_cell extending", rowx, colx
# self.extend_cells(rowx+1, colx+1)
# self._put_cell_exceptions += 1
nr = rowx + 1
nc = colx + 1
assert 1 <= nc <= self.utter_max_cols
assert 1 <= nr <= self.utter_max_rows
if nc > self.ncols:
self.ncols = nc
# The row self._first_full_rowx and all subsequent rows
# are guaranteed to have length == self.ncols. Thus the
# "fix ragged rows" section of the tidy_dimensions method
# doesn't need to examine them.
if nr < self.nrows:
# cell data is not in non-descending row order *AND*
# self.ncols has been bumped up.
# This very rare case ruins this optimisation.
self._first_full_rowx = -2
elif rowx > self._first_full_rowx > -2:
self._first_full_rowx = rowx
if nr <= self.nrows:
# New cell is in an existing row, so extend that row (if necessary).
# Note that nr < self.nrows means that the cell data
# is not in ascending row order!!
trow = self._cell_types[rowx]
nextra = self.ncols - len(trow)
if nextra > 0:
# self._put_cell_row_widenings += 1
trow.extend(self.bt * nextra)
if self.formatting_info:
self._cell_xf_indexes[rowx].extend(self.bf * nextra)
self._cell_values[rowx].extend([UNICODE_LITERAL('')] * nextra)
else:
scta = self._cell_types.append
scva = self._cell_values.append
scxa = self._cell_xf_indexes.append
fmt_info = self.formatting_info
nc = self.ncols
bt = self.bt
bf = self.bf
for _unused in xrange(self.nrows, nr):
# self._put_cell_rows_appended += 1
scta(bt * nc)
scva([UNICODE_LITERAL('')] * nc)
if fmt_info:
scxa(bf * nc)
self.nrows = nr
# === end of code from extend_cells()
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
# === Methods after this line neither know nor care about how cells are stored.
def read(self, bk):
global rc_stats
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
blah_rows = DEBUG or self.verbosity >= 4
blah_formulas = 0 and blah
r1c1 = 0
oldpos = bk._position
bk._position = self._position
XL_SHRFMLA_ETC_ETC = (
XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2,
XL_ARRAY2, XL_TABLEOP_B2,
)
self_put_cell = self.put_cell
local_unpack = unpack
bk_get_record_parts = bk.get_record_parts
bv = self.biff_version
fmt_info = self.formatting_info
do_sst_rich_text = fmt_info and bk._rich_text_runlist_map
rowinfo_sharing_dict = {}
txos = {}
eof_found = 0
while 1:
# if DEBUG: print "SHEET.READ: about to read from position %d" % bk._position
rc, data_len, data = bk_get_record_parts()
# if rc in rc_stats:
# rc_stats[rc] += 1
# else:
# rc_stats[rc] = 1
# if DEBUG: print "SHEET.READ: op 0x%04x, %d bytes %r" % (rc, data_len, data)
if rc == XL_NUMBER:
# [:14] in following stmt ignores extraneous rubbish at end of record.
# Sample file testEON-8.xls supplied by Jan Kraus.
rowx, colx, xf_index, d = local_unpack('<HHHd', data[:14])
# if xf_index == 0:
# fprintf(self.logfile,
# "NUMBER: r=%d c=%d xfx=%d %f\n", rowx, colx, xf_index, d)
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_LABELSST:
rowx, colx, xf_index, sstindex = local_unpack('<HHHi', data)
# print "LABELSST", rowx, colx, sstindex, bk._sharedstrings[sstindex]
self_put_cell(rowx, colx, XL_CELL_TEXT, bk._sharedstrings[sstindex], xf_index)
if do_sst_rich_text:
runlist = bk._rich_text_runlist_map.get(sstindex)
if runlist:
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_LABEL:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg = unpack_string(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
else:
strg = unpack_unicode(data, 6, lenlen=2)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
elif rc == XL_RSTRING:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg, pos = unpack_string_update_pos(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
nrt = BYTES_ORD(data[pos])
pos += 1
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<BB', data[pos:pos+2]))
pos += 2
assert pos == len(data)
else:
strg, pos = unpack_unicode_update_pos(data, 6, lenlen=2)
nrt = unpack('<H', data[pos:pos+2])[0]
pos += 2
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<HH', data[pos:pos+4]))
pos += 4
assert pos == len(data)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_RK:
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
d = unpack_RK(data[6:10])
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_MULRK:
mulrk_row, mulrk_first = local_unpack('<HH', data[0:4])
mulrk_last, = local_unpack('<H', data[-2:])
pos = 4
for colx in xrange(mulrk_first, mulrk_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
d = unpack_RK(data[pos+2:pos+6])
pos += 6
self_put_cell(mulrk_row, colx, None, d, xf_index)
elif rc == XL_ROW:
# Version 0.6.0a3: ROW records are just not worth using (for memory allocation).
# Version 0.6.1: now used for formatting info.
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH4xi', data[0:16])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows), file=self.logfile)
continue
key = (bits1, bits2)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
# Using upkbits() is far too slow on a file
# with 30 sheets each with 10K rows :-(
# upkbits(r, bits1, (
# ( 0, 0x7FFF, 'height'),
# (15, 0x8000, 'has_default_height'),
# ))
# upkbits(r, bits2, (
# ( 0, 0x00000007, 'outline_level'),
# ( 4, 0x00000010, 'outline_group_starts_ends'),
# ( 5, 0x00000020, 'hidden'),
# ( 6, 0x00000040, 'height_mismatch'),
# ( 7, 0x00000080, 'has_default_xf_index'),
# (16, 0x0FFF0000, 'xf_index'),
# (28, 0x10000000, 'additional_space_above'),
# (29, 0x20000000, 'additional_space_below'),
# ))
# So:
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.outline_level = bits2 & 7
r.outline_group_starts_ends = (bits2 >> 4) & 1
r.hidden = (bits2 >> 5) & 1
r.height_mismatch = (bits2 >> 6) & 1
r.has_default_xf_index = (bits2 >> 7) & 1
r.xf_index = (bits2 >> 16) & 0xfff
r.additional_space_above = (bits2 >> 28) & 1
r.additional_space_below = (bits2 >> 29) & 1
if not r.has_default_xf_index:
r.xf_index = -1
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW', rowx, bits1, bits2, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc in XL_FORMULA_OPCODES: # 06, 0206, 0406
# DEBUG = 1
# if DEBUG: print "FORMULA: rc: 0x%04x data: %r" % (rc, data)
if bv >= 50:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 20
elif bv >= 30:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
lenlen = 2
tkarr_offset = 16
else: # BIFF2
rowx, colx, cell_attr, result_str, flags = local_unpack('<HH3s8sB', data[0:16])
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx)
lenlen = 1
tkarr_offset = 16
if blah_formulas: # testing formula dumper
#### XXXX FIXME
fprintf(self.logfile, "FORMULA: rowx=%d colx=%d\n", rowx, colx)
fmlalen = local_unpack("<H", data[20:22])[0]
decompile_formula(bk, data[22:], fmlalen, FMLA_TYPE_CELL,
browx=rowx, bcolx=colx, blah=1, r1c1=r1c1)
if result_str[6:8] == b"\xFF\xFF":
first_byte = BYTES_ORD(result_str[0])
if first_byte == 0:
# need to read next record (STRING)
gotstring = 0
# if flags & 8:
if 1: # "flags & 8" applies only to SHRFMLA
# actually there's an optional SHRFMLA or ARRAY etc record to skip over
rc2, data2_len, data2 = bk.get_record_parts()
if rc2 == XL_STRING or rc2 == XL_STRING_B2:
gotstring = 1
elif rc2 == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data2[:14])
if blah_formulas:
fprintf(self.logfile, "ARRAY: %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, array_flags)
# dump_formula(bk, data2[14:], tokslen, bv, reldelta=0, blah=1)
elif rc2 == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data2[:10])
if blah_formulas:
fprintf(self.logfile, "SHRFMLA (sub): %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, nfmlas)
decompile_formula(bk, data2[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc2 not in XL_SHRFMLA_ETC_ETC:
raise XLRDError(
"Expected SHRFMLA, ARRAY, TABLEOP* or STRING record; found 0x%04x" % rc2)
# if DEBUG: print "gotstring:", gotstring
# now for the STRING record
if not gotstring:
rc2, _unused_len, data2 = bk.get_record_parts()
if rc2 not in (XL_STRING, XL_STRING_B2):
raise XLRDError("Expected STRING record; found 0x%04x" % rc2)
# if DEBUG: print "STRING: data=%r BIFF=%d cp=%d" % (data2, self.biff_version, bk.encoding)
strg = self.string_record_contents(data2)
self.put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
# if DEBUG: print "FORMULA strg %r" % strg
elif first_byte == 1:
# boolean formula result
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_BOOLEAN, value, xf_index)
elif first_byte == 2:
# Error in cell
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
elif first_byte == 3:
# empty ... i.e. empty (zero-length) string, NOT an empty cell.
self_put_cell(rowx, colx, XL_CELL_TEXT, "", xf_index)
else:
raise XLRDError("unexpected special case (0x%02x) in FORMULA" % first_byte)
else:
# it is a number
d = local_unpack('<d', result_str)[0]
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_BOOLERR:
rowx, colx, xf_index, value, is_err = local_unpack('<HHHBB', data[:8])
# Note OOo Calc 2.0 writes 9-byte BOOLERR records.
# OOo docs say 8. Excel writes 8.
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR", rowx, colx, xf_index, value, is_err
self_put_cell(rowx, colx, cellty, value, xf_index)
elif rc == XL_COLINFO:
if not fmt_info: continue
c = Colinfo()
first_colx, last_colx, c.width, c.xf_index, flags \
= local_unpack("<HHHHH", data[:10])
#### Colinfo.width is denominated in 256ths of a character,
#### *not* in characters.
if not(0 <= first_colx <= last_colx <= 256):
# Note: 256 instead of 255 is a common mistake.
# We silently ignore the non-existing 257th column in that case.
print("*** NOTE: COLINFO record has first col index %d, last %d; " \
"should have 0 <= first <= last <= 255 -- record ignored!" \
% (first_colx, last_colx), file=self.logfile)
del c
continue
upkbits(c, flags, (
( 0, 0x0001, 'hidden'),
( 1, 0x0002, 'bit1_flag'),
# *ALL* colinfos created by Excel in "default" cases are 0x0002!!
# Maybe it's "locked" by analogy with XFProtection data.
( 8, 0x0700, 'outline_level'),
(12, 0x1000, 'collapsed'),
))
for colx in xrange(first_colx, last_colx+1):
if colx > 255: break # Excel does 0 to 256 inclusive
self.colinfo_map[colx] = c
if 0:
fprintf(self.logfile,
"**COL %d %d %d\n",
self.number, colx, c.xf_index)
if blah:
fprintf(
self.logfile,
"COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n",
self.number, first_colx, last_colx, c.width, c.xf_index, flags,
)
c.dump(self.logfile, header='===')
elif rc == XL_DEFCOLWIDTH:
self.defcolwidth, = local_unpack("<H", data[:2])
if 0: print('DEFCOLWIDTH', self.defcolwidth, file=self.logfile)
elif rc == XL_STANDARDWIDTH:
if data_len != 2:
print('*** ERROR *** STANDARDWIDTH', data_len, repr(data), file=self.logfile)
self.standardwidth, = local_unpack("<H", data[:2])
if 0: print('STANDARDWIDTH', self.standardwidth, file=self.logfile)
elif rc == XL_GCW:
if not fmt_info: continue # useless w/o COLINFO
assert data_len == 34
assert data[0:2] == b"\x20\x00"
iguff = unpack("<8i", data[2:34])
gcw = []
for bits in iguff:
for j in xrange(32):
gcw.append(bits & 1)
bits >>= 1
self.gcw = tuple(gcw)
if 0:
showgcw = "".join(map(lambda x: "F "[x], gcw)).rstrip().replace(' ', '.')
print("GCW:", showgcw, file=self.logfile)
elif rc == XL_BLANK:
if not fmt_info: continue
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
# if 0: print >> self.logfile, "BLANK", rowx, colx, xf_index
self_put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index)
elif rc == XL_MULBLANK: # 00BE
if not fmt_info: continue
nitems = data_len >> 1
result = local_unpack("<%dH" % nitems, data)
rowx, mul_first = result[:2]
mul_last = result[-1]
# print >> self.logfile, "MULBLANK", rowx, mul_first, mul_last, data_len, nitems, mul_last + 4 - mul_first
assert nitems == mul_last + 4 - mul_first
pos = 2
for colx in xrange(mul_first, mul_last + 1):
self_put_cell(rowx, colx, XL_CELL_BLANK, '', result[pos])
pos += 1
elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
if data_len == 0:
# Four zero bytes after some other record. See github issue 64.
continue
# if data_len == 10:
# Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
# Reported by Ralph Heimburger.
if bv < 80:
dim_tuple = local_unpack('<HxxH', data[2:8])
else:
dim_tuple = local_unpack('<ixxH', data[4:12])
self.nrows, self.ncols = 0, 0
self._dimnrows, self._dimncols = dim_tuple
if bv in (21, 30, 40) and self.book.xf_list and not self.book._xf_epilogue_done:
self.book.xf_epilogue()
if blah:
fprintf(self.logfile,
"sheet %d(%r) DIMENSIONS: ncols=%d nrows=%d\n",
self.number, self.name, self._dimncols, self._dimnrows
)
elif rc == XL_HLINK:
self.handle_hlink(data)
elif rc == XL_QUICKTIP:
self.handle_quicktip(data)
elif rc == XL_EOF:
DEBUG = 0
if DEBUG: print("SHEET.READ: EOF", file=self.logfile)
eof_found = 1
break
elif rc == XL_OBJ:
# handle SHEET-level objects; note there's a separate Book.handle_obj
saved_obj = self.handle_obj(data)
if saved_obj: saved_obj_id = saved_obj.id
else: saved_obj_id = None
elif rc == XL_MSO_DRAWING:
self.handle_msodrawingetc(rc, data_len, data)
elif rc == XL_TXO:
txo = self.handle_txo(data)
if txo and saved_obj_id:
txos[saved_obj_id] = txo
saved_obj_id = None
elif rc == XL_NOTE:
self.handle_note(data, txos)
elif rc == XL_FEAT11:
self.handle_feat11(data)
elif rc in bofcodes: ##### EMBEDDED BOF #####
version, boftype = local_unpack('<HH', data[0:4])
if boftype != 0x20: # embedded chart
print("*** Unexpected embedded BOF (0x%04x) at offset %d: version=0x%04x type=0x%04x" \
% (rc, bk._position - data_len - 4, version, boftype), file=self.logfile)
while 1:
code, data_len, data = bk.get_record_parts()
if code == XL_EOF:
break
if DEBUG: print("---> found EOF", file=self.logfile)
elif rc == XL_COUNTRY:
bk.handle_country(data)
elif rc == XL_LABELRANGES:
pos = 0
pos = unpack_cell_range_address_list_update_pos(
self.row_label_ranges, data, pos, bv, addr_size=8,
)
pos = unpack_cell_range_address_list_update_pos(
self.col_label_ranges, data, pos, bv, addr_size=8,
)
assert pos == data_len
elif rc == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data[:14])
if blah_formulas:
print("ARRAY:", row1x, rownx, col1x, colnx, array_flags, file=self.logfile)
# dump_formula(bk, data[14:], tokslen, bv, reldelta=0, blah=1)
elif rc == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data[:10])
if blah_formulas:
print("SHRFMLA (main):", row1x, rownx, col1x, colnx, nfmlas, file=self.logfile)
decompile_formula(bk, data[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc == XL_CONDFMT:
if not fmt_info: continue
assert bv >= 80
num_CFs, needs_recalc, browx1, browx2, bcolx1, bcolx2 = \
unpack("<6H", data[0:12])
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n" \
"*** in Sheet %d (%r).\n" \
"*** %d CF record(s); needs_recalc_or_redraw = %d\n" \
"*** Bounding box is %s\n",
self.number, self.name, num_CFs, needs_recalc,
rangename2d(browx1, browx2+1, bcolx1, bcolx2+1),
)
olist = [] # updated by the function
pos = unpack_cell_range_address_list_update_pos(
olist, data, 12, bv, addr_size=8)
# print >> self.logfile, repr(result), len(result)
if self.verbosity >= 1:
fprintf(self.logfile,
"*** %d individual range(s):\n" \
"*** %s\n",
len(olist),
", ".join([rangename2d(*coords) for coords in olist]),
)
elif rc == XL_CF:
if not fmt_info: continue
cf_type, cmp_op, sz1, sz2, flags = unpack("<BBHHi", data[0:10])
font_block = (flags >> 26) & 1
bord_block = (flags >> 28) & 1
patt_block = (flags >> 29) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n" \
"*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n" \
"*** optional data blocks: font=%d, border=%d, pattern=%d\n",
cf_type, cmp_op, sz1, sz2, flags,
font_block, bord_block, patt_block,
)
# hex_char_dump(data, 0, data_len, fout=self.logfile)
pos = 12
if font_block:
(font_height, font_options, weight, escapement, underline,
font_colour_index, two_bits, font_esc, font_underl) = \
unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118])
font_style = (two_bits >> 1) & 1
posture = (font_options >> 1) & 1
font_canc = (two_bits >> 7) & 1
cancellation = (font_options >> 7) & 1
if self.verbosity >= 1:
fprintf(self.logfile,
"*** Font info: height=%d, weight=%d, escapement=%d,\n" \
"*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n" \
"*** style=%d, posture=%d, canc=%d, cancellation=%d\n",
font_height, weight, escapement, underline,
font_colour_index, font_esc, font_underl,
font_style, posture, font_canc, cancellation,
)
pos += 118
if bord_block:
pos += 8
if patt_block:
pos += 4
fmla1 = data[pos:pos+sz1]
pos += sz1
if blah and sz1:
fprintf(self.logfile,
"*** formula 1:\n",
)
dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1)
fmla2 = data[pos:pos+sz2]
pos += sz2
assert pos == data_len
if blah and sz2:
fprintf(self.logfile,
"*** formula 2:\n",
)
dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1)
elif rc == XL_DEFAULTROWHEIGHT:
if data_len == 4:
bits, self.default_row_height = unpack("<HH", data[:4])
elif data_len == 2:
self.default_row_height, = unpack("<H", data)
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is 2, " \
"should be 4; assuming BIFF2 format\n")
else:
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is %d, " \
"should be 4; ignoring this record\n",
data_len)
self.default_row_height_mismatch = bits & 1
self.default_row_hidden = (bits >> 1) & 1
self.default_additional_space_above = (bits >> 2) & 1
self.default_additional_space_below = (bits >> 3) & 1
elif rc == XL_MERGEDCELLS:
if not fmt_info: continue
pos = unpack_cell_range_address_list_update_pos(
self.merged_cells, data, 0, bv, addr_size=8)
if blah:
fprintf(self.logfile,
"MERGEDCELLS: %d ranges\n", (pos - 2) // 8)
assert pos == data_len, \
"MERGEDCELLS: pos=%d data_len=%d" % (pos, data_len)
elif rc == XL_WINDOW2:
if bv >= 80 and data_len >= 14:
(options,
self.first_visible_rowx, self.first_visible_colx,
self.gridline_colour_index,
self.cached_page_break_preview_mag_factor,
self.cached_normal_view_mag_factor
) = unpack("<HHHHxxHH", data[:14])
else:
assert bv >= 30 # BIFF3-7
(options,
self.first_visible_rowx, self.first_visible_colx,
) = unpack("<HHH", data[:6])
self.gridline_colour_rgb = unpack("<BBB", data[6:9])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
# options -- Bit, Mask, Contents:
# 0 0001H 0 = Show formula results 1 = Show formulas
# 1 0002H 0 = Do not show grid lines 1 = Show grid lines
# 2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
# 3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
# 4 0010H 0 = Show zero values as empty cells 1 = Show zero values
# 5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
# 6 0040H 0 = Columns from left to right 1 = Columns from right to left
# 7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
# 8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
# 9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
# 10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
# 11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
# The freeze flag specifies, if a following PANE record (6.71) describes unfrozen or frozen panes.
for attr, _unused_defval in _WINDOW2_options:
setattr(self, attr, options & 1)
options >>= 1
elif rc == XL_SCL:
num, den = unpack("<HH", data)
result = 0
if den:
result = (num * 100) // den
if not(10 <= result <= 400):
if DEBUG or self.verbosity >= 0:
print((
"WARNING *** SCL rcd sheet %d: should have 0.1 <= num/den <= 4; got %d/%d"
% (self.number, num, den)
), file=self.logfile)
result = 100
self.scl_mag_factor = result
elif rc == XL_PANE:
(
self.vert_split_pos,
self.horz_split_pos,
self.horz_split_first_visible,
self.vert_split_first_visible,
self.split_active_pane,
) = unpack("<HHHHB", data[:9])
self.has_pane_record = 1
elif rc == XL_HORIZONTALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.horizontal_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 255))
pos += 2
else:
while pos < data_len:
self.horizontal_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
elif rc == XL_VERTICALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.vertical_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 65535))
pos += 2
else:
while pos < data_len:
self.vertical_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
#### all of the following are for BIFF <= 4W
elif bv <= 45:
if rc == XL_FORMAT or rc == XL_FORMAT2:
bk.handle_format(data, rc)
elif rc == XL_FONT or rc == XL_FONT_B3B4:
bk.handle_font(data)
elif rc == XL_STYLE:
if not self.book._xf_epilogue_done:
self.book.xf_epilogue()
bk.handle_style(data)
elif rc == XL_PALETTE:
bk.handle_palette(data)
elif rc == XL_BUILTINFMTCOUNT:
bk.handle_builtinfmtcount(data)
elif rc == XL_XF4 or rc == XL_XF3 or rc == XL_XF2: #### N.B. not XL_XF
bk.handle_xf(data)
elif rc == XL_DATEMODE:
bk.handle_datemode(data)
elif rc == XL_CODEPAGE:
bk.handle_codepage(data)
elif rc == XL_FILEPASS:
bk.handle_filepass(data)
elif rc == XL_WRITEACCESS:
bk.handle_writeaccess(data)
elif rc == XL_IXFE:
self._ixfe = local_unpack('<H', data)[0]
elif rc == XL_NUMBER_B2:
rowx, colx, cell_attr, d = local_unpack('<HH3sd', data)
self_put_cell(rowx, colx, None, d, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_INTEGER:
rowx, colx, cell_attr, d = local_unpack('<HH3sH', data)
self_put_cell(rowx, colx, None, float(d), self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_LABEL_B2:
rowx, colx, cell_attr = local_unpack('<HH3s', data[0:7])
strg = unpack_string(data, 7, bk.encoding or bk.derive_encoding(), lenlen=1)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BOOLERR_B2:
rowx, colx, cell_attr, value, is_err = local_unpack('<HH3sBB', data)
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR_B2", rowx, colx, cell_attr, value, is_err
self_put_cell(rowx, colx, cellty, value, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BLANK_B2:
if not fmt_info: continue
rowx, colx, cell_attr = local_unpack('<HH3s', data[:7])
self_put_cell(rowx, colx, XL_CELL_BLANK, '', self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_EFONT:
bk.handle_efont(data)
elif rc == XL_ROW_B2:
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH2xB', data[0:11])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW_B2 record has row index %d; " \
"should have 0 <= rowx < %d -- record ignored!" \
% (rowx, self.utter_max_rows), file=self.logfile)
continue
if not (bits2 & 1): # has_default_xf_index is false
xf_index = -1
elif data_len == 18:
# Seems the XF index in the cell_attr is dodgy
xfx = local_unpack('<H', data[16:18])[0]
xf_index = self.fixed_BIFF2_xfindex(cell_attr=None, rowx=rowx, colx=-1, true_xfx=xfx)
else:
cell_attr = data[13:16]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx=-1)
key = (bits1, bits2, xf_index)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.has_default_xf_index = bits2 & 1
r.xf_index = xf_index
# r.outline_level = 0 # set in __init__
# r.outline_group_starts_ends = 0 # set in __init__
# r.hidden = 0 # set in __init__
# r.height_mismatch = 0 # set in __init__
# r.additional_space_above = 0 # set in __init__
# r.additional_space_below = 0 # set in __init__
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW_B2', rowx, bits1, bits2, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc == XL_COLWIDTH: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx, width\
= local_unpack("<BBH", data[:4])
if not(first_colx <= last_colx):
print("*** NOTE: COLWIDTH record has first col index %d, last %d; " \
"should have first <= last -- record ignored!" \
% (first_colx, last_colx), file=self.logfile)
continue
for colx in xrange(first_colx, last_colx+1):
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.width = width
if blah:
fprintf(
self.logfile,
"COLWIDTH sheet #%d cols %d-%d: wid=%d\n",
self.number, first_colx, last_colx, width
)
elif rc == XL_COLUMNDEFAULT: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx = local_unpack("<HH", data[:4])
#### Warning OOo docs wrong; first_colx <= colx < last_colx
if blah:
fprintf(
self.logfile,
"COLUMNDEFAULT sheet #%d cols in range(%d, %d)\n",
self.number, first_colx, last_colx
)
if not(0 <= first_colx < last_colx <= 256):
print("*** NOTE: COLUMNDEFAULT record has first col index %d, last %d; " \
"should have 0 <= first < last <= 256" \
% (first_colx, last_colx), file=self.logfile)
last_colx = min(last_colx, 256)
for colx in xrange(first_colx, last_colx):
offset = 4 + 3 * (colx - first_colx)
cell_attr = data[offset:offset+3]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx=-1, colx=colx)
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.xf_index = xf_index
elif rc == XL_WINDOW2_B2: # BIFF 2 only
attr_names = ("show_formulas", "show_grid_lines", "show_sheet_headers",
"panes_are_frozen", "show_zero_values")
for attr, char in zip(attr_names, data[0:5]):
setattr(self, attr, int(char != b'\0'))
(self.first_visible_rowx, self.first_visible_colx,
self.automatic_grid_line_colour,
) = unpack("<HHB", data[5:10])
self.gridline_colour_rgb = unpack("<BBB", data[10:13])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
else:
# if DEBUG: print "SHEET.READ: Unhandled record type %02x %d bytes %r" % (rc, data_len, data)
pass
if not eof_found:
raise XLRDError("Sheet %d (%r) missing EOF record" \
% (self.number, self.name))
self.tidy_dimensions()
self.update_cooked_mag_factors()
bk._position = oldpos
return 1
def string_record_contents(self, data):
bv = self.biff_version
bk = self.book
lenlen = (bv >= 30) + 1
nchars_expected = unpack("<" + "BH"[lenlen - 1], data[:lenlen])[0]
offset = lenlen
if bv < 80:
enc = bk.encoding or bk.derive_encoding()
nchars_found = 0
result = UNICODE_LITERAL("")
while 1:
if bv >= 80:
flag = BYTES_ORD(data[offset]) & 1
enc = ("latin_1", "utf_16_le")[flag]
offset += 1
chunk = unicode(data[offset:], enc)
result += chunk
nchars_found += len(chunk)
if nchars_found == nchars_expected:
return result
if nchars_found > nchars_expected:
msg = ("STRING/CONTINUE: expected %d chars, found %d"
% (nchars_expected, nchars_found))
raise XLRDError(msg)
rc, _unused_len, data = bk.get_record_parts()
if rc != XL_CONTINUE:
raise XLRDError(
"Expected CONTINUE record; found record-type 0x%04X" % rc)
offset = 0
def update_cooked_mag_factors(self):
# Cached values are used ONLY for the non-active view mode.
# When the user switches to the non-active view mode,
# if the cached value for that mode is not valid,
# Excel pops up a window which says:
# "The number must be between 10 and 400. Try again by entering a number in this range."
# When the user hits OK, it drops into the non-active view mode
# but uses the magn from the active mode.
# NOTE: definition of "valid" depends on mode ... see below
blah = DEBUG or self.verbosity > 0
if self.show_in_page_break_preview:
if self.scl_mag_factor is None: # no SCL record
self.cooked_page_break_preview_mag_factor = 100 # Yes, 100, not 60, NOT a typo
else:
self.cooked_page_break_preview_mag_factor = self.scl_mag_factor
zoom = self.cached_normal_view_mag_factor
if not (10 <= zoom <= 400):
if blah:
print((
"WARNING *** WINDOW2 rcd sheet %d: Bad cached_normal_view_mag_factor: %d"
% (self.number, self.cached_normal_view_mag_factor)
), file=self.logfile)
zoom = self.cooked_page_break_preview_mag_factor
self.cooked_normal_view_mag_factor = zoom
else:
# normal view mode
if self.scl_mag_factor is None: # no SCL record
self.cooked_normal_view_mag_factor = 100
else:
self.cooked_normal_view_mag_factor = self.scl_mag_factor
zoom = self.cached_page_break_preview_mag_factor
if not zoom:
# VALID, defaults to 60
zoom = 60
elif not (10 <= zoom <= 400):
if blah:
print((
"WARNING *** WINDOW2 rcd sheet %r: Bad cached_page_break_preview_mag_factor: %r"
% (self.number, self.cached_page_break_preview_mag_factor)
), file=self.logfile)
zoom = self.cooked_normal_view_mag_factor
self.cooked_page_break_preview_mag_factor = zoom
def fixed_BIFF2_xfindex(self, cell_attr, rowx, colx, true_xfx=None):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
if self.biff_version == 21:
if self.book.xf_list:
if true_xfx is not None:
xfx = true_xfx
else:
xfx = BYTES_ORD(cell_attr[0]) & 0x3F
if xfx == 0x3F:
if self._ixfe is None:
raise XLRDError("BIFF2 cell record has XF index 63 but no preceding IXFE record.")
xfx = self._ixfe
# The OOo docs could be read as saying that each cell record
# is immediately preceded by its own IXFE record.
# Empirical evidence is that (sensibly) an IXFE record applies to all
# following cell records until another IXFE comes along.
return xfx
# Have either Excel 2.0, or broken 2.1 w/o XF records -- same effect.
self.biff_version = self.book.biff_version = 20
#### check that XF slot in cell_attr is zero
xfx_slot = BYTES_ORD(cell_attr[0]) & 0x3F
assert xfx_slot == 0
xfx = self._cell_attr_to_xfx.get(cell_attr)
if xfx is not None:
return xfx
if blah:
fprintf(self.logfile, "New cell_attr %r at (%r, %r)\n", cell_attr, rowx, colx)
if not self.book.xf_list:
for xfx in xrange(16):
self.insert_new_BIFF20_xf(cell_attr=b"\x40\x00\x00", style=xfx < 15)
xfx = self.insert_new_BIFF20_xf(cell_attr=cell_attr)
return xfx
def insert_new_BIFF20_xf(self, cell_attr, style=0):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
book = self.book
xfx = len(book.xf_list)
xf = self.fake_XF_from_BIFF20_cell_attr(cell_attr, style)
xf.xf_index = xfx
book.xf_list.append(xf)
if blah:
xf.dump(self.logfile, header="=== Faked XF %d ===" % xfx, footer="======")
if xf.format_key not in book.format_map:
if xf.format_key:
msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
fmt = Format(xf.format_key, FUN, UNICODE_LITERAL("General"))
book.format_map[xf.format_key] = fmt
book.format_list.append(fmt)
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
fmt = book.format_map[xf.format_key]
cellty = cellty_from_fmtty[fmt.type]
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
self._cell_attr_to_xfx[cell_attr] = xfx
return xfx
def fake_XF_from_BIFF20_cell_attr(self, cell_attr, style=0):
from .formatting import XF, XFAlignment, XFBorder, XFBackground, XFProtection
xf = XF()
xf.alignment = XFAlignment()
xf.alignment.indent_level = 0
xf.alignment.shrink_to_fit = 0
xf.alignment.text_direction = 0
xf.border = XFBorder()
xf.border.diag_up = 0
xf.border.diag_down = 0
xf.border.diag_colour_index = 0
xf.border.diag_line_style = 0 # no line
xf.background = XFBackground()
xf.protection = XFProtection()
(prot_bits, font_and_format, halign_etc) = unpack('<BBB', cell_attr)
xf.format_key = font_and_format & 0x3F
xf.font_index = (font_and_format & 0xC0) >> 6
upkbits(xf.protection, prot_bits, (
(6, 0x40, 'cell_locked'),
(7, 0x80, 'formula_hidden'),
))
xf.alignment.hor_align = halign_etc & 0x07
for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
if halign_etc & mask:
colour_index, line_style = 8, 1 # black, thin
else:
colour_index, line_style = 0, 0 # none, none
setattr(xf.border, side + '_colour_index', colour_index)
setattr(xf.border, side + '_line_style', line_style)
bg = xf.background
if halign_etc & 0x80:
bg.fill_pattern = 17
else:
bg.fill_pattern = 0
bg.background_colour_index = 9 # white
bg.pattern_colour_index = 8 # black
xf.parent_style_index = (0x0FFF, 0)[style]
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, 1)
return xf
def req_fmt_info(self):
if not self.formatting_info:
raise XLRDError("Feature requires open_workbook(..., formatting_info=True)")
##
# Determine column display width.
# <br /> -- New in version 0.6.1
# <br />
# @param colx Index of the queried column, range 0 to 255.
# Note that it is possible to find out the width that will be used to display
# columns with no cell information e.g. column IV (colx=255).
# @return The column width that will be used for displaying
# the given column by Excel, in units of 1/256th of the width of a
# standard character (the digit zero in the first font).
def computed_column_width(self, colx):
self.req_fmt_info()
if self.biff_version >= 80:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
if self.standardwidth is not None:
return self.standardwidth
elif self.biff_version >= 40:
if self.gcw[colx]:
if self.standardwidth is not None:
return self.standardwidth
else:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
elif self.biff_version == 30:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
# All roads lead to Rome and the DEFCOLWIDTH ...
if self.defcolwidth is not None:
return self.defcolwidth * 256
return 8 * 256 # 8 is what Excel puts in a DEFCOLWIDTH record
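# A rough usage sketch: the result is in units of 1/256 of the zero-character
# width, so dividing by 256.0 gives an approximate width in characters:
#   approx_chars = thesheet.computed_column_width(colx) / 256.0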
def handle_hlink(self, data):
# DEBUG = 1
if DEBUG: print("\n=== hyperlink ===", file=self.logfile)
record_size = len(data)
h = Hyperlink()
h.frowx, h.lrowx, h.fcolx, h.lcolx, guid0, dummy, options = unpack('<HHHH16s4si', data[:32])
assert guid0 == b"\xD0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B"
assert dummy == b"\x02\x00\x00\x00"
if DEBUG: print("options: %08X" % options, file=self.logfile)
offset = 32
def get_nul_terminated_unicode(buf, ofs):
nb = unpack('<L', buf[ofs:ofs+4])[0] * 2
ofs += 4
uc = unicode(buf[ofs:ofs+nb], 'UTF-16le')[:-1]
ofs += nb
return uc, ofs
if options & 0x14: # has a description
h.desc, offset = get_nul_terminated_unicode(data, offset)
if options & 0x80: # has a target
h.target, offset = get_nul_terminated_unicode(data, offset)
if (options & 1) and not (options & 0x100): # HasMoniker and not MonikerSavedAsString
# an OLEMoniker structure
clsid, = unpack('<16s', data[offset:offset + 16])
if DEBUG: fprintf(self.logfile, "clsid=%r\n", clsid)
offset += 16
if clsid == b"\xE0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B":
# E0H C9H EAH 79H F9H BAH CEH 11H 8CH 82H 00H AAH 00H 4BH A9H 0BH
# URL Moniker
h.type = UNICODE_LITERAL('url')
nbytes = unpack('<L', data[offset:offset + 4])[0]
offset += 4
h.url_or_path = unicode(data[offset:offset + nbytes], 'UTF-16le')
if DEBUG: fprintf(self.logfile, "initial url=%r len=%d\n", h.url_or_path, len(h.url_or_path))
endpos = h.url_or_path.find('\x00')
if DEBUG: print("endpos=%d" % endpos, file=self.logfile)
h.url_or_path = h.url_or_path[:endpos]
true_nbytes = 2 * (endpos + 1)
offset += true_nbytes
extra_nbytes = nbytes - true_nbytes
extra_data = data[offset:offset + extra_nbytes]
offset += extra_nbytes
if DEBUG:
fprintf(
self.logfile,
"url=%r\nextra=%r\nnbytes=%d true_nbytes=%d extra_nbytes=%d\n",
h.url_or_path, extra_data, nbytes, true_nbytes, extra_nbytes,
)
assert extra_nbytes in (24, 0)
elif clsid == b"\x03\x03\x00\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46":
# file moniker
h.type = UNICODE_LITERAL('local file')
uplevels, nbytes = unpack("<Hi", data[offset:offset + 6])
offset += 6
shortpath = b"..\\" * uplevels + data[offset:offset + nbytes - 1] #### BYTES, not unicode
if DEBUG: fprintf(self.logfile, "uplevels=%d shortpath=%r\n", uplevels, shortpath)
offset += nbytes
offset += 24 # OOo: "unknown byte sequence"
# above is version 0xDEAD + 20 reserved zero bytes
sz = unpack('<i', data[offset:offset + 4])[0]
if DEBUG: print("sz=%d" % sz, file=self.logfile)
offset += 4
if sz:
xl = unpack('<i', data[offset:offset + 4])[0]
offset += 4
offset += 2 # "unknown byte sequence" MS: 0x0003
extended_path = unicode(data[offset:offset + xl], 'UTF-16le') # not zero-terminated
offset += xl
h.url_or_path = extended_path
else:
h.url_or_path = shortpath
#### MS KLUDGE WARNING ####
# The "shortpath" is bytes encoded in the **UNKNOWN** creator's "ANSI" encoding.
else:
fprintf(self.logfile, "*** unknown clsid %r\n", clsid)
elif options & 0x163 == 0x103: # UNC
h.type = UNICODE_LITERAL('unc')
h.url_or_path, offset = get_nul_terminated_unicode(data, offset)
elif options & 0x16B == 8:
h.type = UNICODE_LITERAL('workbook')
else:
h.type = UNICODE_LITERAL('unknown')
if options & 0x8: # has textmark
h.textmark, offset = get_nul_terminated_unicode(data, offset)
if DEBUG:
h.dump(header="... object dump ...")
print("offset=%d record_size=%d" % (offset, record_size))
extra_nbytes = record_size - offset
if extra_nbytes > 0:
fprintf(
self.logfile,
"*** WARNING: hyperlink at r=%d c=%d has %d extra data bytes: %s\n",
h.frowx,
h.fcolx,
extra_nbytes,
REPR(data[-extra_nbytes:])
)
# Seen: b"\x00\x00" also b"A\x00", b"V\x00"
elif extra_nbytes < 0:
raise XLRDError("Bug or corrupt file, send copy of input file for debugging")
self.hyperlink_list.append(h)
for rowx in xrange(h.frowx, h.lrowx+1):
for colx in xrange(h.fcolx, h.lcolx+1):
self.hyperlink_map[rowx, colx] = h
def handle_quicktip(self, data):
rcx, frowx, lrowx, fcolx, lcolx = unpack('<5H', data[:10])
assert rcx == XL_QUICKTIP
assert self.hyperlink_list
h = self.hyperlink_list[-1]
assert (frowx, lrowx, fcolx, lcolx) == (h.frowx, h.lrowx, h.fcolx, h.lcolx)
assert data[-2:] == b'\x00\x00'
h.quicktip = unicode(data[10:-2], 'utf_16_le')
def handle_msodrawingetc(self, recid, data_len, data):
if not OBJ_MSO_DEBUG:
return
DEBUG = 1
if self.biff_version < 80:
return
o = MSODrawing()
pos = 0
while pos < data_len:
tmp, fbt, cb = unpack('<HHI', data[pos:pos+8])
ver = tmp & 0xF
inst = (tmp >> 4) & 0xFFF
if ver == 0xF:
ndb = 0 # container
else:
ndb = cb
if DEBUG:
hex_char_dump(data, pos, ndb + 8, base=0, fout=self.logfile)
fprintf(self.logfile,
"fbt:0x%04X inst:%d ver:0x%X cb:%d (0x%04X)\n",
fbt, inst, ver, cb, cb)
if fbt == 0xF010: # Client Anchor
assert ndb == 18
(o.anchor_unk,
o.anchor_colx_lo, o.anchor_rowx_lo,
o.anchor_colx_hi, o.anchor_rowx_hi) = unpack('<Hiiii', data[pos+8:pos+8+ndb])
elif fbt == 0xF011: # Client Data
# must be followed by an OBJ record
assert cb == 0
assert pos + 8 == data_len
else:
pass
pos += ndb + 8
else:
# didn't break out of while loop
assert pos == data_len
if DEBUG:
o.dump(self.logfile, header="=== MSODrawing ===", footer= " ")
def handle_obj(self, data):
if self.biff_version < 80:
return None
o = MSObj()
data_len = len(data)
pos = 0
if OBJ_MSO_DEBUG:
fprintf(self.logfile, "... OBJ record len=%d...\n", data_len)
while pos < data_len:
ft, cb = unpack('<HH', data[pos:pos+4])
if OBJ_MSO_DEBUG:
fprintf(self.logfile, "pos=%d ft=0x%04X cb=%d\n", pos, ft, cb)
hex_char_dump(data, pos, cb + 4, base=0, fout=self.logfile)
if pos == 0 and not (ft == 0x15 and cb == 18):
if self.verbosity:
fprintf(self.logfile, "*** WARNING Ignoring antique or corrupt OBJECT record\n")
return None
if ft == 0x15: # ftCmo ... s/b first
assert pos == 0
o.type, o.id, option_flags = unpack('<HHH', data[pos+4:pos+10])
upkbits(o, option_flags, (
( 0, 0x0001, 'locked'),
( 4, 0x0010, 'printable'),
( 8, 0x0100, 'autofilter'), # not documented in Excel 97 dev kit
( 9, 0x0200, 'scrollbar_flag'), # not documented in Excel 97 dev kit
(13, 0x2000, 'autofill'),
(14, 0x4000, 'autoline'),
))
elif ft == 0x00:
if data[pos:data_len] == b'\0' * (data_len - pos):
# ignore "optional reserved" data at end of record
break
msg = "Unexpected data at end of OBJECT record"
fprintf(self.logfile, "*** ERROR %s\n" % msg)
hex_char_dump(data, pos, data_len - pos, base=0, fout=self.logfile)
raise XLRDError(msg)
elif ft == 0x0C: # Scrollbar
values = unpack('<5H', data[pos+8:pos+18])
for value, tag in zip(values, ('value', 'min', 'max', 'inc', 'page')):
setattr(o, 'scrollbar_' + tag, value)
elif ft == 0x0D: # "Notes structure" [used for cell comments]
# not documented in Excel 97 dev kit
if OBJ_MSO_DEBUG: fprintf(self.logfile, "*** OBJ record has ft==0x0D 'notes' structure\n")
elif ft == 0x13: # list box data
if o.autofilter: # non standard exit. NOT documented
break
else:
pass
pos += cb + 4
else:
# didn't break out of while loop
pass
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== MSOBj ===", footer= " ")
return o
def handle_note(self, data, txos):
if OBJ_MSO_DEBUG:
fprintf(self.logfile, '... NOTE record ...\n')
hex_char_dump(data, 0, len(data), base=0, fout=self.logfile)
o = Note()
data_len = len(data)
if self.biff_version < 80:
o.rowx, o.colx, expected_bytes = unpack('<HHH', data[:6])
nb = len(data) - 6
assert nb <= expected_bytes
pieces = [data[6:]]
expected_bytes -= nb
while expected_bytes > 0:
rc2, data2_len, data2 = self.book.get_record_parts()
assert rc2 == XL_NOTE
dummy_rowx, nb = unpack('<H2xH', data2[:6])
assert dummy_rowx == 0xFFFF
assert nb == data2_len - 6
pieces.append(data2[6:])
expected_bytes -= nb
assert expected_bytes == 0
enc = self.book.encoding or self.book.derive_encoding()
o.text = unicode(b''.join(pieces), enc)
o.rich_text_runlist = [(0, 0)]
o.show = 0
o.row_hidden = 0
o.col_hidden = 0
o.author = UNICODE_LITERAL('')
o._object_id = None
self.cell_note_map[o.rowx, o.colx] = o
return
# Excel 8.0+
o.rowx, o.colx, option_flags, o._object_id = unpack('<4H', data[:8])
o.show = (option_flags >> 1) & 1
o.row_hidden = (option_flags >> 7) & 1
o.col_hidden = (option_flags >> 8) & 1
# XL97 dev kit book says NULL [sic] bytes padding between string count and string data
# to ensure that string is word-aligned. Appears to be nonsense.
o.author, endpos = unpack_unicode_update_pos(data, 8, lenlen=2)
# There is a random/undefined byte after the author string (not counted in the
# string length).
# Issue 4 on github: Google Spreadsheet doesn't write the undefined byte.
assert (data_len - endpos) in (0, 1)
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== Note ===", footer= " ")
txo = txos.get(o._object_id)
if txo:
o.text = txo.text
o.rich_text_runlist = txo.rich_text_runlist
self.cell_note_map[o.rowx, o.colx] = o
def handle_txo(self, data):
if self.biff_version < 80:
return
o = MSTxo()
data_len = len(data)
fmt = '<HH6sHHH'
fmtsize = calcsize(fmt)
option_flags, o.rot, controlInfo, cchText, cbRuns, o.ifntEmpty = unpack(fmt, data[:fmtsize])
o.fmla = data[fmtsize:]
upkbits(o, option_flags, (
( 3, 0x000E, 'horz_align'),
( 6, 0x0070, 'vert_align'),
( 9, 0x0200, 'lock_text'),
(14, 0x4000, 'just_last'),
(15, 0x8000, 'secret_edit'),
))
totchars = 0
o.text = UNICODE_LITERAL('')
while totchars < cchText:
rc2, data2_len, data2 = self.book.get_record_parts()
assert rc2 == XL_CONTINUE
if OBJ_MSO_DEBUG:
hex_char_dump(data2, 0, data2_len, base=0, fout=self.logfile)
nb = BYTES_ORD(data2[0]) # 0 means latin1, 1 means utf_16_le
nchars = data2_len - 1
if nb:
assert nchars % 2 == 0
nchars //= 2
utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars)
assert endpos == data2_len
o.text += utext
totchars += nchars
o.rich_text_runlist = []
totruns = 0
while totruns < cbRuns: # counts of BYTES, not runs
rc3, data3_len, data3 = self.book.get_record_parts()
# print totruns, cbRuns, rc3, data3_len, repr(data3)
assert rc3 == XL_CONTINUE
assert data3_len % 8 == 0
for pos in xrange(0, data3_len, 8):
run = unpack('<HH4x', data3[pos:pos+8])
o.rich_text_runlist.append(run)
totruns += 8
# remove trailing entries that point to the end of the string
while o.rich_text_runlist and o.rich_text_runlist[-1][0] == cchText:
del o.rich_text_runlist[-1]
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== MSTxo ===", footer= " ")
print(o.rich_text_runlist, file=self.logfile)
return o
def handle_feat11(self, data):
if not OBJ_MSO_DEBUG:
return
# rt: Record type; this matches the BIFF rt in the first two bytes of the record; =0872h
# grbitFrt: FRT cell reference flag (see table below for details)
# Ref0: Range reference to a worksheet cell region if grbitFrt=1 (bitFrtRef). Otherwise blank.
# isf: Shared feature type index =5 for Table
# fHdr: =0 since this is for feat not feat header
# reserved0: Reserved for future use =0 for Table
# cref: Count of ref ranges this feature is on
# cbFeatData: Count of byte for the current feature data.
# reserved1: =0 currently not used
# Ref1: Repeat of Ref0. UNDOCUMENTED
rt, grbitFrt, Ref0, isf, fHdr, reserved0, cref, cbFeatData, reserved1, Ref1 = unpack('<HH8sHBiHiH8s', data[0:35])
assert reserved0 == 0
assert reserved1 == 0
assert isf == 5
assert rt == 0x872
assert fHdr == 0
assert Ref1 == Ref0
print(self.logfile, "FEAT11: grbitFrt=%d Ref0=%r cref=%d cbFeatData=%d\n", grbitFrt, Ref0, cref, cbFeatData)
# lt: Table data source type:
# =0 for Excel Worksheet Table =1 for read-write SharePoint linked List
# =2 for XML mapper Table =3 for Query Table
# idList: The ID of the Table (unique per worksheet)
# crwHeader: How many header/title rows the Table has at the top
# crwTotals: How many total rows the Table has at the bottom
# idFieldNext: Next id to try when assigning a unique id to a new field
# cbFSData: The size of the Fixed Data portion of the Table data structure.
# rupBuild: the rupBuild that generated the record
# unusedShort: UNUSED short that can be used later. The value is reserved during round-tripping.
# listFlags: Collection of bit flags: (see listFlags' bit setting table below for detail.)
# lPosStmCache: Table data stream position of cached data
# cbStmCache: Count of bytes of cached data
# cchStmCache: Count of characters of uncompressed cached data in the stream
# lem: Table edit mode (see List (Table) Editing Mode (lem) setting table below for details.)
# rgbHashParam: Hash value for SharePoint Table
# cchName: Count of characters in the Table name string rgbName
(lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
rupBuild, unusedShort, listFlags, lPosStmCache, cbStmCache,
cchStmCache, lem, rgbHashParam, cchName) = unpack('<iiiiiiHHiiiii16sH', data[35:35+66])
print("lt=%d idList=%d crwHeader=%d crwTotals=%d idFieldNext=%d cbFSData=%d\n"\
"rupBuild=%d unusedShort=%d listFlags=%04X lPosStmCache=%d cbStmCache=%d\n"\
"cchStmCache=%d lem=%d rgbHashParam=%r cchName=%d" % (
lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
rupBuild, unusedShort,listFlags, lPosStmCache, cbStmCache,
cchStmCache, lem, rgbHashParam, cchName), file=self.logfile)
class MSODrawing(BaseObject):
pass
class MSObj(BaseObject):
pass
class MSTxo(BaseObject):
pass
##
# <p> Represents a user "comment" or "note".
# Note objects are accessible through Sheet.{@link #Sheet.cell_note_map}.
# <br />-- New in version 0.7.2
# </p>
class Note(BaseObject):
##
# Author of note
author = UNICODE_LITERAL('')
##
# True if the containing column is hidden
col_hidden = 0
##
# Column index
colx = 0
##
# List of (offset_in_string, font_index) tuples.
# Unlike Sheet.{@link #Sheet.rich_text_runlist_map}, the first offset should always be 0.
rich_text_runlist = None
##
# True if the containing row is hidden
row_hidden = 0
##
# Row index
rowx = 0
##
# True if note is always shown
show = 0
##
# Text of the note
text = UNICODE_LITERAL('')
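# Illustrative usage sketch (editorial addition, not part of the original
# module): reading cell comments through Sheet.cell_note_map as documented
# above. The file name "example.xls" is a placeholder.
def _example_dump_notes(path="example.xls"):
    import xlrd
    sheet = xlrd.open_workbook(path).sheet_by_index(0)
    for (rowx, colx), note in sheet.cell_note_map.items():
        print(rowx, colx, note.author, note.text)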
##
# <p>Contains the attributes of a hyperlink.
# Hyperlink objects are accessible through Sheet.{@link #Sheet.hyperlink_list}
# and Sheet.{@link #Sheet.hyperlink_map}.
# <br />-- New in version 0.7.2
# </p>
class Hyperlink(BaseObject):
##
# Index of first row
frowx = None
##
# Index of last row
lrowx = None
##
# Index of first column
fcolx = None
##
# Index of last column
lcolx = None
##
# Type of hyperlink. Unicode string, one of 'url', 'unc',
# 'local file', 'workbook', 'unknown'
type = None
##
# The URL or file-path, depending in the type. Unicode string, except
# in the rare case of a local but non-existent file with non-ASCII
# characters in the name, in which case only the "8.3" filename is available,
# as a bytes (3.x) or str (2.x) string, <i>with unknown encoding.</i>
url_or_path = None
##
# Description ... this is displayed in the cell,
# and should be identical to the cell value. Unicode string, or None. It seems
# impossible NOT to have a description created by the Excel UI.
desc = None
##
# Target frame. Unicode string. Note: I have not seen a case of this.
# It seems impossible to create one in the Excel UI.
target = None
##
# "Textmark": the piece after the "#" in
# "http://docs.python.org/library#struct_module", or the Sheet1!A1:Z99
# part when type is "workbook".
textmark = None
##
# The text of the "quick tip" displayed when the cursor
# hovers over the hyperlink.
quicktip = None
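# Illustrative usage sketch (editorial addition, not part of the original
# module): iterating the Hyperlink objects collected on a sheet, as documented
# above. The file name "example.xls" is a placeholder.
def _example_dump_hyperlinks(path="example.xls"):
    import xlrd
    sheet = xlrd.open_workbook(path).sheet_by_index(0)
    for link in sheet.hyperlink_list:
        print(link.type, link.url_or_path, (link.frowx, link.fcolx))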
# === helpers ===
def unpack_RK(rk_str):
flags = BYTES_ORD(rk_str[0])
if flags & 2:
# There's a SIGNED 30-bit integer in there!
i, = unpack('<i', rk_str)
i >>= 2 # div by 4 to drop the 2 flag bits
if flags & 1:
return i / 100.0
return float(i)
else:
# It's the most significant 30 bits of an IEEE 754 64-bit FP number
d, = unpack('<d', b'\0\0\0\0' + BYTES_LITERAL(chr(flags & 252)) + rk_str[1:4])
if flags & 1:
return d / 100.0
return d
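# Worked examples for the two integer RK variants handled above (an editorial
# sketch, not part of the original source): a plain 30-bit integer, and an
# integer that must be divided by 100.
def _example_unpack_RK():
    from struct import pack
    assert unpack_RK(pack('<i', (3 << 2) | 2)) == 3.0     # flag bit 1 set: 30-bit integer
    assert unpack_RK(pack('<i', (314 << 2) | 3)) == 3.14  # flag bit 0 also set: divide by 100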
##### =============== Cell ======================================== #####
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
ctype_text = {
XL_CELL_EMPTY: 'empty',
XL_CELL_TEXT: 'text',
XL_CELL_NUMBER: 'number',
XL_CELL_DATE: 'xldate',
XL_CELL_BOOLEAN: 'bool',
XL_CELL_ERROR: 'error',
XL_CELL_BLANK: 'blank',
}
##
# <p>Contains the data for one cell.</p>
#
# <p>WARNING: You don't call this class yourself. You access Cell objects
# via methods of the {@link #Sheet} object(s) that you found in the {@link #Book} object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
# <p> Cell objects have three attributes: <i>ctype</i> is an int, <i>value</i>
# (which depends on <i>ctype</i>) and <i>xf_index</i>.
# If "formatting_info" is not enabled when the workbook is opened, xf_index will be None.
# The following table describes the types of cells and how their values
# are represented in Python.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Type symbol</th>
# <th>Type number</th>
# <th>Python value</th>
# </tr>
# <tr>
# <td>XL_CELL_EMPTY</td>
# <td align="center">0</td>
# <td>empty string u''</td>
# </tr>
# <tr>
# <td>XL_CELL_TEXT</td>
# <td align="center">1</td>
# <td>a Unicode string</td>
# </tr>
# <tr>
# <td>XL_CELL_NUMBER</td>
# <td align="center">2</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_DATE</td>
# <td align="center">3</td>
# <td>float</td>
# </tr>
# <tr>
# <td>XL_CELL_BOOLEAN</td>
# <td align="center">4</td>
# <td>int; 1 means TRUE, 0 means FALSE</td>
# </tr>
# <tr>
# <td>XL_CELL_ERROR</td>
# <td align="center">5</td>
# <td>int representing internal Excel codes; for a text representation,
# refer to the supplied dictionary error_text_from_code</td>
# </tr>
# <tr>
# <td>XL_CELL_BLANK</td>
# <td align="center">6</td>
# <td>empty string u''. Note: this type will appear only when
# open_workbook(..., formatting_info=True) is used.</td>
# </tr>
# </table>
#<p></p>
class Cell(BaseObject):
__slots__ = ['ctype', 'value', 'xf_index']
def __init__(self, ctype, value, xf_index=None):
self.ctype = ctype
self.value = value
self.xf_index = xf_index
def __repr__(self):
if self.xf_index is None:
return "%s:%r" % (ctype_text[self.ctype], self.value)
else:
return "%s:%r (XF:%r)" % (ctype_text[self.ctype], self.value, self.xf_index)
empty_cell = Cell(XL_CELL_EMPTY, UNICODE_LITERAL(''))
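# Illustrative sketch (editorial addition, not part of the original module):
# mapping a cell's integer type code to its symbolic name with ctype_text, as
# listed in the table above. The file name "example.xls" is a placeholder.
def _example_describe_cell(path="example.xls"):
    import xlrd
    cell = xlrd.open_workbook(path).sheet_by_index(0).cell(0, 0)
    return ctype_text.get(cell.ctype, 'unknown'), cell.value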
##### =============== Colinfo and Rowinfo ============================== #####
##
# Width and default formatting information that applies to one or
# more columns in a sheet. Derived from COLINFO records.
#
# <p> Here is the default hierarchy for width, according to the OOo docs:
#
# <br />"""In BIFF3, if a COLINFO record is missing for a column,
# the width specified in the record DEFCOLWIDTH is used instead.
#
# <br />In BIFF4-BIFF7, the width set in this [COLINFO] record is only used,
# if the corresponding bit for this column is cleared in the GCW
# record, otherwise the column width set in the DEFCOLWIDTH record
# is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]).
#
# <br />In BIFF8, if a COLINFO record is missing for a column,
# the width specified in the record STANDARDWIDTH is used.
# If this [STANDARDWIDTH] record is also missing,
# the column width of the record DEFCOLWIDTH is used instead."""
# <br />
#
# Footnote: The docs on the GCW record say this:
# """<br />
# If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH
# record. If a bit is cleared, the corresponding column uses the width set in the
# COLINFO record for this column.
# <br />If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if
# the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH
# record of the worksheet will be used instead.
# <br />"""<br />
# At the moment (2007-01-17) xlrd is going with the GCW version of the story.
# Reference to the source may be useful: see the computed_column_width(colx) method
# of the Sheet class.
# <br />-- New in version 0.6.1
# </p>
class Colinfo(BaseObject):
##
# Width of the column in 1/256 of the width of the zero character,
# using default font (first FONT record in the file).
width = 0
##
# XF index to be used for formatting empty cells.
xf_index = -1
##
# 1 = column is hidden
hidden = 0
##
# Value of a 1-bit flag whose purpose is unknown
# but is often seen set to 1
bit1_flag = 0
##
# Outline level of the column, in range(7).
# (0 = no outline)
outline_level = 0
##
# 1 = column is collapsed
collapsed = 0
_USE_SLOTS = 1
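# Illustrative usage sketch (editorial addition, not part of the original
# module): resolving the effective display width of a column through the
# COLINFO/GCW/STANDARDWIDTH/DEFCOLWIDTH hierarchy described above. The file
# name is a placeholder; formatting_info=True is required.
def _example_column_width(path="example.xls", colx=0):
    import xlrd
    sheet = xlrd.open_workbook(path, formatting_info=True).sheet_by_index(0)
    return sheet.computed_column_width(colx) / 256.0  # approximate width in characters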
##
# <p>Height and default formatting information that applies to a row in a sheet.
# Derived from ROW records.
# <br /> -- New in version 0.6.1</p>
#
# <p><b>height</b>: Height of the row, in twips. One twip == 1/20 of a point.</p>
#
# <p><b>has_default_height</b>: 0 = Row has custom height; 1 = Row has default height.</p>
#
# <p><b>outline_level</b>: Outline level of the row (0 to 7) </p>
#
# <p><b>outline_group_starts_ends</b>: 1 = Outline group starts or ends here (depending on where the
# outline buttons are located, see WSBOOL record [TODO ??]),
# <i>and</i> is collapsed </p>
#
# <p><b>hidden</b>: 1 = Row is hidden (manually, or by a filter or outline group) </p>
#
# <p><b>height_mismatch</b>: 1 = Row height and default font height do not match </p>
#
# <p><b>has_default_xf_index</b>: 1 = the xf_index attribute is usable; 0 = ignore it </p>
#
# <p><b>xf_index</b>: Index to default XF record for empty cells in this row.
# Don't use this if has_default_xf_index == 0. </p>
#
# <p><b>additional_space_above</b>: This flag is set, if the upper border of at least one cell in this row
# or if the lower border of at least one cell in the row above is
# formatted with a thick line style. Thin and medium line styles are not
# taken into account. </p>
#
# <p><b>additional_space_below</b>: This flag is set, if the lower border of at least one cell in this row
# or if the upper border of at least one cell in the row below is
# formatted with a medium or thick line style. Thin line styles are not
# taken into account. </p>
class Rowinfo(BaseObject):
if _USE_SLOTS:
__slots__ = (
"height",
"has_default_height",
"outline_level",
"outline_group_starts_ends",
"hidden",
"height_mismatch",
"has_default_xf_index",
"xf_index",
"additional_space_above",
"additional_space_below",
)
def __init__(self):
self.height = None
self.has_default_height = None
self.outline_level = None
self.outline_group_starts_ends = None
self.hidden = None
self.height_mismatch = None
self.has_default_xf_index = None
self.xf_index = None
self.additional_space_above = None
self.additional_space_below = None
def __getstate__(self):
return (
self.height,
self.has_default_height,
self.outline_level,
self.outline_group_starts_ends,
self.hidden,
self.height_mismatch,
self.has_default_xf_index,
self.xf_index,
self.additional_space_above,
self.additional_space_below,
)
def __setstate__(self, state):
(
self.height,
self.has_default_height,
self.outline_level,
self.outline_group_starts_ends,
self.hidden,
self.height_mismatch,
self.has_default_xf_index,
self.xf_index,
self.additional_space_above,
self.additional_space_below,
) = state
|
tudorvio/nova
|
refs/heads/master
|
nova/tests/unit/objects/test_instance_group.py
|
26
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.objects import instance_group
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_objects
from nova.tests.unit import utils as tests_utils
class _TestInstanceGroupObjects(object):
def setUp(self):
super(_TestInstanceGroupObjects, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
members=None):
return db.instance_group_create(context, values, policies=policies,
members=members)
def test_get_by_uuid(self):
values = self._get_default_values()
policies = ['policy1', 'policy2']
members = ['instance_id1', 'instance_id2']
db_result = self._create_instance_group(self.context, values,
policies=policies,
members=members)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.members, members)
self.assertEqual(obj_result.policies, policies)
def test_get_by_instance_uuid(self):
values = self._get_default_values()
policies = ['policy1', 'policy2']
members = ['instance_id1', 'instance_id2']
db_result = self._create_instance_group(self.context, values,
policies=policies,
members=members)
obj_result = instance_group.InstanceGroup.get_by_instance_uuid(
self.context, 'instance_id1')
self.assertEqual(obj_result.uuid, db_result.uuid)
def test_refresh(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, db_result['uuid'],
values)
obj_result.refresh()
self.assertEqual(obj_result.name, 'new_name')
self.assertEqual(set([]), obj_result.obj_what_changed())
def test_save_simple(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
obj_result.name = 'new_name'
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['name'], 'new_name')
def test_save_policies(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
policies = ['policy1', 'policy2']
obj_result.policies = policies
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['policies'], policies)
def test_save_members(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
members = ['instance1', 'instance2']
obj_result.members = members
fake_notifier.NOTIFICATIONS = []
obj_result.save()
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('servergroup.update', msg.event_type)
self.assertEqual(members, msg.payload['members'])
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['members'], members)
def test_create(self):
group1 = instance_group.InstanceGroup(context=self.context)
group1.uuid = 'fake-uuid'
group1.name = 'fake-name'
fake_notifier.NOTIFICATIONS = []
group1.create()
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(group1.name, msg.payload['name'])
self.assertEqual(group1.uuid, msg.payload['server_group_id'])
self.assertEqual('servergroup.create', msg.event_type)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.uuid, group2.uuid)
self.assertEqual(group1.name, group2.name)
result = db.instance_group_get(self.context, group1.uuid)
self.assertEqual(group1.id, result.id)
self.assertEqual(group1.uuid, result.uuid)
self.assertEqual(group1.name, result.name)
def test_create_with_policies(self):
group1 = instance_group.InstanceGroup(context=self.context)
group1.policies = ['policy1', 'policy2']
group1.create()
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.policies, group2.policies)
def test_create_with_members(self):
group1 = instance_group.InstanceGroup(context=self.context)
group1.members = ['instance1', 'instance2']
group1.create()
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.members, group2.members)
def test_recreate_fails(self):
group = instance_group.InstanceGroup(context=self.context)
group.create()
self.assertRaises(exception.ObjectActionError, group.create,
self.context)
def test_destroy(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
group = instance_group.InstanceGroup(context=self.context)
group.id = result.id
group.uuid = result.uuid
fake_notifier.NOTIFICATIONS = []
group.destroy()
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('servergroup.delete', msg.event_type)
self.assertEqual(group.uuid, msg.payload['server_group_id'])
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get, self.context, result['uuid'])
def _populate_instances(self):
instances = [(str(uuid.uuid4()), 'f1', 'p1'),
(str(uuid.uuid4()), 'f2', 'p1'),
(str(uuid.uuid4()), 'f3', 'p2'),
(str(uuid.uuid4()), 'f4', 'p2')]
for instance in instances:
values = self._get_default_values()
values['uuid'] = instance[0]
values['name'] = instance[1]
values['project_id'] = instance[2]
self._create_instance_group(self.context, values)
return instances
def test_list_all(self):
self._populate_instances()
inst_list = instance_group.InstanceGroupList.get_all(self.context)
groups = db.instance_group_get_all(self.context)
self.assertEqual(len(groups), len(inst_list.objects))
self.assertEqual(len(groups), 4)
for i in range(0, len(groups)):
self.assertIsInstance(inst_list.objects[i],
instance_group.InstanceGroup)
self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
def test_list_by_project_id(self):
self._populate_instances()
project_ids = ['p1', 'p2']
for id in project_ids:
il = instance_group.InstanceGroupList.get_by_project_id(
self.context, id)
groups = db.instance_group_get_all_by_project_id(self.context, id)
self.assertEqual(len(groups), len(il.objects))
self.assertEqual(len(groups), 2)
for i in range(0, len(groups)):
self.assertIsInstance(il.objects[i],
instance_group.InstanceGroup)
self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
self.assertEqual(il.objects[i].name, groups[i]['name'])
self.assertEqual(il.objects[i].project_id, id)
def test_get_by_name(self):
self._populate_instances()
ctxt = context.RequestContext('fake_user', 'p1')
ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1')
self.assertEqual('f1', ig.name)
def test_get_by_hint(self):
instances = self._populate_instances()
for instance in instances:
ctxt = context.RequestContext('fake_user', instance[2])
ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1])
self.assertEqual(instance[1], ig.name)
ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0])
self.assertEqual(instance[0], ig.uuid)
def test_add_members(self):
instance_ids = ['fakeid1', 'fakeid2']
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
fake_notifier.NOTIFICATIONS = []
members = instance_group.InstanceGroup.add_members(self.context,
group.uuid, instance_ids)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('servergroup.addmember', msg.event_type)
self.assertEqual(group.uuid, msg.payload['server_group_id'])
self.assertEqual(instance_ids, msg.payload['instance_uuids'])
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
for instance in instance_ids:
self.assertIn(instance, members)
self.assertIn(instance, group.members)
def test_get_hosts(self):
instance1 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance1.host = 'hostA'
instance1.save()
instance2 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance2.host = 'hostB'
instance2.save()
instance3 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance3.host = 'hostB'
instance3.save()
instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
instance_group.InstanceGroup.add_members(self.context, group.uuid,
instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
hosts = group.get_hosts()
self.assertEqual(2, len(hosts))
self.assertIn('hostA', hosts)
self.assertIn('hostB', hosts)
hosts = group.get_hosts(exclude=[instance1.uuid])
self.assertEqual(1, len(hosts))
self.assertIn('hostB', hosts)
def test_get_hosts_with_some_none(self):
instance1 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance1.host = None
instance1.save()
instance2 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance2.host = 'hostB'
instance2.save()
instance_ids = [instance1.uuid, instance2.uuid]
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
instance_group.InstanceGroup.add_members(self.context, group.uuid,
instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
hosts = group.get_hosts()
self.assertEqual(1, len(hosts))
self.assertIn('hostB', hosts)
def test_obj_make_compatible(self):
group = instance_group.InstanceGroup(context=self.context,
uuid='fake-uuid',
name='fake-name')
group.create()
group_primitive = group.obj_to_primitive()
group.obj_make_compatible(group_primitive, '1.6')
self.assertEqual({}, group_primitive['metadetails'])
def test_count_members_by_user(self):
instance1 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance1.user_id = 'user1'
instance1.save()
instance2 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance2.user_id = 'user2'
instance2.save()
instance3 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance3.user_id = 'user2'
instance3.save()
instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
instance_group.InstanceGroup.add_members(self.context, group.uuid,
instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
count_user1 = group.count_members_by_user('user1')
count_user2 = group.count_members_by_user('user2')
count_user3 = group.count_members_by_user('user3')
self.assertEqual(1, count_user1)
self.assertEqual(2, count_user2)
self.assertEqual(0, count_user3)
class TestInstanceGroupObject(test_objects._LocalTest,
_TestInstanceGroupObjects):
pass
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
_TestInstanceGroupObjects):
pass
|
hwmaier/esp-idf
|
refs/heads/master
|
components/idf_test/integration_test/TestCaseScript/NVSTest/NVSCompatibleWiFi.py
|
9
|
import os
import time
from TCAction import TCActionBase
from TCAction import PerformanceTCBase
from NativeLog import NativeLog
SOFTAP_SSID = "SoftAPSSID4NVSCompatibleTest"
SOFTAP_PASSWORD = "SoftAPPassword4NVSCompatibleTest"
NVS_FILE_NAME = "nvs_wifi.bin"
class TestCase(PerformanceTCBase.PerformanceTCBase):
def __init__(self, test_case, test_env, timeout=30, log_path=TCActionBase.LOG_PATH):
PerformanceTCBase.PerformanceTCBase.__init__(self, test_case, test_env, timeout=timeout, log_path=log_path)
self.nvs_path = ""
# load param from excel
cmd_set = test_case["cmd set"]
for i in range(1, len(cmd_set)):
if cmd_set[i][0] != "dummy":
cmd_string = "self." + cmd_set[i][0]
exec cmd_string
def get_nvs_bins(self):
if os.path.exists(self.nvs_path) is False:
NativeLog.add_trace_critical("NVS path is not correct")
files = []
else:
files = [os.path.join(self.nvs_path, x) for x in os.listdir(self.nvs_path)]
return filter(lambda f: NVS_FILE_NAME in f, files)
def check_nvs(self, nvs_bin):
branch_name = os.path.basename(nvs_bin)
branch_name = branch_name.replace("___", "/")
result = True
ssc1_port = self.test_env.get_port_by_name("SSC1")
ap_ssid = self.get_parameter("ap_ssid")
self.flush_data("SSC1")
# first download
ssc1_port.flash_nvs(nvs_bin)
self.check_response("SSC1", "ready!!!")
# set to sta mode and join ap
if self.check_response("SSC1", "+JAP:CONNECTED,%s" % ap_ssid, timeout=15) is False:
NativeLog.add_trace_critical("Failed to join AP on: " + branch_name)
result = False
self.serial_write_line("SSC1", "op -Q")
if self.check_response("SSC1", "+CURMODE:3") is False:
NativeLog.add_trace_critical("Failed on verifying WIFI mode on: " + branch_name)
result = False
self.serial_write_line("SSC1", "ap -Q")
if self.check_response("SSC1", "+APCONFIG:%s,%s" % (SOFTAP_SSID, SOFTAP_PASSWORD)) is False:
NativeLog.add_trace_critical("Failed on verifying SoftAP config on: " + branch_name)
result = False
return result
def dump_nvs(self):
ssc1_port = self.test_env.get_port_by_name("SSC1")
ap_ssid = self.get_parameter("ap_ssid")
ap_password = self.get_parameter("ap_password")
# first erase NVS
ssc1_port.flash_nvs(None)
self.check_response("SSC1", "ready!!!")
self.serial_write_line("SSC1", "op -S -o 3")
self.check_response("SSC1", "+MODE:OK")
self.serial_write_line("SSC1", "sta -C -s %s -p %s" % (ap_ssid, ap_password))
self.check_response("SSC1", "+JAP:CONNECTED,%s" % ap_ssid, timeout=20)
self.serial_write_line("SSC1", "ap -S -s %s -p %s -t 3" % (SOFTAP_SSID, SOFTAP_PASSWORD))
self.check_response("SSC1", "+SAP:OK")
time.sleep(1)
idf_path = os.getenv("IDF_PATH")
ssc1_port.dump_nvs(os.path.join(idf_path, NVS_FILE_NAME))
def cleanup(self):
# make sure dump nvs will be executed
self.dump_nvs()
def process(self):
result = True
nvs_bins = self.get_nvs_bins()
for nvs_bin in nvs_bins:
result = result and self.check_nvs(nvs_bin)
if result is True:
self.set_result("Succeed")
|
kun--hust/sccloud
|
refs/heads/master
|
swift/common/middleware/account_quotas.py
|
39
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``account_quotas`` is a middleware which blocks write requests (PUT, POST) once
a given account quota (in bytes) is exceeded, while DELETE requests are still
allowed.
``account_quotas`` uses the ``x-account-meta-quota-bytes`` metadata entry to
store the quota. Write requests to this metadata entry are only permitted for
resellers. There is no quota limit if ``x-account-meta-quota-bytes`` is not
set.
The ``account_quotas`` middleware should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware.
For example::
[pipeline:main]
pipeline = catch_errors cache tempauth account_quotas proxy-server
[filter:account_quotas]
use = egg:swift#account_quotas
To set the quota on an account::
swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:10000
Remove the quota::
swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:
The same limitations apply to account quotas as to container quotas. For
example, when an object is uploaded without a content-length header, the proxy
server does not know the final size of the upload, so it is allowed as long as
the current account size is within the quota. Due to eventual consistency,
further uploads might be possible until the account size has been updated.
"""
from swift.common.constraints import check_copy_from_header
from swift.common.swob import HTTPForbidden, HTTPBadRequest, \
HTTPRequestEntityTooLarge, wsgify
from swift.common.utils import register_swift_info
from swift.proxy.controllers.base import get_account_info, get_object_info
class AccountQuotaMiddleware(object):
"""Account quota middleware
See above for a full description.
"""
def __init__(self, app, *args, **kwargs):
self.app = app
@wsgify
def __call__(self, request):
if request.method not in ("POST", "PUT", "COPY"):
return self.app
try:
ver, account, container, obj = request.split_path(
2, 4, rest_with_last=True)
except ValueError:
return self.app
if not container:
# account request, so we pay attention to the quotas
new_quota = request.headers.get(
'X-Account-Meta-Quota-Bytes')
remove_quota = request.headers.get(
'X-Remove-Account-Meta-Quota-Bytes')
else:
# container or object request; even if the quota headers are set
# in the request, they're meaningless
new_quota = remove_quota = None
if remove_quota:
new_quota = 0 # X-Remove dominates if both are present
if request.environ.get('reseller_request') is True:
if new_quota and not new_quota.isdigit():
return HTTPBadRequest()
return self.app
# deny quota set for non-reseller
if new_quota is not None:
return HTTPForbidden()
if request.method == "POST" or not obj:
return self.app
if request.method == 'COPY':
copy_from = container + '/' + obj
else:
if 'x-copy-from' in request.headers:
src_cont, src_obj = check_copy_from_header(request)
copy_from = "%s/%s" % (src_cont, src_obj)
else:
copy_from = None
content_length = (request.content_length or 0)
account_info = get_account_info(request.environ, self.app)
if not account_info or not account_info['bytes']:
return self.app
try:
quota = int(account_info['meta'].get('quota-bytes', -1))
except ValueError:
return self.app
if quota < 0:
return self.app
if copy_from:
path = '/' + ver + '/' + account + '/' + copy_from
object_info = get_object_info(request.environ, self.app, path)
if not object_info or not object_info['length']:
content_length = 0
else:
content_length = int(object_info['length'])
new_size = int(account_info['bytes']) + content_length
if quota < new_size:
resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.')
if 'swift.authorize' in request.environ:
orig_authorize = request.environ['swift.authorize']
def reject_authorize(*args, **kwargs):
aresp = orig_authorize(*args, **kwargs)
if aresp:
return aresp
return resp
request.environ['swift.authorize'] = reject_authorize
else:
return resp
return self.app
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
register_swift_info('account_quotas')
def account_quota_filter(app):
return AccountQuotaMiddleware(app)
return account_quota_filter
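# Illustrative test-style sketch (editorial addition, not part of the original
# module): wiring the filter around a trivial WSGI app, much as the
# paste.deploy pipeline described in the module docstring would do.
def _example_wrap_app():
    def fake_app(env, start_response):
        start_response('200 OK', [('Content-Length', '0')])
        return [b'']
    return filter_factory({})(fake_app)  # -> AccountQuotaMiddleware wrapping fake_app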
|
ytoolshed/range
|
refs/heads/master
|
python_seco_range/source/setup.py
|
1
|
#!/usr/bin/python
from distutils.core import setup
setup(name='seco/range',
version = '1.0',
packages = [ 'seco', 'seco.range', 'seco.range.sync' ])
|
chriscrosscutler/scikit-image
|
refs/heads/master
|
skimage/io/_plugins/fits_plugin.py
|
28
|
__all__ = ['imread', 'imread_collection']
import skimage.io as io
try:
from astropy.io import fits as pyfits
except ImportError:
try:
import pyfits
except ImportError:
raise ImportError(
"PyFITS could not be found. Please refer to\n"
"http://www.stsci.edu/resources/software_hardware/pyfits\n"
"for further instructions.")
def imread(fname, dtype=None):
"""Load an image from a FITS file.
Parameters
----------
fname : string
Image file name, e.g. ``test.fits``.
dtype : dtype, optional
For FITS images this argument is currently ignored; the array is
returned as produced by the FITS reader.
Returns
-------
img_array : ndarray
Unlike plugins such as PIL, where different colour bands/channels are
stored in the third dimension, FITS images are greyscale-only and can
be N-dimensional, so an array of the native FITS dimensionality is
returned, without colour channels.
If no image extension with data is found in the file, None is currently returned.
Notes
-----
Currently FITS ``imread()`` always returns the first image extension when
given a Multi-Extension FITS file; use ``imread_collection()`` (which does
lazy loading) to get all the extensions at once.
"""
hdulist = pyfits.open(fname)
# Iterate over FITS image extensions, ignoring any other extension types
# such as binary tables, and get the first image data array:
img_array = None
for hdu in hdulist:
if isinstance(hdu, pyfits.ImageHDU) or \
isinstance(hdu, pyfits.PrimaryHDU):
if hdu.data is not None:
img_array = hdu.data
break
hdulist.close()
return img_array
def imread_collection(load_pattern, conserve_memory=True):
"""Load a collection of images from one or more FITS files
Parameters
----------
load_pattern : str or list
List of extensions to load. Filename globbing is currently
unsupported.
conserve_memory : bool
If True, never keep more than one image in memory at a given
time. Otherwise, images will be cached once they are loaded.
Returns
-------
ic : ImageCollection
Collection of images.
"""
intype = type(load_pattern)
if intype is not list and intype is not str:
raise TypeError("Input must be a filename or list of filenames")
# Ensure we have a list, otherwise we'll end up iterating over the string:
if intype is not list:
load_pattern = [load_pattern]
# Generate a list of filename/extension pairs by opening the list of
# files and finding the image extensions in each one:
ext_list = []
for filename in load_pattern:
hdulist = pyfits.open(filename)
for n, hdu in zip(range(len(hdulist)), hdulist):
if isinstance(hdu, pyfits.ImageHDU) or \
isinstance(hdu, pyfits.PrimaryHDU):
# Ignore (primary) header units with no data (use '.size'
# rather than '.data' to avoid actually loading the image):
try:
data_size = hdu.size()
except TypeError: # (size changed to int in PyFITS 3.1)
data_size = hdu.size
if data_size > 0:
ext_list.append((filename, n))
hdulist.close()
return io.ImageCollection(ext_list, load_func=FITSFactory,
conserve_memory=conserve_memory)
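# Illustrative usage sketch (editorial addition, not part of the original
# module): lazily loading every image extension from two FITS files. The file
# names are placeholders.
def _example_load_fits():
    ic = imread_collection(['image1.fits', 'image2.fits'])
    return [img.shape for img in ic]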
def FITSFactory(image_ext):
"""Load an image extension from a FITS file and return a NumPy array
Parameters
----------
image_ext : tuple
FITS extension to load, in the format ``(filename, ext_num)``.
The FITS ``(extname, extver)`` format is unsupported, since this
function is not called directly by the user and
``imread_collection()`` does the work of figuring out which
extensions need loading.
"""
# Expect a length-2 tuple with a filename as the first element:
if not isinstance(image_ext, tuple):
raise TypeError("Expected a tuple")
if len(image_ext) != 2:
raise ValueError("Expected a tuple of length 2")
filename = image_ext[0]
extnum = image_ext[1]
if type(filename) is not str or type(extnum) is not int:
raise ValueError("Expected a (filename, extension) tuple")
hdulist = pyfits.open(filename)
data = hdulist[extnum].data
hdulist.close()
if data is None:
raise RuntimeError(
"Extension %d of %s has no data" % (extnum, filename))
return data
|
pcdocker/pcdocker
|
refs/heads/master
|
pcdocker/taskapp/celery.py
|
2
|
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery('pcdocker')
class CeleryConfig(AppConfig):
name = 'pcdocker.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
mixturemodel-flow/tensorflow
|
refs/heads/master
|
tensorflow/python/training/training_util.py
|
68
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
# TODO(drpng): remove this after legacy uses are resolved.
write_graph = graph_io.write_graph
def global_step(sess, global_step_tensor):
"""Small helper to get the global step.
```python
# Creates a variable to hold the global_step.
global_step_tensor = tf.Variable(10, trainable=False, name='global_step')
# Creates a session.
sess = tf.Session()
# Initializes the variable.
print('global_step: %s' % tf.train.global_step(sess, global_step_tensor))
global_step: 10
```
Args:
sess: A TensorFlow `Session` object.
global_step_tensor: `Tensor` or the `name` of the operation that contains
the global step.
Returns:
The global step value.
"""
return int(sess.run(global_step_tensor))
def get_global_step(graph=None):
"""Get the global step tensor.
The global step tensor must be an integer variable. We first try to find it
in the collection `GLOBAL_STEP`, or by name `global_step:0`.
Args:
graph: The graph to find the global step in. If missing, use default graph.
Returns:
The global step variable, or `None` if none was found.
Raises:
TypeError: If the global step tensor has a non-integer type, or if it is not
a `Variable`.
"""
graph = graph or ops.get_default_graph()
global_step_tensor = None
global_step_tensors = graph.get_collection(ops.GraphKeys.GLOBAL_STEP)
if len(global_step_tensors) == 1:
global_step_tensor = global_step_tensors[0]
elif not global_step_tensors:
try:
global_step_tensor = graph.get_tensor_by_name('global_step:0')
except KeyError:
return None
else:
logging.error('Multiple tensors in global_step collection.')
return None
assert_global_step(global_step_tensor)
return global_step_tensor
def create_global_step(graph=None):
"""Create global step tensor in graph.
Args:
graph: The graph in which to create the global step tensor. If missing,
use default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
graph = graph or ops.get_default_graph()
if get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
graph = graph or ops.get_default_graph()
global_step_tensor = get_global_step(graph)
if global_step_tensor is None:
global_step_tensor = create_global_step(graph)
return global_step_tensor
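# Illustrative sketch (editorial addition, not part of the original module):
# fetching (or creating) the global step in a fresh graph and building the op
# that a training loop would run to advance it.
def _example_global_step_increment():
    from tensorflow.python.ops import state_ops
    graph = ops.Graph()
    with graph.as_default():
        step = get_or_create_global_step(graph)
        return state_ops.assign_add(step, 1)  # run once per training step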
def assert_global_step(global_step_tensor):
"""Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.
Args:
global_step_tensor: `Tensor` to test.
"""
if not (isinstance(global_step_tensor, variables.Variable) or
isinstance(global_step_tensor, ops.Tensor) or
isinstance(global_step_tensor,
resource_variable_ops.ResourceVariable)):
raise TypeError(
'Existing "global_step" must be a Variable or Tensor: %s.' %
global_step_tensor)
if not global_step_tensor.dtype.base_dtype.is_integer:
raise TypeError('Existing "global_step" does not have integer type: %s' %
global_step_tensor.dtype)
if global_step_tensor.get_shape().ndims != 0:
raise TypeError('Existing "global_step" is not scalar: %s' %
global_step_tensor.get_shape())
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/psi/ReturnAtEOF.py
|
83
|
def test(a):
if a:
return
|
BigBorg/Blog
|
refs/heads/master
|
Blog/comments/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
wraldata/venus
|
refs/heads/master
|
leg_tracker/billcatcher/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
google-code/android-scripting
|
refs/heads/master
|
python/src/Lib/test/badsyntax_future6.py
|
383
|
"""This is a test"""
"this isn't a doc string"
from __future__ import nested_scopes
def f(x):
def g(y):
return x + y
return g
result = f(2)(4)
|
mini13i/maintenance
|
refs/heads/master
|
accounts/apps.py
|
172
|
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'accounts'
|
t0mk/ansible
|
refs/heads/devel
|
test/units/playbook/test_playbook.py
|
290
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_playbook(self):
fake_loader = DictDataLoader({})
p = Playbook(loader=fake_loader)
def test_basic_playbook(self):
fake_loader = DictDataLoader({
"test_file.yml":"""
- hosts: all
""",
})
p = Playbook.load("test_file.yml", loader=fake_loader)
plays = p.get_plays()
def test_bad_playbook_files(self):
fake_loader = DictDataLoader({
# represents a playbook which is not a list of plays
"bad_list.yml": """
foo: bar
""",
# represents a playbook where a play entry is mis-formatted
"bad_entry.yml": """
-
- "This should be a mapping..."
""",
})
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
|
civisanalytics/ansible
|
refs/heads/civis
|
hacking/metadata-tool.py
|
20
|
#!/usr/bin/env python
import ast
import csv
import os
import sys
from collections import defaultdict
from distutils.version import StrictVersion
from pprint import pformat, pprint
import yaml
from ansible.module_utils._text import to_text
from ansible.plugins import module_loader
# There's a few files that are not new-style modules. Have to blacklist them
NONMODULE_PY_FILES = frozenset(('async_wrapper.py',))
NONMODULE_MODULE_NAMES = frozenset(os.path.splitext(p)[0] for p in NONMODULE_PY_FILES)
# Default metadata
DEFAULT_METADATA = {'version': '1.0', 'status': ['preview'], 'supported_by':'community'}
class ParseError(Exception):
"""Thrown when parsing a file fails"""
pass
class MissingModuleError(Exception):
"""Thrown when unable to find a plugin"""
pass
def usage():
print("""Usage:
metadata-tester.py report [--version X]
metadata-tester.py add [--version X] [--overwrite] CSVFILE
metadata-tester.py add-default [--version X] [--overwrite]""")
sys.exit(1)
def parse_args(arg_string):
if len(arg_string) < 1:
usage()
action = arg_string[0]
version = None
if '--version' in arg_string:
version_location = arg_string.index('--version')
arg_string.pop(version_location)
version = arg_string.pop(version_location)
overwrite = False
if '--overwrite' in arg_string:
overwrite = True
arg_string.remove('--overwrite')
csvfile = None
if len(arg_string) == 2:
csvfile = arg_string[1]
elif len(arg_string) > 2:
usage()
return action, {'version': version, 'overwrite': overwrite, 'csvfile': csvfile}
def seek_end_of_dict(module_data, start_line, start_col, next_node_line, next_node_col):
"""Look for the end of a dict in a set of lines
We know the starting position of the dict and we know the start of the
next code node but in between there may be multiple newlines and comments.
There may also be multiple Python statements on the same line (separated
by semicolons).
Examples::
ANSIBLE_METADATA = {[..]}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} # Optional comments with confusing junk => {}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {
[..]
}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} ; DOCUMENTATION = [..]
ANSIBLE_METADATA = {}EOF
"""
if next_node_line is None:
# The dict is the last statement in the file
snippet = module_data.splitlines()[start_line:]
next_node_col = 0
# Include the last line in the file
last_line_offset = 0
else:
# It's somewhere in the middle so we need to separate it from the rest
snippet = module_data.splitlines()[start_line:next_node_line]
# Do not include the last line because that's where the next node
# starts
last_line_offset = 1
if next_node_col == 0:
# This handles all variants where there are only comments and blank
# lines between the dict and the next code node
# Step backwards through all the lines in the snippet
for line_idx, line in tuple(reversed(tuple(enumerate(snippet))))[last_line_offset:]:
end_col = None
# Step backwards through all the characters in the line
for col_idx, char in reversed(tuple(enumerate(c for c in line))):
if char == '}' and end_col is None:
# Potentially found the end of the dict
end_col = col_idx
elif char == '#' and end_col is not None:
# The previous '}' was part of a comment. Keep trying
end_col = None
if end_col is not None:
# Found the end!
end_line = start_line + line_idx
break
else:
# Harder cases involving multiple statements on one line
# Good Ansible Module style doesn't do this so we're just going to
# treat this as an error for now:
raise ParseError('Multiple statements per line confuses the module metadata parser.')
return end_line, end_col
def seek_end_of_string(module_data, start_line, start_col, next_node_line, next_node_col):
"""
This is much trickier than finding the end of a dict. A dict has only one
ending character, "}". Strings have four potential ending characters. We
have to parse the beginning of the string to determine what the ending
character will be.
Examples:
ANSIBLE_METADATA = '''[..]''' # Optional comment with confusing chars '''
# Optional comment with confusing chars '''
DOCUMENTATION = [..]
ANSIBLE_METADATA = '''
[..]
'''
DOCUMENTATIONS = [..]
ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
SHORT_NAME = ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
String marker variants:
* '[..]'
* "[..]"
* '''[..]'''
* \"\"\"[..]\"\"\"
Each of these come in u, r, and b variants:
* '[..]'
* u'[..]'
* b'[..]'
* r'[..]'
* ur'[..]'
* ru'[..]'
* br'[..]'
* b'[..]'
* rb'[..]'
"""
raise NotImplementedError('Finding end of string not yet implemented')
def extract_metadata(module_data):
"""Extract the metadata from a module
:arg module_data: Byte string containing a module's code
:returns: a tuple of metadata (a dict), line the metadata starts on,
column the metadata starts on, line the metadata ends on, column the
metadata ends on, and the names the metadata is assigned to. One of
        the names the metadata is assigned to will be ANSIBLE_METADATA. If no
metadata is found, the tuple will be (None, -1, -1, -1, -1, None)
"""
metadata = None
start_line = -1
start_col = -1
end_line = -1
end_col = -1
targets = None
mod_ast_tree = ast.parse(module_data)
for root_idx, child in enumerate(mod_ast_tree.body):
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'ANSIBLE_METADATA':
if isinstance(child.value, ast.Dict):
metadata = ast.literal_eval(child.value)
try:
# Determine where the next node starts
next_node = mod_ast_tree.body[root_idx+1]
next_lineno = next_node.lineno
next_col_offset = next_node.col_offset
except IndexError:
# Metadata is defined in the last node of the file
next_lineno = None
next_col_offset = None
# Determine where the current metadata ends
end_line, end_col = seek_end_of_dict(module_data,
child.lineno - 1, child.col_offset, next_lineno,
next_col_offset)
elif isinstance(child.value, ast.Str):
metadata = yaml.safe_load(child.value.s)
end_line = seek_end_of_string(module_data)
elif isinstance(child.value, ast.Bytes):
metadata = yaml.safe_load(to_text(child.value.s, errors='surrogate_or_strict'))
end_line = seek_end_of_string(module_data)
else:
# Example:
# ANSIBLE_METADATA = 'junk'
# ANSIBLE_METADATA = { [..the real metadata..] }
continue
# Do these after the if-else so we don't pollute them in
# case this was a false positive
start_line = child.lineno - 1
start_col = child.col_offset
targets = [t.id for t in child.targets]
break
if metadata is not None:
# Once we've found the metadata we're done
break
return metadata, start_line, start_col, end_line, end_col, targets
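# Illustrative call (not part of the tool itself; module_data is the raw bytes
# of a module file read from disk):
#
#   metadata, s_line, s_col, e_line, e_col, names = extract_metadata(module_data)
#   if metadata is None:
#       ...  # no ANSIBLE_METADATA assignment was found in the module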
def find_documentation(module_data):
"""Find the DOCUMENTATION metadata for a module file"""
start_line = -1
mod_ast_tree = ast.parse(module_data)
for child in mod_ast_tree.body:
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'DOCUMENTATION':
start_line = child.lineno - 1
break
return start_line
def remove_metadata(module_data, start_line, start_col, end_line, end_col):
"""Remove a section of a module file"""
lines = module_data.split('\n')
new_lines = lines[:start_line]
if start_col != 0:
new_lines.append(lines[start_line][:start_col])
next_line = lines[end_line]
if len(next_line) - 1 != end_col:
new_lines.append(next_line[end_col:])
if len(lines) > end_line:
new_lines.extend(lines[end_line + 1:])
return '\n'.join(new_lines)
def insert_metadata(module_data, new_metadata, insertion_line, targets=('ANSIBLE_METADATA',)):
"""Insert a new set of metadata at a specified line"""
assignments = ' = '.join(targets)
pretty_metadata = pformat(new_metadata, width=1).split('\n')
new_lines = []
new_lines.append('{} = {}'.format(assignments, pretty_metadata[0]))
if len(pretty_metadata) > 1:
for line in pretty_metadata[1:]:
new_lines.append('{}{}'.format(' ' * (len(assignments) - 1 + len(' = {')), line))
old_lines = module_data.split('\n')
lines = old_lines[:insertion_line] + new_lines + [''] + old_lines[insertion_line:]
return '\n'.join(lines)
def parse_assigned_metadata_initial(csvfile):
"""
Fields:
:0: Module name
:1: Core (x if so)
:2: Extras (x if so)
:3: Category
:4: Supported/SLA
:5: Committer
:6: Stable
:7: Deprecated
:8: Notes
:9: Team Notes
:10: Notes 2
:11: final supported_by field
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
if record[12] == 'core':
supported_by = 'core'
elif record[12] == 'curated':
supported_by = 'committer'
elif record[12] == 'community':
supported_by = 'community'
else:
print('Module %s has no supported_by field. Using community' % record[0])
supported_by = 'community'
supported_by = DEFAULT_METADATA['supported_by']
status = []
if record[6]:
status.append('stableinterface')
if record[7]:
status.append('deprecated')
if not status:
status.extend(DEFAULT_METADATA['status'])
yield (module, {'version': DEFAULT_METADATA['version'], 'supported_by': supported_by, 'status': status})
def parse_assigned_metadata(csvfile):
"""
Fields:
:0: Module name
:1: supported_by string. One of the valid support fields
core, community, unmaintained, committer
:2: stableinterface
:3: preview
:4: deprecated
:5: removed
:6: tested
https://github.com/ansible/proposals/issues/30
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
supported_by = record[1]
status = []
if record[2]:
status.append('stableinterface')
if record[4]:
status.append('deprecated')
if record[5]:
status.append('removed')
if record[6]:
status.append('tested')
if not status or record[3]:
status.append('preview')
yield (module, {'version': '1.0', 'supported_by': supported_by, 'status': status})
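# Worked example for the field layout documented above (hypothetical CSV row,
# not taken from a real spreadsheet):
#   the row "copy,core,x,,,,x" yields
#   ('copy', {'version': '1.0', 'supported_by': 'core',
#             'status': ['stableinterface', 'tested']})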
def write_metadata(filename, new_metadata, version=None, overwrite=False):
with open(filename, 'rb') as f:
module_data = f.read()
try:
current_metadata, start_line, start_col, end_line, end_col, targets = extract_metadata(module_data)
except SyntaxError:
if filename.endswith('.py'):
raise
# Probably non-python modules. These should all have python
# documentation files where we can place the data
raise ParseError('Could not add metadata to {}'.format(filename))
if current_metadata is None:
        # No current metadata so we can just add it
start_line = find_documentation(module_data)
if start_line < 0:
if os.path.basename(filename) in NONMODULE_PY_FILES:
# These aren't new-style modules
return
raise Exception('Module file {} had no ANSIBLE_METADATA or DOCUMENTATION'.format(filename))
module_data = insert_metadata(module_data, new_metadata, start_line, targets=('ANSIBLE_METADATA',))
elif overwrite or (version is not None and ('version' not in current_metadata or StrictVersion(current_metadata['version']) < StrictVersion(version))):
# Current metadata that we do not want. Remove the current
# metadata and put the new version in its place
module_data = remove_metadata(module_data, start_line, start_col, end_line, end_col)
module_data = insert_metadata(module_data, new_metadata, start_line, targets=targets)
else:
# Current metadata and we don't want to overwrite it
return
# Save the new version of the module
with open(filename, 'wb') as f:
f.write(module_data)
def return_metadata(plugins):
metadata = {}
for name, filename in plugins:
# There may be several files for a module (if it is written in another
# language, for instance) but only one of them (the .py file) should
# contain the metadata.
if name not in metadata or metadata[name] is not None:
with open(filename, 'rb') as f:
module_data = f.read()
metadata[name] = extract_metadata(module_data)[0]
return metadata
def metadata_summary(plugins, version=None):
"""Compile information about the metadata status for a list of modules
:arg plugins: List of plugins to look for. Each entry in the list is
a tuple of (module name, full path to module)
:kwarg version: If given, make sure the modules have this version of
metadata or higher.
:returns: A tuple consisting of a list of modules with no metadata at the
required version and a list of files that have metadata at the
required version.
"""
no_metadata = {}
has_metadata = {}
supported_by = defaultdict(set)
status = defaultdict(set)
plugins = list(plugins)
all_mods_metadata = return_metadata(plugins)
for name, filename in plugins:
# Does the module have metadata?
if name not in no_metadata and name not in has_metadata:
metadata = all_mods_metadata[name]
if metadata is None:
no_metadata[name] = filename
elif version is not None and ('version' not in metadata or StrictVersion(metadata['version']) < StrictVersion(version)):
no_metadata[name] = filename
else:
has_metadata[name] = filename
# What categories does the plugin belong in?
if all_mods_metadata[name] is None:
# No metadata for this module. Use the default metadata
supported_by[DEFAULT_METADATA['supported_by']].add(filename)
status[DEFAULT_METADATA['status'][0]].add(filename)
else:
supported_by[all_mods_metadata[name]['supported_by']].add(filename)
for one_status in all_mods_metadata[name]['status']:
status[one_status].add(filename)
return list(no_metadata.values()), list(has_metadata.values()), supported_by, status
#
# Subcommands
#
def add_from_csv(csv_file, version=None, overwrite=False):
"""Implement the subcommand to add metadata from a csv file
"""
# Add metadata for everything from the CSV file
diagnostic_messages = []
for module_name, new_metadata in parse_assigned_metadata_initial(csv_file):
filename = module_loader.find_plugin(module_name, mod_type='.py')
if filename is None:
diagnostic_messages.append('Unable to find the module file for {}'.format(module_name))
continue
try:
write_metadata(filename, new_metadata, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def add_default(version=None, overwrite=False):
"""Implement the subcommand to add default metadata to modules
Add the default metadata to any plugin which lacks it.
:kwarg version: If given, the metadata must be at least this version.
Otherwise, treat the module as not having existing metadata.
:kwarg overwrite: If True, overwrite any existing metadata. Otherwise,
do not modify files which have metadata at an appropriate version
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
# Iterate through each plugin
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
try:
write_metadata(filename, DEFAULT_METADATA, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def report(version=None):
"""Implement the report subcommand
Print out all the modules that have metadata and all the ones that do not.
:kwarg version: If given, the metadata must be at least this version.
        Otherwise, report the module as not having metadata.
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = list(plugins)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
    plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
plugins = list(plugins)
no_metadata, has_metadata, support, status = metadata_summary(plugins, version=version)
print('== Has metadata ==')
pprint(sorted(has_metadata))
print('')
print('== Has no metadata ==')
pprint(sorted(no_metadata))
print('')
print('== Supported by core ==')
pprint(sorted(support['core']))
print('== Supported by committers ==')
pprint(sorted(support['committer']))
print('== Supported by community ==')
pprint(sorted(support['community']))
print('')
print('== Status: stableinterface ==')
pprint(sorted(status['stableinterface']))
print('== Status: preview ==')
pprint(sorted(status['preview']))
print('== Status: deprecated ==')
pprint(sorted(status['deprecated']))
print('== Status: removed ==')
pprint(sorted(status['removed']))
print('')
print('== Summary ==')
print('No Metadata: {0} Has Metadata: {1}'.format(len(no_metadata), len(has_metadata)))
print('Supported by core: {0} Supported by community: {1} Supported by committer: {2}'.format(len(support['core']), len(support['community']),
len(support['committer'])))
print('Status StableInterface: {0} Status Preview: {1} Status Deprecated: {2} Status Removed: {3}'.format(len(status['stableinterface']),
len(status['preview']),
len(status['deprecated']),
len(status['removed'])))
return 0
if __name__ == '__main__':
action, args = parse_args(sys.argv[1:])
### TODO: Implement upgrade metadata and upgrade metadata from csvfile
if action == 'report':
rc = report(version=args['version'])
elif action == 'add':
rc = add_from_csv(args['csvfile'], version=args['version'], overwrite=args['overwrite'])
elif action == 'add-default':
rc = add_default(version=args['version'], overwrite=args['overwrite'])
sys.exit(rc)
|
pisskidney/leetcode
|
refs/heads/master
|
easy/232.py
|
1
|
#!/usr/bin/python
class Queue(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.revstack = []
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.stack.append(x)
x = self.stack[:]
self.revstack = []
while x:
self.revstack.append(x.pop())
def pop(self):
"""
:rtype: nothing
"""
self.revstack.pop()
x = self.revstack[:]
self.stack = []
while x:
self.stack.append(x.pop())
def peek(self):
"""
:rtype: int
"""
return self.revstack[-1]
def empty(self):
"""
:rtype: bool
"""
return not bool(self.stack)
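# Note on the implementation above: after every push/pop, `stack` holds the
# elements in push order and `revstack` holds the same elements reversed, so
# the front of the queue is always revstack[-1] (what peek/pop operate on).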
q = Queue()
q.push(1)
q.push(2)
q.push(3)
q.push(4)
q.push(5)
print q.stack, q.revstack
print q.peek()
q.pop()
q.pop()
q.pop()
q.pop()
print q.stack, q.revstack, q.empty()
|
wimberosa/samba
|
refs/heads/master
|
buildtools/wafsamba/generic_cc.py
|
22
|
# compiler definition for a generic C compiler
# based on suncc.py from waf
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
from compiler_cc import c_compiler
c_compiler['default'] = ['gcc', 'generic_cc']
c_compiler['hpux'] = ['gcc', 'generic_cc']
@conftest
def find_generic_cc(conf):
v = conf.env
cc = None
if v['CC']: cc = v['CC']
elif 'CC' in conf.environ: cc = conf.environ['CC']
if not cc: cc = conf.find_program('cc', var='CC')
if not cc: conf.fatal('generic_cc was not found')
cc = conf.cmd_to_list(cc)
v['CC'] = cc
v['CC_NAME'] = 'generic'
@conftest
def generic_cc_common_flags(conf):
v = conf.env
v['CC_SRC_F'] = ''
v['CC_TGT_F'] = ['-c', '-o', '']
v['CPPPATH_ST'] = '-I%s' # template for adding include paths
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = ''
v['CCLNK_TGT_F'] = ['-o', '']
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STATICLIB_ST'] = '-l%s'
v['STATICLIBPATH_ST'] = '-L%s'
v['CCDEFINES_ST'] = '-D%s'
# v['SONAME_ST'] = '-Wl,-h -Wl,%s'
# v['SHLIB_MARKER'] = '-Bdynamic'
# v['STATICLIB_MARKER'] = '-Bstatic'
# program
v['program_PATTERN'] = '%s'
# shared library
# v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
# v['shlib_LINKFLAGS'] = ['-G']
v['shlib_PATTERN'] = 'lib%s.so'
# static lib
# v['staticlib_LINKFLAGS'] = ['-Bstatic']
# v['staticlib_PATTERN'] = 'lib%s.a'
detect = '''
find_generic_cc
find_cpp
find_ar
generic_cc_common_flags
cc_load_tools
cc_add_flags
link_add_flags
'''
|
ethan-nelson/osm-tasking-manager2
|
refs/heads/master
|
alembic/versions/b54ce37bde0_added_due_state_column_to_project_table.py
|
7
|
"""Added due_state column to project table
Revision ID: b54ce37bde0
Revises: 33a5ec066e1d
Create Date: 2014-07-17 16:52:59.898360
"""
# revision identifiers, used by Alembic.
revision = 'b54ce37bde0'
down_revision = '33a5ec066e1d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('project', sa.Column('due_date', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('project', 'due_date')
|
sourcelair/jason
|
refs/heads/master
|
jason/resources.py
|
1
|
from fields import BaseField
from queryselectors import JasonQuerySelector
import exceptions
class JasonResourceMeta(type):
def __new__(cls, name, bases, dct):
new_class = type.__new__(cls, name, bases, dct)
# Set names to attributes
for (key, value) in new_class.__dict__.iteritems():
if (isinstance(value, BaseField)):
value.name = key
# Set default query selector
new_class.objects = JasonQuerySelector(new_class)
return new_class
class JasonGenericResource(object):
NotFound = exceptions.NotFound
MultipleItemsReturned = exceptions.MultipleItemsReturned
_serializers = {}
def __init__(self, **data):
"""
Initialize a new Jason resource with arbitrary data.
"""
self._data = data
for key in self._data:
value = self._data[key]
value_type = type(value)
if (value_type in self._serializers):
value = self._serializers[value_type](value)
setattr(self, key, value)
@classmethod
def register_serializer(cls, object_type, serializer):
cls._serializers[object_type] = serializer
class JasonEmbeddedResource(JasonGenericResource):
def __init__(self, dict_obj):
# Take care of edge case
if 'self' in dict_obj:
dict_obj['_self'] = dict_obj['self']
del dict_obj['self']
super(JasonEmbeddedResource, self).__init__(**dict_obj)
class JasonResource(JasonGenericResource):
__metaclass__ = JasonResourceMeta
@classmethod
def get_root(cls):
if hasattr(cls, '_root'):
return cls._root
value = '%ss' % cls.__name__.lower()
if cls.service.root:
value = '%s/%s' % (cls.service.base_url, value)
return value
def __unicode__(self):
repr = (self.__class__.__name__, self.service.base_url)
return '<%s at %s>' % repr
def __str__(self):
return self.__unicode__()
def __repr__(self):
return self.__unicode__()
JasonGenericResource.register_serializer(dict, JasonEmbeddedResource)
|
softak/webfaction_demo
|
refs/heads/master
|
vendor-local/lib/python/amqplib/client_0_8/abstract_channel.py
|
22
|
"""
Code common to Connection and Channel objects.
"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from serialization import AMQPWriter
try:
bytes
except NameError:
# Python 2.5 and lower
bytes = str
__all__ = [
'AbstractChannel',
]
class AbstractChannel(object):
"""
Superclass for both the Connection, which is treated
as channel 0, and other user-created Channel objects.
The subclasses must have a _METHOD_MAP class property, mapping
between AMQP method signatures and Python methods.
"""
def __init__(self, connection, channel_id):
self.connection = connection
self.channel_id = channel_id
connection.channels[channel_id] = self
self.method_queue = [] # Higher level queue for methods
self.auto_decode = False
def __enter__(self):
"""
Support for Python >= 2.5 'with' statements.
"""
return self
def __exit__(self, type, value, traceback):
"""
Support for Python >= 2.5 'with' statements.
"""
self.close()
def _send_method(self, method_sig, args=bytes(), content=None):
"""
Send a method for our channel.
"""
if isinstance(args, AMQPWriter):
args = args.getvalue()
self.connection.method_writer.write_method(self.channel_id,
method_sig, args, content)
def close(self):
"""
Close this Channel or Connection
"""
        raise NotImplementedError('Must be overridden in subclass')
def wait(self, allowed_methods=None):
"""
Wait for a method that matches our allowed_methods parameter (the
default value of None means match any method), and dispatch to it.
"""
method_sig, args, content = self.connection._wait_method(
self.channel_id, allowed_methods)
return self.dispatch_method(method_sig, args, content)
def dispatch_method(self, method_sig, args, content):
if content \
and self.auto_decode \
and hasattr(content, 'content_encoding'):
try:
content.body = content.body.decode(content.content_encoding)
except Exception:
pass
amqp_method = self._METHOD_MAP.get(method_sig, None)
if amqp_method is None:
raise Exception('Unknown AMQP method (%d, %d)' % method_sig)
if content is None:
return amqp_method(self, args)
else:
return amqp_method(self, args, content)
#
# Placeholder, the concrete implementations will have to
# supply their own versions of _METHOD_MAP
#
_METHOD_MAP = {}
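# Illustrative shape of a concrete _METHOD_MAP in a subclass (the class/method
# ids come from the AMQP 0-8 spec; the handler name is a placeholder, not the
# actual amqplib method):
#
#   _METHOD_MAP = {
#       (20, 11): _handle_open_ok,   # channel.open-ok
#   }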
|
simmetria/sentry
|
refs/heads/master
|
src/sentry/services/__init__.py
|
3
|
"""
sentry.services
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
|
tayfun/django
|
refs/heads/master
|
tests/i18n/project_dir/app_no_locale/__init__.py
|
12133432
| |
ornlneutronimaging/iBeatles
|
refs/heads/master
|
ibeatles/table_dictionary/__init__.py
|
12133432
| |
teltek/edx-platform
|
refs/heads/master
|
lms/djangoapps/mobile_api/tests/__init__.py
|
12133432
| |
ssxenon01/contact-sharing
|
refs/heads/master
|
main/api/__init__.py
|
816
|
# coding: utf-8
|
SteveViss/readthedocs.org
|
refs/heads/master
|
readthedocs/gold/admin.py
|
40
|
from django.contrib import admin
from .models import GoldUser
class GoldAdmin(admin.ModelAdmin):
model = GoldUser
raw_id_fields = ('user', 'projects')
list_display = ('user', 'level')
list_filter = ('user', 'level')
admin.site.register(GoldUser, GoldAdmin)
|
reasonerjt/harbor
|
refs/heads/master
|
make/photon/prepare/utils/migration.py
|
2
|
import yaml
import click
import importlib
import os
from collections import deque
class MigrationNotFound(Exception): ...
class MigrationVersion:
'''
    The version used for migration
    Attributes:
name(str): version name like `1.0.0`
module: the python module object for a specific migration which contains migrate info, codes and templates
        down_versions(list): previous versions that can be migrated to this version
'''
def __init__(self, version: str):
self.name = version
self.module = importlib.import_module("migrations.version_{}".format(version.replace(".","_")))
@property
def down_versions(self):
return self.module.down_revisions
def read_conf(path):
with open(path) as f:
try:
d = yaml.safe_load(f)
except Exception as e:
click.echo("parse config file err, make sure your harbor config version is above 1.8.0", e)
exit(-1)
return d
def search(input_version: str, target_version: str) -> list :
"""
Find the migration path by BFS
Args:
input_version(str): The version migration start from
target_version(str): The target version migrated to
Returns:
list: the module of migrations in the upgrade path
"""
upgrade_path = []
next_version, visited, q = {}, set(), deque()
q.append(target_version)
found = False
while q: # BFS to find a valid path
version = MigrationVersion(q.popleft())
visited.add(version.name)
if version.name == input_version:
found = True
            break  # stop searching because a migration path was found
for v in version.down_versions:
next_version[v] = version.name
if v not in (visited.union(q)):
q.append(v)
if not found:
        raise MigrationNotFound('no migration path found to target version')
current_version = MigrationVersion(input_version)
while current_version.name != target_version:
current_version = MigrationVersion(next_version[current_version.name])
upgrade_path.append(current_version)
return list(map(lambda x: x.module, upgrade_path))
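# Usage sketch (illustrative only; the version strings assume matching
# migrations.version_x_y_z packages exist on the import path):
#
#   cfg = read_conf('harbor.yml')
#   for mod in search('1.8.0', '1.10.0'):
#       ...  # each mod is the migration module for one step of the upgrade path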
|
P0cL4bs/WiFi-Pumpkin
|
refs/heads/master
|
core/helpers/update.py
|
4
|
import time
import fnmatch
from os import path,walk
from subprocess import check_output,CalledProcessError
from core.loaders.master.github import GithubUpdate,UrllibDownload
from core.loaders.models.PackagesUI import *
"""
Description:
This program is a module for wifi-pumpkin.py. GUI update from github
Copyright:
Copyright (C) 2015-2017 Marcos Nesster P0cl4bs Team
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
class frm_githubUpdate(PumpkinModule):
''' called update from github repository master'''
def __init__(self,version,parent = None):
super(frm_githubUpdate, self).__init__(parent)
self.setWindowTitle("WiFi-Pumpkin Software Update")
self.checkHasCommits = False
self.version = version
self.UrlDownloadCommits = C.LCOMMITS
self.PathUrlLcommits = self.get_file_cfg_Update('Core')
self.PathUrlRcommits = self.PathUrlLcommits.replace('L','R')
self.center()
self.GUI()
def GUI(self):
self.Main = QtGui.QVBoxLayout()
self.widget = QtGui.QWidget()
self.layout = QtGui.QVBoxLayout(self.widget)
self.Blayout = QtGui.QHBoxLayout()
self.frmVersion = QtGui.QFormLayout()
self.frmLabels = QtGui.QHBoxLayout()
self.frmOutPut = QtGui.QHBoxLayout()
self.frmCommits = QtGui.QHBoxLayout()
self.split = QtGui.QHBoxLayout()
self.LVersion = QtGui.QLabel(self.version)
self.pb = ProgressBarWid(total=101)
self.btnUpdate = QtGui.QPushButton('Install')
self.btnCheck = QtGui.QPushButton('Check Updates')
self.LCommits = QtGui.QListWidget(self)
self.LOutput = QtGui.QListWidget(self)
self.LCommits.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.LOutput.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.btnUpdate.setDisabled(True)
# icons
self.btnCheck.setIcon(QtGui.QIcon('icons/Checklist_update.png'))
self.btnUpdate.setIcon(QtGui.QIcon('icons/updates_.png'))
#connects
self.btnCheck.clicked.connect(self.checkUpdate)
self.btnUpdate.clicked.connect(self.startUpdate)
#temporary
# split left
self.frmLabels.addWidget(QtGui.QLabel('New Commits::'))
self.frmCommits.addWidget(self.LCommits)
# split right
self.frmLabels.addWidget(QtGui.QLabel('Outputs::'))
self.frmOutPut.addWidget(self.LOutput)
# blayout
self.Blayout.addWidget(self.pb)
self.Blayout.addWidget(self.btnCheck)
self.Blayout.addWidget(self.btnUpdate)
self.frmVersion.addRow("Current Version:", self.LVersion)
self.split.addLayout(self.frmCommits)
self.split.addLayout(self.frmOutPut)
self.layout.addLayout(self.frmVersion)
self.layout.addLayout(self.frmLabels)
self.layout.addLayout(self.split)
self.layout.addLayout(self.Blayout)
self.Main.addWidget(self.widget)
self.setLayout(self.Main)
def startUpdate(self):
if hasattr(self,'git'):
self.git.UpdateRepository()
def get_file_cfg_Update(self,base_path):
matches = []
if not path.exists(base_path):
base_path = base_path.lower()
for root, dirnames, filenames in walk(base_path):
for filename in fnmatch.filter(filenames, '*.cfg'):
matches.append(path.join(root, filename))
for filename in matches:
if str(filename).endswith('Lcommits.cfg'):
return filename
def checkUpdate(self):
try:
if not path.isfile(check_output(['which','git']).rstrip()):
return QtGui.QMessageBox.warning(self,'git','git is not installed')
except CalledProcessError:
return QtGui.QMessageBox.warning(self,'git','git is not installed')
self.LCommits.clear(),self.LOutput.clear()
self.pb.setValue(1)
self.btnCheck.setDisabled(True)
self.downloaderUrl = UrllibDownload(self.UrlDownloadCommits)
self.downloaderUrl.data_downloaded.connect(self.Get_ContentUrl)
self.downloaderUrl.start()
def Get_ContentUrl(self,data):
if data == 'URLError':
return self.btnCheck.setEnabled(True)
self.git = GithubUpdate(self.version,data,self.PathUrlLcommits,self.PathUrlRcommits)
self.connect(self.git,QtCore.SIGNAL('Activated ( QString ) '), self.RcheckCommits)
self.git.start()
self.btnCheck.setDisabled(True)
def RcheckCommits(self,commits):
if 'no changes into' in commits:
item = QtGui.QListWidgetItem()
item.setText(commits)
item.setIcon(QtGui.QIcon('icons/checked_update.png'))
item.setSizeHint(QtCore.QSize(20,20))
self.LCommits.addItem(item)
return self.btnCheck.setEnabled(True)
elif 'New version available WiFi-Pumpkin v' in commits:
reply = QtGui.QMessageBox.question(self, 'Update Information',
'{}, would you like to update??'.format(commits), QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.git.NewVersionUpdate()
return self.btnCheck.setEnabled(True)
elif 'commit:' in commits:
item = QtGui.QListWidgetItem()
item.setText(commits)
item.setIcon(QtGui.QIcon('icons/check_update.png'))
item.setSizeHint(QtCore.QSize(20,20))
self.LCommits.addItem(item)
self.btnCheck.setEnabled(True)
self.btnUpdate.setEnabled(True)
self.checkHasCommits = True
elif 'alive::' in commits:
self.pb.update_bar(10)
elif '::updated' in commits:
self.pb.update_bar(100)
QtGui.QMessageBox.information(self,'Update Information',
"Already up-to-date. Please restart WiFi-Pumpkin to apply this update.")
self.btnUpdate.setDisabled(True)
else:
self.LOutput.addItem(commits)
''' http://stackoverflow.com/questions/22332106/python-qtgui-qprogressbar-color '''
class ProgressBarWid(QtGui.QProgressBar):
def __init__(self, parent=None, total=0):
super(ProgressBarWid, self).__init__()
self.setMinimum(1)
self.setMaximum(total)
font=QtGui.QFont('White Rabbit')
font.setPointSize(5)
self.setFont(font)
self.effect = QtGui.QGraphicsOpacityEffect(self)
self.setGraphicsEffect(self.effect)
self.animationIn = QtCore.QPropertyAnimation(self.effect, 'opacity')
self.animationIn.setDuration(300)
self.animationIn.setStartValue(0)
self.animationIn.setEndValue(1.0)
self.animationIn.start()
self._active = False
self.setAlignment(QtCore.Qt.AlignCenter)
self._text = None
def hideProcessbar(self):
self.animationOut = QtCore.QPropertyAnimation(self.effect, 'opacity')
self.animationOut.setDuration(300)
self.animationOut.setStartValue(1.0)
self.animationOut.setEndValue(0)
self.animationOut.start()
self.animationOut.finished.connect(self.hide)
def showProcessBar(self):
self.animationIn = QtCore.QPropertyAnimation(self.effect, 'opacity')
self.animationIn.setDuration(300)
self.animationIn.setStartValue(0)
self.animationIn.setEndValue(1.0)
self.animationIn.start()
self.show()
def setText(self, text):
self._text = text
def text(self):
if self._text != None:
return QtCore.QString(str(self._text))
return QtCore.QString('')
def update_bar_simple(self, add):
value = self.value() + add
self.setValue(value)
def update_bar(self, add):
while True:
time.sleep(0.01)
value = self.value() + add
self.setValue(value)
if value > 50:
self.change_color("green")
if (not self._active or value >= self.maximum()):
break
self._active = False
def closeEvent(self, event):
self._active = False
def change_color(self, color):
template_css = """QProgressBar::chunk { background: %s; }"""
css = template_css % color
self.setStyleSheet(css)
|
astroJeff/dart_board
|
refs/heads/master
|
dart_board/constants.py
|
1
|
##################### CONSTANTS TO BE USED THROUGHOUT MODULE #####################
G = 6.674e-8 # Gravitational constant in cgs
GGG = 1.909e5 # Gravitational constant in Rsun * (km/s)^2 / Msun
c_light = 2.9979e10 # speed of light in cgs
km_to_cm = 1.0e5 # km to cm
Msun_to_g = 1.989e33 # Msun to g
Rsun_to_cm = 6.955e10 # Rsun to cm
AU_to_cm = 1.496e13 # AU to cm
pc_to_cm = 3.086e18 # parsec to cm
pc_to_km = 3.086e13 # parsec to km
yr_to_sec = 31557600.0 # Sec in yr
day_to_sec = 3600.0*24.0 # Sec in day
deg_to_rad = 0.0174532925199 # Degrees to radians
rad_to_deg = 57.2957795131 # Radians to degrees
asec_to_rad = 4.84814e-6 # Arcsec to radians
Hubble_time = 1.47e4 # Hubble time set to be 14.7 Gyr
R_NS = 10.0 # NS radius in km
eta_bol = 0.15 # Calibrated to Chandra X-ray sensitivity
v_kick_sigma = 265.0 # Kick velocity Maxwellian dispersion - Fe-core collapse
v_kick_sigma_ECS = 50.0 # Kick velocity Maxwellian dispersion - ECS
alpha = -2.35 # IMF index
##################### PARAMETER RANGES #####################
min_mass_M1 = 8.0 # in Msun
max_mass_M1 = 150.0 # in Msun
min_mass_M2 = 2.0 # in Msun
max_mass_M2 = 150.0 # in Msun
min_a = 1.0e1 # In Rsun
max_a = 1.0e5 # In Rsun
min_t = 0.0
max_t = 1.0e4 # in Myr
min_z = 5.0e-5
max_z = 0.03
ra_max = None
ra_min = None
dec_max = None
dec_min = None
################### DISTANCE TO OBJECT #################
distance = None
dist_LMC = 5.0e4 * pc_to_km # Distance to Large Magellanic Cloud (in km)
dist_SMC = 6.1e4 * pc_to_km # Distance to Small Magellanic Cloud (in km)
dist_NGC4244 = 4.3e6 * pc_to_km # Distance to NGC4244 (in km)
dist_NGC660 = 13.0e6 * pc_to_km # Distance to NGC 660 (13 Mpc in km)
|
giocalitri/django-guardian
|
refs/heads/devel
|
guardian/south_migrations/__init__.py
|
12133432
| |
tyler569/irclib
|
refs/heads/master
|
irclib/__init__.py
|
12133432
| |
YACOWS/opps
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
Metaswitch/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/metadata_defs/__init__.py
|
12133432
| |
Stratoscale/yumsnapshot
|
refs/heads/master
|
config.py
|
1
|
sources = { 'fedora' : 'rsync://dl.fedoraproject.org/fedora-enchilada/linux/%(repo)s/%(release)s/%(arch)s/',
'centos' : 'rsync://centos.eecs.wsu.edu/centos/%(release)s/%(repo)s/%(arch)s/'
}
S3_BUCKET = 's3://yumfreeze.com'
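# Example expansion of one of the templates above (illustrative values):
#   sources['fedora'] % {'repo': 'releases', 'release': '24', 'arch': 'x86_64'}
#   -> 'rsync://dl.fedoraproject.org/fedora-enchilada/linux/releases/24/x86_64/'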
|
azureplus/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/indexes/__init__.py
|
12133432
| |
hadrianpaulo/project_deathstar
|
refs/heads/master
|
core/api/__init__.py
|
12133432
| |
alathers/projecteuler
|
refs/heads/master
|
338/338.py
|
12133432
| |
sid-kap/readthedocs.org
|
refs/heads/master
|
readthedocs/projects/__init__.py
|
12133432
| |
ajoubert-mitre/geoq
|
refs/heads/develop
|
geoq/core/migrations/__init__.py
|
12133432
| |
mlperf/training_results_v0.5
|
refs/heads/master
|
v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/staging/models/rough/ssd/async_checkpoint.py
|
36
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
Temporarily copied from tensorflow/contrib/tpu/python/tpu to work around a bug
in async_checkpoint in the 1.12 release.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# We do write graph and saver_def at the first call of before_run.
# We cannot do this in begin, since we let other hooks to change graph and
# add variables in begin. Graph is finalized after all begin calls.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn,
args=[self])
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
# Save the last checkpoint synchronously if needed.
if last_step != self._timer.last_triggered_step():
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
for l in self._listeners:
        l.after_save(session, step)
if not asynchronous:
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
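# Usage sketch (illustrative; `estimator`, `input_fn` and `model_dir` are
# assumed to be defined elsewhere and are not part of this hook):
#
#   hook = AsyncCheckpointSaverHook(checkpoint_dir=model_dir, save_steps=1000)
#   estimator.train(input_fn, hooks=[hook], max_steps=100000)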
|
Quadrocube/rep
|
refs/heads/master
|
tests/test_folding.py
|
4
|
from __future__ import division, print_function, absolute_import
import numpy
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.metrics.metrics import accuracy_score, roc_auc_score
from rep.estimators import SklearnClassifier
from rep.metaml import FoldingClassifier
from rep.test.test_estimators import generate_classification_data, check_classification_model
__author__ = 'antares'
def check_folding(classifier, check_instance=True, has_staged_pp=True, has_importances=True):
X, y, sample_weight = generate_classification_data(distance=0.6)
assert classifier == classifier.fit(X, y, sample_weight=sample_weight)
assert list(classifier.features) == list(X.columns)
check_classification_model(classifier, X, y, check_instance=check_instance, has_staged_pp=has_staged_pp,
has_importances=has_importances)
def mean_vote(x):
return numpy.mean(x, axis=0)
labels = classifier.predict(X, mean_vote)
proba = classifier.predict_proba(X, mean_vote)
assert numpy.all(proba == classifier.predict_proba(X, mean_vote))
score = accuracy_score(y, labels)
print(score)
assert score > 0.7
assert numpy.allclose(proba.sum(axis=1), 1), 'probabilities do not sum to 1'
assert numpy.all(proba >= 0.), 'negative probabilities'
auc_score = roc_auc_score(y, proba[:, 1])
print(auc_score)
assert auc_score > 0.8
if has_staged_pp:
for p in classifier.staged_predict_proba(X, mean_vote):
assert p.shape == (len(X), 2)
# checking that last iteration coincides with previous
assert numpy.all(p == proba)
def test_folding():
# base_ada = SklearnClassifier(AdaBoostClassifier())
# folding_str = FoldingClassifier(base_ada, n_folds=2)
# check_folding(folding_str, True, False, False)
base_ada = SklearnClassifier(SVC())
folding_str = FoldingClassifier(base_ada, n_folds=4)
check_folding(folding_str, True, False, False)
|
ttfseiko/openerp-trunk
|
refs/heads/master
|
openerp/addons/web/tests/test_ui.py
|
4
|
# -*- coding: utf-8 -*-
import os
import openerp.tests
class TestUi(openerp.tests.HttpCase):
def test_01_jsfile_ui_hello(self):
self.phantom_jsfile(os.path.join(os.path.dirname(__file__), 'test_ui_hello.js'))
def test_02_jsfile_ui_load(self):
self.phantom_jsfile(os.path.join(os.path.dirname(__file__), 'test_ui_load.js'))
def test_03_js_public(self):
self.phantom_js('/',"console.log('ok')","console")
def test_04_js_admin(self):
self.phantom_js('/',"console.log('ok')","openerp.client.action_manager.inner_widget.views.form", login='admin')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ashleysommer/sanic
|
refs/heads/master
|
sanic/log.py
|
4
|
import logging
import sys
LOGGING_CONFIG_DEFAULTS = dict(
version=1,
disable_existing_loggers=False,
loggers={
"sanic.root": {"level": "INFO", "handlers": ["console"]},
"sanic.error": {
"level": "INFO",
"handlers": ["error_console"],
"propagate": True,
"qualname": "sanic.error",
},
"sanic.access": {
"level": "INFO",
"handlers": ["access_console"],
"propagate": True,
"qualname": "sanic.access",
},
},
handlers={
"console": {
"class": "logging.StreamHandler",
"formatter": "generic",
"stream": sys.stdout,
},
"error_console": {
"class": "logging.StreamHandler",
"formatter": "generic",
"stream": sys.stderr,
},
"access_console": {
"class": "logging.StreamHandler",
"formatter": "access",
"stream": sys.stdout,
},
},
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
"access": {
"format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: "
+ "%(request)s %(message)s %(status)d %(byte)d",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
},
)
logger = logging.getLogger("sanic.root")
error_logger = logging.getLogger("sanic.error")
access_logger = logging.getLogger("sanic.access")
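# Minimal usage sketch (sanic normally applies this dict itself when an app
# starts; shown here only to illustrate the structure above):
#
#   import logging.config
#   logging.config.dictConfig(LOGGING_CONFIG_DEFAULTS)
#   logger.info("logging configured")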
|
AlexandraMercier/RULEngine
|
refs/heads/dev
|
Communication/protobuf/google/protobuf/message.py
|
78
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = 'robinson@google.com (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
def ParseFromString(self, serialized):
"""Parse serialized protocol buffer data into this message.
Like MergeFromString(), except we clear the object first and
do not return the value that MergeFromString returns.
"""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self):
"""Serializes the protocol message to a binary string.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A singular field is non-empty
if HasField() would return true, and a repeated field is non-empty if
it contains at least one element. The fields are ordered by field
number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message. Note if the
field_name is not defined in the message descriptor, ValueError will be
raised."""
raise NotImplementedError
def ClearField(self, field_name):
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
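# Typical round trip with a protoc-generated subclass (illustrative;
# `MyMessage` is a placeholder, not defined in this module):
#
#   msg = MyMessage()
#   msg.ParseFromString(data)
#   wire_bytes = msg.SerializeToString()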
|
splav/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/more-itertools/more_itertools/tests/test_recipes.py
|
39
|
from doctest import DocTestSuite
from unittest import TestCase
from itertools import combinations
from six.moves import range
import more_itertools as mi
def load_tests(loader, tests, ignore):
# Add the doctests
tests.addTests(DocTestSuite('more_itertools.recipes'))
return tests
class AccumulateTests(TestCase):
"""Tests for ``accumulate()``"""
def test_empty(self):
"""Test that an empty input returns an empty output"""
self.assertEqual(list(mi.accumulate([])), [])
def test_default(self):
"""Test accumulate with the default function (addition)"""
self.assertEqual(list(mi.accumulate([1, 2, 3])), [1, 3, 6])
def test_bogus_function(self):
"""Test accumulate with an invalid function"""
with self.assertRaises(TypeError):
list(mi.accumulate([1, 2, 3], func=lambda x: x))
def test_custom_function(self):
"""Test accumulate with a custom function"""
self.assertEqual(
list(mi.accumulate((1, 2, 3, 2, 1), func=max)), [1, 2, 3, 3, 3]
)
class TakeTests(TestCase):
"""Tests for ``take()``"""
def test_simple_take(self):
"""Test basic usage"""
t = mi.take(5, range(10))
self.assertEqual(t, [0, 1, 2, 3, 4])
def test_null_take(self):
"""Check the null case"""
t = mi.take(0, range(10))
self.assertEqual(t, [])
def test_negative_take(self):
"""Make sure taking negative items results in a ValueError"""
self.assertRaises(ValueError, lambda: mi.take(-3, range(10)))
def test_take_too_much(self):
"""Taking more than an iterator has remaining should return what the
iterator has remaining.
"""
t = mi.take(10, range(5))
self.assertEqual(t, [0, 1, 2, 3, 4])
class TabulateTests(TestCase):
"""Tests for ``tabulate()``"""
def test_simple_tabulate(self):
"""Test the happy path"""
t = mi.tabulate(lambda x: x)
f = tuple([next(t) for _ in range(3)])
self.assertEqual(f, (0, 1, 2))
def test_count(self):
"""Ensure tabulate accepts specific count"""
t = mi.tabulate(lambda x: 2 * x, -1)
f = (next(t), next(t), next(t))
self.assertEqual(f, (-2, 0, 2))
class TailTests(TestCase):
"""Tests for ``tail()``"""
def test_greater(self):
"""Length of iterable is greater than requested tail"""
self.assertEqual(list(mi.tail(3, 'ABCDEFG')), ['E', 'F', 'G'])
def test_equal(self):
"""Length of iterable is equal to the requested tail"""
self.assertEqual(
list(mi.tail(7, 'ABCDEFG')), ['A', 'B', 'C', 'D', 'E', 'F', 'G']
)
def test_less(self):
"""Length of iterable is less than requested tail"""
self.assertEqual(
list(mi.tail(8, 'ABCDEFG')), ['A', 'B', 'C', 'D', 'E', 'F', 'G']
)
class ConsumeTests(TestCase):
"""Tests for ``consume()``"""
def test_sanity(self):
"""Test basic functionality"""
r = (x for x in range(10))
mi.consume(r, 3)
self.assertEqual(3, next(r))
def test_null_consume(self):
"""Check the null case"""
r = (x for x in range(10))
mi.consume(r, 0)
self.assertEqual(0, next(r))
def test_negative_consume(self):
"""Check that negative consumption throws an error"""
r = (x for x in range(10))
self.assertRaises(ValueError, lambda: mi.consume(r, -1))
def test_total_consume(self):
"""Check that iterator is totally consumed by default"""
r = (x for x in range(10))
mi.consume(r)
self.assertRaises(StopIteration, lambda: next(r))
class NthTests(TestCase):
"""Tests for ``nth()``"""
def test_basic(self):
"""Make sure the nth item is returned"""
l = range(10)
for i, v in enumerate(l):
self.assertEqual(mi.nth(l, i), v)
def test_default(self):
"""Ensure a default value is returned when nth item not found"""
l = range(3)
self.assertEqual(mi.nth(l, 100, "zebra"), "zebra")
def test_negative_item_raises(self):
"""Ensure asking for a negative item raises an exception"""
self.assertRaises(ValueError, lambda: mi.nth(range(10), -3))
class AllEqualTests(TestCase):
"""Tests for ``all_equal()``"""
def test_true(self):
"""Everything is equal"""
self.assertTrue(mi.all_equal('aaaaaa'))
self.assertTrue(mi.all_equal([0, 0, 0, 0]))
def test_false(self):
"""Not everything is equal"""
self.assertFalse(mi.all_equal('aaaaab'))
self.assertFalse(mi.all_equal([0, 0, 0, 1]))
def test_tricky(self):
"""Not everything is identical, but everything is equal"""
items = [1, complex(1, 0), 1.0]
self.assertTrue(mi.all_equal(items))
def test_empty(self):
"""Return True if the iterable is empty"""
self.assertTrue(mi.all_equal(''))
self.assertTrue(mi.all_equal([]))
def test_one(self):
"""Return True if the iterable is singular"""
self.assertTrue(mi.all_equal('0'))
self.assertTrue(mi.all_equal([0]))
class QuantifyTests(TestCase):
"""Tests for ``quantify()``"""
def test_happy_path(self):
"""Make sure True count is returned"""
q = [True, False, True]
self.assertEqual(mi.quantify(q), 2)
def test_custom_predicate(self):
"""Ensure non-default predicates return as expected"""
q = range(10)
self.assertEqual(mi.quantify(q, lambda x: x % 2 == 0), 5)
class PadnoneTests(TestCase):
"""Tests for ``padnone()``"""
def test_happy_path(self):
"""wrapper iterator should return None indefinitely"""
r = range(2)
p = mi.padnone(r)
self.assertEqual([0, 1, None, None], [next(p) for _ in range(4)])
class NcyclesTests(TestCase):
"""Tests for ``ncycles()``"""
def test_happy_path(self):
"""cycle a sequence three times"""
r = ["a", "b", "c"]
n = mi.ncycles(r, 3)
self.assertEqual(
["a", "b", "c", "a", "b", "c", "a", "b", "c"],
list(n)
)
def test_null_case(self):
"""asking for 0 cycles should return an empty iterator"""
n = mi.ncycles(range(100), 0)
self.assertRaises(StopIteration, lambda: next(n))
def test_pathological_case(self):
"""asking for negative cycles should return an empty iterator"""
n = mi.ncycles(range(100), -10)
self.assertRaises(StopIteration, lambda: next(n))
class DotproductTests(TestCase):
"""Tests for ``dotproduct()``"""
def test_happy_path(self):
"""simple dotproduct example"""
self.assertEqual(400, mi.dotproduct([10, 10], [20, 20]))
class FlattenTests(TestCase):
"""Tests for ``flatten()``"""
def test_basic_usage(self):
"""ensure list of lists is flattened one level"""
f = [[0, 1, 2], [3, 4, 5]]
self.assertEqual(list(range(6)), list(mi.flatten(f)))
def test_single_level(self):
"""ensure list of lists is flattened only one level"""
f = [[0, [1, 2]], [[3, 4], 5]]
self.assertEqual([0, [1, 2], [3, 4], 5], list(mi.flatten(f)))
class RepeatfuncTests(TestCase):
"""Tests for ``repeatfunc()``"""
def test_simple_repeat(self):
"""test simple repeated functions"""
r = mi.repeatfunc(lambda: 5)
self.assertEqual([5, 5, 5, 5, 5], [next(r) for _ in range(5)])
def test_finite_repeat(self):
"""ensure limited repeat when times is provided"""
r = mi.repeatfunc(lambda: 5, times=5)
self.assertEqual([5, 5, 5, 5, 5], list(r))
def test_added_arguments(self):
"""ensure arguments are applied to the function"""
r = mi.repeatfunc(lambda x: x, 2, 3)
self.assertEqual([3, 3], list(r))
def test_null_times(self):
"""repeat 0 should return an empty iterator"""
r = mi.repeatfunc(range, 0, 3)
self.assertRaises(StopIteration, lambda: next(r))
class PairwiseTests(TestCase):
"""Tests for ``pairwise()``"""
def test_base_case(self):
"""ensure an iterable will return pairwise"""
p = mi.pairwise([1, 2, 3])
self.assertEqual([(1, 2), (2, 3)], list(p))
def test_short_case(self):
"""ensure an empty iterator if there aren't enough values to pair"""
p = mi.pairwise("a")
self.assertRaises(StopIteration, lambda: next(p))
class GrouperTests(TestCase):
"""Tests for ``grouper()``"""
def test_even(self):
"""Test when group size divides evenly into the length of
the iterable.
"""
self.assertEqual(
list(mi.grouper(3, 'ABCDEF')), [('A', 'B', 'C'), ('D', 'E', 'F')]
)
def test_odd(self):
"""Test when group size does not divide evenly into the length of the
iterable.
"""
self.assertEqual(
list(mi.grouper(3, 'ABCDE')), [('A', 'B', 'C'), ('D', 'E', None)]
)
def test_fill_value(self):
"""Test that the fill value is used to pad the final group"""
self.assertEqual(
list(mi.grouper(3, 'ABCDE', 'x')),
[('A', 'B', 'C'), ('D', 'E', 'x')]
)
class RoundrobinTests(TestCase):
"""Tests for ``roundrobin()``"""
def test_even_groups(self):
"""Ensure ordered output from evenly populated iterables"""
self.assertEqual(
list(mi.roundrobin('ABC', [1, 2, 3], range(3))),
['A', 1, 0, 'B', 2, 1, 'C', 3, 2]
)
def test_uneven_groups(self):
"""Ensure ordered output from unevenly populated iterables"""
self.assertEqual(
list(mi.roundrobin('ABCD', [1, 2], range(0))),
['A', 1, 'B', 2, 'C', 'D']
)
class PartitionTests(TestCase):
"""Tests for ``partition()``"""
def test_bool(self):
"""Test when pred() returns a boolean"""
lesser, greater = mi.partition(lambda x: x > 5, range(10))
self.assertEqual(list(lesser), [0, 1, 2, 3, 4, 5])
self.assertEqual(list(greater), [6, 7, 8, 9])
def test_arbitrary(self):
"""Test when pred() returns an integer"""
divisibles, remainders = mi.partition(lambda x: x % 3, range(10))
self.assertEqual(list(divisibles), [0, 3, 6, 9])
self.assertEqual(list(remainders), [1, 2, 4, 5, 7, 8])
class PowersetTests(TestCase):
"""Tests for ``powerset()``"""
def test_combinatorics(self):
"""Ensure a proper enumeration"""
p = mi.powerset([1, 2, 3])
self.assertEqual(
list(p),
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
)
class UniqueEverseenTests(TestCase):
"""Tests for ``unique_everseen()``"""
def test_everseen(self):
"""ensure duplicate elements are ignored"""
u = mi.unique_everseen('AAAABBBBCCDAABBB')
self.assertEqual(
['A', 'B', 'C', 'D'],
list(u)
)
def test_custom_key(self):
"""ensure the custom key comparison works"""
u = mi.unique_everseen('aAbACCc', key=str.lower)
self.assertEqual(list('abC'), list(u))
def test_unhashable(self):
"""ensure things work for unhashable items"""
iterable = ['a', [1, 2, 3], [1, 2, 3], 'a']
u = mi.unique_everseen(iterable)
self.assertEqual(list(u), ['a', [1, 2, 3]])
def test_unhashable_key(self):
"""ensure things work for unhashable items with a custom key"""
iterable = ['a', [1, 2, 3], [1, 2, 3], 'a']
u = mi.unique_everseen(iterable, key=lambda x: x)
self.assertEqual(list(u), ['a', [1, 2, 3]])
class UniqueJustseenTests(TestCase):
"""Tests for ``unique_justseen()``"""
def test_justseen(self):
"""ensure only last item is remembered"""
u = mi.unique_justseen('AAAABBBCCDABB')
self.assertEqual(list('ABCDAB'), list(u))
def test_custom_key(self):
"""ensure the custom key comparison works"""
u = mi.unique_justseen('AABCcAD', str.lower)
self.assertEqual(list('ABCAD'), list(u))
class IterExceptTests(TestCase):
"""Tests for ``iter_except()``"""
def test_exact_exception(self):
"""ensure the exact specified exception is caught"""
l = [1, 2, 3]
i = mi.iter_except(l.pop, IndexError)
self.assertEqual(list(i), [3, 2, 1])
def test_generic_exception(self):
"""ensure the generic exception can be caught"""
l = [1, 2]
i = mi.iter_except(l.pop, Exception)
self.assertEqual(list(i), [2, 1])
def test_uncaught_exception_is_raised(self):
"""ensure a non-specified exception is raised"""
l = [1, 2, 3]
i = mi.iter_except(l.pop, KeyError)
self.assertRaises(IndexError, lambda: list(i))
def test_first(self):
"""ensure first is run before the function"""
l = [1, 2, 3]
f = lambda: 25
i = mi.iter_except(l.pop, IndexError, f)
self.assertEqual(list(i), [25, 3, 2, 1])
class FirstTrueTests(TestCase):
"""Tests for ``first_true()``"""
def test_something_true(self):
"""Test with no keywords"""
self.assertEqual(mi.first_true(range(10)), 1)
def test_nothing_true(self):
"""Test default return value."""
self.assertEqual(mi.first_true([0, 0, 0]), False)
def test_default(self):
"""Test with a default keyword"""
self.assertEqual(mi.first_true([0, 0, 0], default='!'), '!')
def test_pred(self):
"""Test with a custom predicate"""
self.assertEqual(
mi.first_true([2, 4, 6], pred=lambda x: x % 3 == 0), 6
)
class RandomProductTests(TestCase):
"""Tests for ``random_product()``
Since random.choice() has different results with the same seed across
python versions 2.x and 3.x, these tests use highly probable events to
create predictable outcomes across platforms.
"""
def test_simple_lists(self):
"""Ensure that one item is chosen from each list in each pair.
Also ensure that each item from each list eventually appears in
the chosen combinations.
Odds are roughly 1 in 7.1 * 10e16 that one item from either list will
not be chosen after 100 samplings of one item from each list. Just to
be safe, better use a known random seed, too.
"""
nums = [1, 2, 3]
lets = ['a', 'b', 'c']
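# Editorial note, not part of the original test: the odds quoted in the
# docstring follow from a union bound.  A given element of a 3-item list is
# missed by one draw with probability 2/3, so after 100 independent draws
#     (2/3) ** 100 ~= 2.5e-18
# and across the 6 elements of the two lists the chance that some element
# never appears is about 6 * 2.5e-18 ~= 1.5e-17, i.e. roughly 1 in 7e16.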
n, m = zip(*[mi.random_product(nums, lets) for _ in range(100)])
n, m = set(n), set(m)
self.assertEqual(n, set(nums))
self.assertEqual(m, set(lets))
self.assertEqual(len(n), len(nums))
self.assertEqual(len(m), len(lets))
def test_list_with_repeat(self):
"""ensure multiple items are chosen, and that they appear to be chosen
from one list then the next, in proper order.
"""
nums = [1, 2, 3]
lets = ['a', 'b', 'c']
r = list(mi.random_product(nums, lets, repeat=100))
self.assertEqual(2 * 100, len(r))
n, m = set(r[::2]), set(r[1::2])
self.assertEqual(n, set(nums))
self.assertEqual(m, set(lets))
self.assertEqual(len(n), len(nums))
self.assertEqual(len(m), len(lets))
class RandomPermutationTests(TestCase):
"""Tests for ``random_permutation()``"""
def test_full_permutation(self):
"""ensure every item from the iterable is returned in a new ordering
15 elements have a 1 in 1.3 * 10e12 chance of appearing in sorted order, so
we fix a seed value just to be sure.
"""
i = range(15)
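# Editorial note, not part of the original test: there are
# 15! = 1,307,674,368,000 ~= 1.3e12 orderings of 15 elements, so a uniformly
# random permutation is the sorted one with probability about 1 in 1.3e12,
# which is the figure quoted in the docstring.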
r = mi.random_permutation(i)
self.assertEqual(set(i), set(r))
if i == r:
raise AssertionError("Values were not permuted")
def test_partial_permutation(self):
"""ensure all returned items are from the iterable, that the returned
permutation is of the desired length, and that all items eventually
get returned.
Sampling 100 permutations of length 5 from a set of 15 leaves a
(2/3)^100 chance that an item will not be chosen. Multiplied by 15
items, there is a 1 in 2.6e16 chance that at least 1 item will not
show up in the resulting output. Using a random seed will fix that.
"""
items = range(15)
item_set = set(items)
all_items = set()
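# Editorial note, not part of the original test: a given element is left out
# of one 5-of-15 permutation with probability 10/15 == 2/3, so after 100
# permutations the chance it never appears is (2/3) ** 100 ~= 2.5e-18.  A
# union bound over 15 elements gives about 15 * 2.5e-18 ~= 3.7e-17, i.e.
# roughly 1 in 2.7e16, matching the docstring's estimate.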
for _ in range(100):
permutation = mi.random_permutation(items, 5)
self.assertEqual(len(permutation), 5)
permutation_set = set(permutation)
self.assertLessEqual(permutation_set, item_set)
all_items |= permutation_set
self.assertEqual(all_items, item_set)
class RandomCombinationTests(TestCase):
"""Tests for ``random_combination()``"""
def test_pseudorandomness(self):
"""ensure different subsets of the iterable get returned over many
samplings of random combinations"""
items = range(15)
all_items = set()
for _ in range(50):
combination = mi.random_combination(items, 5)
all_items |= set(combination)
self.assertEqual(all_items, set(items))
def test_no_replacement(self):
"""ensure that elements are sampled without replacement"""
items = range(15)
for _ in range(50):
combination = mi.random_combination(items, len(items))
self.assertEqual(len(combination), len(set(combination)))
self.assertRaises(
ValueError, lambda: mi.random_combination(items, len(items) + 1)
)
class RandomCombinationWithReplacementTests(TestCase):
"""Tests for ``random_combination_with_replacement()``"""
def test_replacement(self):
"""ensure that elements are sampled with replacement"""
items = range(5)
combo = mi.random_combination_with_replacement(items, len(items) * 2)
self.assertEqual(2 * len(items), len(combo))
if len(set(combo)) == len(combo):
raise AssertionError("Combination contained no duplicates")
def test_pseudorandomness(self):
"""ensure different subsets of the iterable get returned over many
samplings of random combinations"""
items = range(15)
all_items = set()
for _ in range(50):
combination = mi.random_combination_with_replacement(items, 5)
all_items |= set(combination)
self.assertEqual(all_items, set(items))
class NthCombinationTests(TestCase):
def test_basic(self):
iterable = 'abcdefg'
r = 4
for index, expected in enumerate(combinations(iterable, r)):
actual = mi.nth_combination(iterable, r, index)
self.assertEqual(actual, expected)
def test_long(self):
actual = mi.nth_combination(range(180), 4, 2000000)
expected = (2, 12, 35, 126)
self.assertEqual(actual, expected)
class PrependTests(TestCase):
def test_basic(self):
value = 'a'
iterator = iter('bcdefg')
actual = list(mi.prepend(value, iterator))
expected = list('abcdefg')
self.assertEqual(actual, expected)
def test_multiple(self):
value = 'ab'
iterator = iter('cdefg')
actual = tuple(mi.prepend(value, iterator))
expected = ('ab',) + tuple('cdefg')
self.assertEqual(actual, expected)
|