| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null ⌀) |
|---|---|---|---|---|
MihaiMoldovanu/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_tags.py
|
75
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_tags
short_description: Module to manage tags in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage tags in oVirt/RHV. It can also manage assignments
of those tags to entities."
options:
name:
description:
- "Name of the tag to manage."
required: true
state:
description:
- "Should the tag be present/absent/attached/detached."
- "C(Note): I(attached) and I(detached) states are supported since version 2.4."
choices: ['present', 'absent', 'attached', 'detached']
default: present
description:
description:
- "Description of the tag to manage."
parent:
description:
- "Name of the parent tag."
vms:
description:
- "List of the VMs names, which should have assigned this tag."
hosts:
description:
- "List of the hosts names, which should have assigned this tag."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
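# A minimal sketch (not part of the original examples), assuming the
# ovirt_auth module has registered the "ovirt_auth" fact beforehand:
# - ovirt_tags:
#     auth: "{{ ovirt_auth }}"
#     name: mytag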
# Create the tag (if it does not exist) and assign it to VMs vm1 and vm2:
- ovirt_tags:
name: mytag
vms:
- vm1
- vm2
# Attach a tag to VM 'vm3', keeping the other tags already attached to the VM:
- ovirt_tags:
name: mytag
state: attached
vms:
- vm3
# Detach a tag from VM 'vm3', keeping the other tags already attached to the VM:
- ovirt_tags:
name: mytag
state: detached
vms:
- vm3
# To detach the tag from all VMs:
- ovirt_tags:
name: mytag
vms: []
# Remove tag
- ovirt_tags:
state: absent
name: mytag
'''
RETURN = '''
id:
description: ID of the tag which is managed
returned: On success if tag is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
tag:
description: "Dictionary of all the tag attributes. Tag attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
returned: On success if tag is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_id_by_name,
ovirt_full_argument_spec,
)
class TagsModule(BaseModule):
def build_entity(self):
return otypes.Tag(
name=self._module.params['name'],
description=self._module.params['description'],
parent=otypes.Tag(
name=self._module.params['parent'],
) if self._module.params['parent'] else None,
)
def post_create(self, entity):
self.update_check(entity)
def _update_tag_assignments(self, entity, name):
if self._module.params[name] is None:
return
state = self.param('state')
entities_service = getattr(self._connection.system_service(), '%s_service' % name)()
current_vms = [
vm.name
for vm in entities_service.list(search='tag=%s' % self._module.params['name'])
]
# Assign tags:
if state in ['present', 'attached', 'detached']:
for entity_name in self._module.params[name]:
entity_id = get_id_by_name(entities_service, entity_name)
tags_service = entities_service.service(entity_id).tags_service()
current_tags = [tag.name for tag in tags_service.list()]
# Assign the tag:
if state in ['attached', 'present']:
if self._module.params['name'] not in current_tags:
if not self._module.check_mode:
tags_service.add(
tag=otypes.Tag(
name=self._module.params['name'],
),
)
self.changed = True
# Detach the tag:
elif state == 'detached':
if self._module.params['name'] in current_tags:
tag_id = get_id_by_name(tags_service, self.param('name'))
if not self._module.check_mode:
tags_service.tag_service(tag_id).remove()
self.changed = True
# Unassign tags:
if state == 'present':
for entity_name in [e for e in current_vms if e not in self._module.params[name]]:
if not self._module.check_mode:
entity_id = get_id_by_name(entities_service, entity_name)
tags_service = entities_service.service(entity_id).tags_service()
tag_id = get_id_by_name(tags_service, self.param('name'))
tags_service.tag_service(tag_id).remove()
self.changed = True
def _get_parent(self, entity):
parent = None
if entity.parent:
parent = self._connection.follow_link(entity.parent).name
return parent
def update_check(self, entity):
self._update_tag_assignments(entity, 'vms')
self._update_tag_assignments(entity, 'hosts')
return (
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('parent'), self._get_parent(entity))
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'attached', 'detached'],
default='present',
),
name=dict(default=None, required=True),
description=dict(default=None),
parent=dict(default=None),
vms=dict(default=None, type='list'),
hosts=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
tags_service = connection.system_service().tags_service()
tags_module = TagsModule(
connection=connection,
module=module,
service=tags_service,
)
state = module.params['state']
if state in ['present', 'attached', 'detached']:
ret = tags_module.create()
elif state == 'absent':
ret = tags_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
hndrewaall/league
|
refs/heads/master
|
app/league/admin/__init__.py
|
4
|
# -*- coding: utf-8 -*-
"""The admin module."""
from . import views, utils # noqa
|
Gateworks/platform-external-chromium_org
|
refs/heads/imx_kk4.4.3_2.0.0-beta
|
tools/telemetry/telemetry/page/page_test.py
|
23
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.page import test_expectations
from telemetry.page.actions import all_page_actions
from telemetry.page.actions import interact
from telemetry.page.actions import navigate
from telemetry.page.actions import page_action
def _GetActionFromData(action_data):
action_name = action_data['action']
action = all_page_actions.FindClassWithName(action_name)
if not action:
logging.critical('Could not find an action named %s.', action_name)
logging.critical('Check the page set for a typo and check the error '
'log for possible Python loading/compilation errors.')
raise Exception('Action "%s" not found.' % action_name)
return action(action_data)
def GetCompoundActionFromPage(page, action_name, interactive=False):
if interactive:
return [interact.InteractAction()]
if not action_name:
return []
action_data_list = getattr(page, action_name)
if not isinstance(action_data_list, list):
action_data_list = [action_data_list]
action_list = []
for subaction_data in action_data_list:
subaction_name = subaction_data['action']
if hasattr(page, subaction_name):
subaction = GetCompoundActionFromPage(page, subaction_name, interactive)
else:
subaction = [_GetActionFromData(subaction_data)]
action_list += subaction * subaction_data.get('repeat', 1)
return action_list
class Failure(Exception):
"""Exception that can be thrown from PageMeasurement to indicate an
undesired but designed-for problem."""
pass
class PageTest(object):
"""A class styled on unittest.TestCase for creating page-specific tests."""
def __init__(self,
test_method_name,
action_name_to_run='',
needs_browser_restart_after_each_run=False,
discard_first_result=False,
clear_cache_before_each_run=False):
self.options = None
try:
self._test_method = getattr(self, test_method_name)
except AttributeError:
raise ValueError('No such method %s.%s' % (
self.__class__, test_method_name))
self._action_name_to_run = action_name_to_run
self._needs_browser_restart_after_each_run = (
needs_browser_restart_after_each_run)
self._discard_first_result = discard_first_result
self._clear_cache_before_each_run = clear_cache_before_each_run
self._close_tabs_before_run = True
# If the test overrides the TabForPage method, it is considered a multi-tab
# test. The main difference between this and a single-tab test is that we
# do not attempt recovery for the former if a tab or the browser crashes,
# because we don't know the current state of tabs (how many are open, etc.)
self.is_multi_tab_test = (self.__class__ is not PageTest and
self.TabForPage.__func__ is not
self.__class__.__bases__[0].TabForPage.__func__)
# _exit_requested is set to true when the test requests an early exit.
self._exit_requested = False
@property
def discard_first_result(self):
"""When set to True, the first run of the test is discarded. This is
useful for cases where it's desirable to have some test resource cached so
the first run of the test can warm things up. """
return self._discard_first_result
@discard_first_result.setter
def discard_first_result(self, discard):
self._discard_first_result = discard
@property
def clear_cache_before_each_run(self):
"""When set to True, the browser's disk and memory cache will be cleared
before each run."""
return self._clear_cache_before_each_run
@property
def close_tabs_before_run(self):
"""When set to True, all tabs are closed before running the test for the
first time."""
return self._close_tabs_before_run
@close_tabs_before_run.setter
def close_tabs_before_run(self, close_tabs):
self._close_tabs_before_run = close_tabs
def NeedsBrowserRestartAfterEachRun(self, browser): # pylint: disable=W0613
"""Override to specify browser restart after each run."""
return self._needs_browser_restart_after_each_run
def AddCommandLineOptions(self, parser):
"""Override to expose command-line options for this test.
The provided parser is an optparse.OptionParser instance and accepts all
normal optparse options. The parsed options are available in Run as
self.options."""
pass
def CustomizeBrowserOptions(self, options):
"""Override to add test-specific options to the BrowserOptions object"""
pass
def CustomizeBrowserOptionsForPage(self, page, options):
"""Add options specific to the test and the given page."""
if not self.CanRunForPage(page):
return
interactive = options and options.interactive
for action in GetCompoundActionFromPage(
page, self._action_name_to_run, interactive):
action.CustomizeBrowserOptions(options)
def WillStartBrowser(self, browser):
"""Override to manipulate the browser environment before it launches."""
pass
def DidStartBrowser(self, browser):
"""Override to customize the browser right after it has launched."""
pass
def CanRunForPage(self, page): # pylint: disable=W0613
"""Override to customize if the test can be ran for the given page."""
return True
def WillRunTest(self):
"""Override to do operations before the page set(s) are navigated."""
pass
def DidRunTest(self, browser, results):
"""Override to do operations after all page set(s) are completed.
This will occur before the browser is torn down.
"""
pass
def WillRunPageRepeats(self, page):
"""Override to do operations before each page is iterated over."""
pass
def DidRunPageRepeats(self, page):
"""Override to do operations after each page is iterated over."""
pass
def DidStartHTTPServer(self, tab):
"""Override to do operations after the HTTP server is started."""
pass
def WillNavigateToPage(self, page, tab):
"""Override to do operations before the page is navigated, notably Telemetry
will already have performed the following operations on the browser before
calling this function:
* Ensure only one tab is open.
* Call WaitForDocumentReadyStateToComplete on the tab."""
pass
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated and after
all waiting for completion has occurred."""
pass
def WillRunActions(self, page, tab):
"""Override to do operations before running the actions on the page."""
pass
def DidRunActions(self, page, tab):
"""Override to do operations after running the actions on the page."""
pass
def WillRunAction(self, page, tab, action):
"""Override to do operations before running the action on the page."""
pass
def DidRunAction(self, page, tab, action):
"""Override to do operations after running the action on the page."""
pass
def CreatePageSet(self, args, options): # pylint: disable=W0613
"""Override to make this test generate its own page set instead of
allowing arbitrary page sets entered from the command-line."""
return None
def CreateExpectations(self, page_set): # pylint: disable=W0613
"""Override to make this test generate its own expectations instead of
any that may have been defined in the page set."""
return test_expectations.TestExpectations()
def TabForPage(self, page, browser): # pylint: disable=W0613
"""Override to select a different tab for the page. For instance, to
create a new tab for every page, return browser.tabs.New()."""
return browser.tabs[0]
def ValidatePageSet(self, page_set):
"""Override to examine the page set before the test run. Useful for
example to validate that the pageset can be used with the test."""
pass
def Run(self, options, page, tab, results):
self.options = options
interactive = options and options.interactive
compound_action = GetCompoundActionFromPage(
page, self._action_name_to_run, interactive)
self.WillRunActions(page, tab)
self._RunCompoundAction(page, tab, compound_action)
self.DidRunActions(page, tab)
try:
self._test_method(page, tab, results)
finally:
self.options = None
def _RunCompoundAction(self, page, tab, actions, run_setup_methods=True):
for i, action in enumerate(actions):
prev_action = actions[i - 1] if i > 0 else None
next_action = actions[i + 1] if i < len(actions) - 1 else None
if (action.RunsPreviousAction() and
next_action and next_action.RunsPreviousAction()):
raise page_action.PageActionFailed('Consecutive actions cannot both '
'have RunsPreviousAction() == True.')
if not (next_action and next_action.RunsPreviousAction()):
action.WillRunAction(page, tab)
if run_setup_methods:
self.WillRunAction(page, tab, action)
try:
action.RunAction(page, tab, prev_action)
finally:
if run_setup_methods:
self.DidRunAction(page, tab, action)
# Note that we must not call util.CloseConnections here. Many tests
# navigate to a URL in the first action and then wait for a condition
# in the second action. Calling util.CloseConnections here often
# aborts resource loads performed by the page.
def RunNavigateSteps(self, page, tab):
"""Navigates the tab to the page URL attribute.
Runs the 'navigate_steps' page attribute as a compound action.
"""
navigate_actions = GetCompoundActionFromPage(page, 'navigate_steps')
if not any(isinstance(action, navigate.NavigateAction)
for action in navigate_actions):
raise page_action.PageActionFailed(
'No NavigateAction in navigate_steps')
self._RunCompoundAction(page, tab, navigate_actions, False)
def IsExiting(self):
return self._exit_requested
def RequestExit(self):
self._exit_requested = True
@property
def action_name_to_run(self):
return self._action_name_to_run
|
durai145/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/tvplay.py
|
86
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
parse_iso8601,
qualities,
)
class TVPlayIE(InfoExtractor):
IE_DESC = 'TV3Play and related services'
_VALID_URL = r'''(?x)http://(?:www\.)?
(?:tvplay\.lv/parraides|
tv3play\.lt/programos|
play\.tv3\.lt/programos|
tv3play\.ee/sisu|
tv3play\.se/program|
tv6play\.se/program|
tv8play\.se/program|
tv10play\.se/program|
tv3play\.no/programmer|
viasat4play\.no/programmer|
tv6play\.no/programmer|
tv3play\.dk/programmer|
play\.novatv\.bg/programi
)/[^/]+/(?P<id>\d+)
'''
_TESTS = [
{
'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
'info_dict': {
'id': '418113',
'ext': 'flv',
'title': 'Kādi ir īri? - Viņas melo labāk',
'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.',
'duration': 25,
'timestamp': 1406097056,
'upload_date': '20140723',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://play.tv3.lt/programos/moterys-meluoja-geriau/409229?autostart=true',
'info_dict': {
'id': '409229',
'ext': 'flv',
'title': 'Moterys meluoja geriau',
'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e',
'duration': 1330,
'timestamp': 1403769181,
'upload_date': '20140626',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true',
'info_dict': {
'id': '238551',
'ext': 'flv',
'title': 'Kodu keset linna 398537',
'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701',
'duration': 1257,
'timestamp': 1292449761,
'upload_date': '20101215',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true',
'info_dict': {
'id': '395385',
'ext': 'flv',
'title': 'Husräddarna S02E07',
'description': 'md5:f210c6c89f42d4fc39faa551be813777',
'duration': 2574,
'timestamp': 1400596321,
'upload_date': '20140520',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true',
'info_dict': {
'id': '266636',
'ext': 'flv',
'title': 'Den sista dokusåpan S01E08',
'description': 'md5:295be39c872520221b933830f660b110',
'duration': 1492,
'timestamp': 1330522854,
'upload_date': '20120229',
'age_limit': 18,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true',
'info_dict': {
'id': '282756',
'ext': 'flv',
'title': 'Antikjakten S01E10',
'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8',
'duration': 2646,
'timestamp': 1348575868,
'upload_date': '20120925',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true',
'info_dict': {
'id': '230898',
'ext': 'flv',
'title': 'Anna Anka søker assistent - Ep. 8',
'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474',
'duration': 2656,
'timestamp': 1277720005,
'upload_date': '20100628',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true',
'info_dict': {
'id': '21873',
'ext': 'flv',
'title': 'Budbringerne program 10',
'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d',
'duration': 1297,
'timestamp': 1254205102,
'upload_date': '20090929',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true',
'info_dict': {
'id': '361883',
'ext': 'flv',
'title': 'Hotelinspektør Alex Polizzi - Ep. 10',
'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81',
'duration': 2594,
'timestamp': 1393236292,
'upload_date': '20140224',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://play.novatv.bg/programi/zdravei-bulgariya/624952?autostart=true',
'info_dict': {
'id': '624952',
'ext': 'flv',
'title': 'Здравей, България (12.06.2015 г.) ',
'description': 'md5:99f3700451ac5bb71a260268b8daefd7',
'duration': 8838,
'timestamp': 1434100372,
'upload_date': '20150612',
},
'params': {
# rtmp download
'skip_download': True,
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON')
if video['is_geo_blocked']:
self.report_warning(
'This content might not be available in your country due to copyright reasons')
streams = self._download_json(
'http://playapi.mtgx.tv/v1/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON')
quality = qualities(['hls', 'medium', 'high'])
formats = []
for format_id, video_url in streams['streams'].items():
if not video_url or not isinstance(video_url, compat_str):
continue
fmt = {
'format_id': format_id,
'preference': quality(format_id),
}
if video_url.startswith('rtmp'):
m = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
if not m:
continue
fmt.update({
'ext': 'flv',
'url': m.group('url'),
'app': m.group('app'),
'play_path': m.group('playpath'),
})
elif video_url.endswith('.f4m'):
formats.extend(self._extract_f4m_formats(
video_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id))
continue
else:
fmt.update({
'url': video_url,
})
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'title': video['title'],
'description': video['description'],
'duration': video['duration'],
'timestamp': parse_iso8601(video['created_at']),
'view_count': video['views']['total'],
'age_limit': video.get('age_limit', 0),
'formats': formats,
}
|
tanmaykm/edx-platform
|
refs/heads/master
|
lms/djangoapps/verified_track_content/views.py
|
26
|
"""
View methods for verified track content.
"""
from util.json_request import expect_json, JsonResponse
from django.contrib.auth.decorators import login_required
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access
from verified_track_content.models import VerifiedTrackCohortedCourse
@expect_json
@login_required
def cohorting_settings(request, course_key_string):
"""
The handler for verified track cohorting requests.
This will raise a 404 if the user is not staff.
Returns a JSON representation of whether or not the course has verified track cohorting enabled.
The "verified_cohort_name" field will only be present if "enabled" is True.
Example:
>>> example = {
>>> "enabled": True,
>>> "verified_cohort_name" : "Micromasters"
>>> }
"""
course_key = CourseKey.from_string(course_key_string)
get_course_with_access(request.user, 'staff', course_key)
settings = {}
verified_track_cohort_enabled = VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key)
settings['enabled'] = verified_track_cohort_enabled
if verified_track_cohort_enabled:
settings['verified_cohort_name'] = VerifiedTrackCohortedCourse.verified_cohort_name_for_course(course_key)
return JsonResponse(settings)
|
jejimenez/django
|
refs/heads/master
|
tests/utils_tests/test_numberformat.py
|
307
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):
def test_format_number(self):
self.assertEqual(nformat(1234, '.'), '1234')
self.assertEqual(nformat(1234.2, '.'), '1234.2')
self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','),
'1234')
self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',',
force_grouping=True), '12,34')
self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3')
def test_format_string(self):
self.assertEqual(nformat('1234', '.'), '1234')
self.assertEqual(nformat('1234.2', '.'), '1234.2')
self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','),
'1234')
self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',',
force_grouping=True), '12,34')
self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3')
self.assertEqual(nformat('10000', '.', grouping=3,
thousand_sep='comma', force_grouping=True),
'10comma000')
def test_large_number(self):
most_max = ('{}179769313486231570814527423731704356798070567525844996'
'598917476803157260780028538760589558632766878171540458953'
'514382464234321326889464182768467546703537516986049910576'
'551282076245490090389328944075868508455133942304583236903'
'222948165808559332123348274797826204144723168738177180919'
'29988125040402618412485836{}')
most_max2 = ('{}35953862697246314162905484746340871359614113505168999'
'31978349536063145215600570775211791172655337563430809179'
'07028764928468642653778928365536935093407075033972099821'
'15310256415249098018077865788815173701691026788460916647'
'38064458963316171186642466965495956524082894463374763543'
'61838599762500808052368249716736')
int_max = int(float_info.max)
self.assertEqual(nformat(int_max, '.'), most_max.format('', '8'))
self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9'))
self.assertEqual(nformat(int_max * 2, '.'), most_max2.format(''))
self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8'))
self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9'))
self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-'))
def test_decimal_numbers(self):
self.assertEqual(nformat(Decimal('1234'), '.'), '1234')
self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2')
self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00')
self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234')
self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3')
self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001')
def test_decimal_subclass(self):
class EuroDecimal(Decimal):
"""
Wrapper for Decimal which prefixes each amount with the € symbol.
"""
def __format__(self, specifier, **kwargs):
amount = super(EuroDecimal, self).__format__(specifier, **kwargs)
return '€ {}'.format(amount)
price = EuroDecimal('1.23')
self.assertEqual(nformat(price, ','), '€ 1,23')
|
vjmac15/Lyilis
|
refs/heads/master
|
lib/emoji/core.py
|
1
|
# -*- coding: UTF-8 -*-
"""
emoji.core
~~~~~~~~~~
Core components for emoji.
"""
import re
import sys
from emoji import unicode_codes
__all__ = ['emojize', 'demojize', 'get_emoji_regexp']
PY2 = sys.version_info[0] == 2
_EMOJI_REGEXP = None
_DEFAULT_DELIMITER = ":"
def emojize(string, use_aliases=False, delimiters=(_DEFAULT_DELIMITER,_DEFAULT_DELIMITER)):
"""Replace emoji names in a string with unicode codes.
:param string: String containing emoji names.
:param use_aliases: (optional) Enable emoji aliases. See ``emoji.UNICODE_EMOJI_ALIAS``.
:param delimiters: (optional) Use delimiters other than _DEFAULT_DELIMITER
>>> import emoji
>>> print(emoji.emojize("Python is fun :thumbsup:", use_aliases=True))
Python is fun 👍
>>> print(emoji.emojize("Python is fun :thumbs_up_sign:"))
Python is fun 👍
>>> print(emoji.emojize("Python is fun __thumbs_up_sign__", delimiters = ("__", "__")))
Python is fun 👍
"""
pattern = re.compile(u'(%s[a-zA-Z0-9\+\-_&.ô’Åéãíç()!#*]+%s)' % delimiters)
def replace(match):
mg = match.group(1).replace(delimiters[0], _DEFAULT_DELIMITER).replace(delimiters[1], _DEFAULT_DELIMITER)
if use_aliases:
return unicode_codes.EMOJI_ALIAS_UNICODE.get(mg, mg)
else:
return unicode_codes.EMOJI_UNICODE.get(mg, mg)
return pattern.sub(replace, string)
def demojize(string, delimiters=(_DEFAULT_DELIMITER,_DEFAULT_DELIMITER)):
"""Replace unicode emoji in a string with emoji shortcodes. Useful for storage.
:param string: String containing unicode characters. MUST BE UNICODE.
:param delimiters: (optional) Use delimiters other than _DEFAULT_DELIMITER
>>> import emoji
>>> print(emoji.emojize("Python is fun :thumbs_up_sign:"))
Python is fun 👍
>>> print(emoji.demojize(u"Python is fun 👍"))
Python is fun :thumbs_up_sign:
>>> print(emoji.demojize("Unicode is tricky 😯".decode('utf-8')))
Unicode is tricky :hushed_face:
>>> print(emoji.demojize("Unicode is tricky 😯".decode('utf-8'), delimiters=(" __", "__ ")))
Unicode is tricky :hushed_face:
"""
def replace(match):
val = unicode_codes.UNICODE_EMOJI.get(match.group(0), match.group(0))
return delimiters[0] + val[1:-1] + delimiters[1]
return get_emoji_regexp().sub(replace, string)
def get_emoji_regexp():
"""Returns compiled regular expression that matches emojis defined in
``emoji.EMOJI_UNICODE``. The regular expression is only compiled once.
"""
global _EMOJI_REGEXP
# Build emoji regexp once
if _EMOJI_REGEXP is None:
# Sort emojis by length to make sure multi-character emojis are
# matched first
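# (illustrative note, not in the original source: Python's re alternation
# takes the first alternative that matches rather than the longest one, so
# a multi-codepoint emoji sequence must appear in the pattern before any
# shorter emoji it starts with, or it would be split into pieces)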
emojis = sorted(unicode_codes.EMOJI_UNICODE.values(), key=len,
reverse=True)
pattern = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'
_EMOJI_REGEXP = re.compile(pattern)
return _EMOJI_REGEXP
|
40223136/2015w11
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/etree/__init__.py
|
1200
|
# $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
|
fnavarrogonzalez/RankingEmpresas
|
refs/heads/master
|
rankingempresas/tests.py
|
1
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import Permission, User
from django.contrib.auth import authenticate, login
from rankingempresas.models import *
from django.test import Client
# Create your tests here.
class SomeViewsTest(TestCase):
def setUp(self):
user = User.objects.create_user("prueba", "prueba@prueba.com", 123123)
company = Company.objects.create(name=u'compañia de prueba', address="calle falsa 123", telephone="56456156",
email="prueba@compañiadeprueba.com")
def test_login(self):
c = Client()
response = self.client.get('/students/login/')
self.assertEqual(response.status_code, 200)
response = c.post('/students/login/', {'username': 'prueba'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['error'], "No has introducido usuario/contraseña")
response = c.post('/students/login/', {'username': 'prueba', 'password':"123123"})
# if login is successful, redirect to ranking
self.assertEqual(response.status_code, 302)
def test_company(self):
c = Client()
response = self.client.get('/company/1/view/')
self.assertEqual(response.status_code, 200)
self.assertEquals(response.context['name'], u'compañia de prueba')
self.assertEquals(response.context['address'], "calle falsa 123")
self.assertEquals(response.context['telephone'], "56456156")
self.assertEquals(response.context['email'], u'prueba@compañiadeprueba.com')
def test_vote(self):
c = Client()
response = self.client.get('/company/1/score/add/')
self.assertEqual(response.status_code, 200)
response = c.post('/company/1/score/add/', {'mark': 20, 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 200)
self.assertEquals(response.context['error'], "Debes estar autenticado!")
response = c.post('/students/login/', {'username': 'prueba', 'password':"123123"})
self.assertEqual(response.status_code, 302)
# with a mark less than 1
response = c.post('/company/1/score/add/', {'mark': '-5', 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 200)
self.assertEquals(response.context['error'], "La nota debe ser entre 1 y 10")
# with a mark greater than 10
response = c.post('/company/1/score/add/', {'mark': 20, 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 200)
self.assertEquals(response.context['error'], "La nota debe ser entre 1 y 10")
# with a string
response = c.post('/company/1/score/add/', {'mark': 'hola', 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 200)
self.assertEquals(response.context['error'], "La nota debe ser un entero")
# a valid score
response = c.post('/company/1/score/add/', {'mark': '10', 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 302)
#try another vote
response = c.post('/company/1/score/add/', {'mark': '10', 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 200)
self.assertEquals(response.context['error'], "Ya has votado")
def test_ranking(self):
c = Client()
response = c.post('/ranking/', {'mark': '10', 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 200)
company = response.context['companies'][0]
self.assertEquals(company.name, u'compañia de prueba')
self.assertEquals(company.total_score, 0)
self.assertEquals(company.number_votes, 0)
response = c.post('/students/login/', {'username': 'prueba', 'password':"123123"})
self.assertEqual(response.status_code, 302)
response = c.post('/company/1/score/add/', {'mark': '10', 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 302)
response = c.post('/ranking/', {'mark': '10', 'comment':'Estuvo de luho premoh!'})
self.assertEqual(response.status_code, 200)
company = response.context['companies'][0]
self.assertEquals(company.name, u'compañia de prueba')
self.assertEquals(company.total_score, 10)
self.assertEquals(company.number_votes, 1)
|
Stanford-Online/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/content/course_overviews/migrations/0001_initial.py
|
13
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
from opaque_keys.edx.django.models import CourseKeyField, UsageKeyField
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CourseOverview',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('version', models.IntegerField()),
('id', CourseKeyField(max_length=255, serialize=False, primary_key=True, db_index=True)),
('_location', UsageKeyField(max_length=255)),
('display_name', models.TextField(null=True)),
('display_number_with_default', models.TextField()),
('display_org_with_default', models.TextField()),
('start', models.DateTimeField(null=True)),
('end', models.DateTimeField(null=True)),
('advertised_start', models.TextField(null=True)),
('course_image_url', models.TextField()),
('facebook_url', models.TextField(null=True)),
('social_sharing_url', models.TextField(null=True)),
('end_of_course_survey_url', models.TextField(null=True)),
('certificates_display_behavior', models.TextField(null=True)),
('certificates_show_before_end', models.BooleanField(default=False)),
('cert_html_view_enabled', models.BooleanField(default=False)),
('has_any_active_web_certificate', models.BooleanField(default=False)),
('cert_name_short', models.TextField()),
('cert_name_long', models.TextField()),
('lowest_passing_grade', models.DecimalField(null=True, max_digits=5, decimal_places=2)),
('days_early_for_beta', models.FloatField(null=True)),
('mobile_available', models.BooleanField(default=False)),
('visible_to_staff_only', models.BooleanField(default=False)),
('_pre_requisite_courses_json', models.TextField()),
('enrollment_start', models.DateTimeField(null=True)),
('enrollment_end', models.DateTimeField(null=True)),
('enrollment_domain', models.TextField(null=True)),
('invitation_only', models.BooleanField(default=False)),
('max_student_enrollments_allowed', models.IntegerField(null=True)),
],
),
migrations.CreateModel(
name='CourseOverviewTab',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tab_id', models.CharField(max_length=50)),
('course_overview', models.ForeignKey(related_name='tabs', to='course_overviews.CourseOverview', on_delete=models.CASCADE)),
],
),
]
|
sbidoul/odoo
|
refs/heads/8.0
|
addons/survey/wizard/__init__.py
|
385
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey_email_compose_message
|
mozvip/CouchPotatoServer
|
refs/heads/develop
|
libs/requests/packages/chardet/mbcsgroupprober.py
|
236
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from charsetgroupprober import CharSetGroupProber
from utf8prober import UTF8Prober
from sjisprober import SJISProber
from eucjpprober import EUCJPProber
from gb2312prober import GB2312Prober
from euckrprober import EUCKRProber
from big5prober import Big5Prober
from euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
UTF8Prober(),
SJISProber(),
EUCJPProber(),
GB2312Prober(),
EUCKRProber(),
Big5Prober(),
EUCTWProber()]
self.reset()
|
ayoubserti/winpty
|
refs/heads/master
|
build-gyp/test/mac/gyptest-non-strs-flattened-to-env.py
|
34
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that list xcode_settings are flattened before being exported to the
environment.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'non-strs-flattened-to-env'
INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, '''\
\t<key>My Variable</key>
\t<string>some expansion</string>''')
test.must_contain(info_plist, '''\
\t<key>CFlags</key>
\t<string>-fstack-protector-all -fno-strict-aliasing -DS="A Space"</string>''')
test.pass_test()
|
frouty/odoo_oph
|
refs/heads/dev_70
|
openerp/tools/import_email.py
|
105
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os, sys
import re
import smtplib
import email, mimetypes
from email.Header import decode_header
from email.MIMEText import MIMEText
import xmlrpclib
warn_msg = """
Bonjour,
Le message avec le sujet "%s" n'a pu être archivé dans l'ERP.
""".decode('utf-8')
class EmailParser(object):
def __init__(self, headers, dispatcher):
self.headers = headers
self.dispatcher = dispatcher
def parse(self, msg):
dispatcher((self.headers, msg))
class CommandDispatcher(object):
def __init__(self, receiver):
self.receiver = receiver
def __call__(self, request):
return self.receiver(request)
class RPCProxy(object):
def __init__(self, uid, passwd, host='localhost', port=8069, path='object'):
self.rpc = xmlrpclib.ServerProxy('http://%s:%s/%s' % (host, port, path))
self.user_id = uid
self.passwd = passwd
def __call__(self, request):
return self.rpc.execute(self.user_id, self.passwd, *request)
class ReceiverEmail2Event(object):
email_re = re.compile(r"""
([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part
@ # mandatory @ sign
[a-zA-Z0-9][\w\.-]* # domain must start with a letter
\.
[a-z]{2,3} # TLD
)
""", re.VERBOSE)
project_re = re.compile(r"^ *\[?(\d{4}\.?\d{0,3})\]?", re.UNICODE)
def __init__(self, rpc):
self.rpc = rpc
def get_addresses(self, headers, msg):
hcontent = ''
for header in [h for h in headers if msg.has_key(h)]:
hcontent += msg[header]
return self.email_re.findall(hcontent)
def get_partners(self, headers, msg):
alladdresses = self.get_addresses(headers, msg)
address_ids = self.rpc(('res.partner', 'search', [('email', 'in', alladdresses)]))
addresses = self.rpc(('res.partner', 'read', address_ids))
return [x['partner_id'][0] for x in addresses]
def __call__(self, request):
headers, msg = request
partners = self.get_partners(headers, msg)
subject = u''
for string, charset in decode_header(msg['Subject']):
if charset:
subject += string.decode(charset)
else:
subject += unicode(string)
if partners:
self.save_mail(msg, subject, partners)
else:
warning = MIMEText((warn_msg % (subject,)).encode('utf-8'), 'plain', 'utf-8')
warning['Subject'] = 'Message de OpenERP'
warning['From'] = 'erp@steel-sa.com'
warning['To'] = msg['From']
s = smtplib.SMTP()
s.connect()
s.sendmail('erp@steel-sa.com', self.email_re.findall(msg['From']), warning.as_string())
s.close()
if msg.is_multipart():
for message in [m for m in msg.get_payload() if m.get_content_type() == 'message/rfc822']:
self((headers, message.get_payload()[0]))
def save_mail(self, msg, subject, partners):
counter, description = 1, u''
if msg.is_multipart():
for part in msg.get_payload():
stockdir = os.path.join('emails', msg['Message-Id'][1:-1])
newdir = os.path.join('/tmp', stockdir)
filename = part.get_filename()
if not filename:
ext = mimetypes.guess_extension(part.get_type())
if not ext:
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
if part.get_content_maintype() == 'multipart':
continue
elif part.get_content_maintype() == 'text':
if part.get_content_subtype() == 'plain':
description += part.get_payload(decode=1).decode(part.get_charsets()[0])
description += u'\n\nVous trouverez les éventuels fichiers dans le répertoire: %s' % stockdir
continue
else:
description += u'\n\nCe message est en "%s", vous trouverez ce texte dans le répertoire: %s' % (part.get_content_type(), stockdir)
elif part.get_content_type() == 'message/rfc822':
continue
if not os.path.isdir(newdir):
os.mkdir(newdir)
counter += 1
fd = file(os.path.join(newdir, filename), 'w')
fd.write(part.get_payload(decode=1))
fd.close()
else:
description = msg.get_payload(decode=1).decode(msg.get_charsets()[0])
project = self.project_re.search(subject)
if project:
project = project.groups()[0]
else:
project = ''
for partner in partners:
self.rpc(('res.partner.event', 'create', {'name' : subject, 'partner_id' : partner, 'description' : description, 'project' : project}))
if __name__ == '__main__':
rpc_dispatcher = CommandDispatcher(RPCProxy(4, 'admin'))
dispatcher = CommandDispatcher(ReceiverEmail2Event(rpc_dispatcher))
parser = EmailParser(['To', 'Cc', 'From'], dispatcher)
parser.parse(email.message_from_file(sys.stdin))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
AxelDelmas/ansible
|
refs/heads/devel
|
lib/ansible/vars/__init__.py
|
15
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import defaultdict
from collections import MutableMapping
from jinja2.exceptions import UndefinedError
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError
from ansible.parsing import DataLoader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.debug import debug
from ansible.utils.vars import combine_vars
from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import UnsafeProxy
CACHED_VARS = dict()
def preprocess_vars(a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
class VariableManager:
def __init__(self):
self._fact_cache = FactCache()
self._nonpersistent_fact_cache = defaultdict(dict)
self._vars_cache = defaultdict(dict)
self._extra_vars = defaultdict(dict)
self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
self._inventory = None
self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
def _get_cache_entry(self, play=None, host=None, task=None):
play_id = "NONE"
if play:
play_id = play._uuid
host_id = "NONE"
if host:
host_id = host.get_name()
task_id = "NONE"
if task:
task_id = task._uuid
return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)
@property
def extra_vars(self):
''' ensures a clean copy of the extra_vars is made '''
return self._extra_vars.copy()
@extra_vars.setter
def extra_vars(self, value):
''' ensures a clean copy of the extra_vars is used to set the value '''
assert isinstance(value, MutableMapping)
self._extra_vars = value.copy()
def set_inventory(self, inventory):
self._inventory = inventory
def _preprocess_vars(self, a):
'''
Ensures that vars contained in the parameter passed in are
returned as a list of dictionaries, to ensure for instance
that vars loaded from a file conform to an expected state.
'''
if a is None:
return None
elif not isinstance(a, list):
data = [ a ]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
sets of variables being returned due to the additional context).
The order of precedence is:
- play->roles->get_default_vars (if there is a play context)
- group_vars_files[host] (if there is a host context)
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
- vars_cache[host] (if there is a host context)
- extra vars
'''
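# (illustrative note, not in the original source: if group_vars set x: 1,
# host_vars set x: 2 and the user passes --extra-vars "x=3", the merged
# result returned by this method has x == 3, because each source in the
# precedence list above overrides the ones before it)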
debug("in VariableManager get_vars()")
cache_entry = self._get_cache_entry(play=play, host=host, task=task)
if cache_entry in CACHED_VARS and use_cache:
debug("vars are cached, returning them now")
return CACHED_VARS[cache_entry]
all_vars = defaultdict(dict)
if play:
# first we compile any vars specified in defaults/main.yml
# for all roles within the specified play
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_default_vars())
# if we have a task in this context, and that task has a role, make
# sure it sees its defaults above any other roles, as we previously
# (v1) made sure each task had a copy of its roles default vars
if task and task._role is not None:
all_vars = combine_vars(all_vars, task._role.get_default_vars())
if host:
# next, if a host is specified, we load any vars from group_vars
# files and then any vars from host_vars files which may apply to
# this host or the groups it belongs to
# we merge in vars from groups specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_group_vars())
# then we merge in the special 'all' group_vars first, if they exist
if 'all' in self._group_vars_files:
data = preprocess_vars(self._group_vars_files['all'])
for item in data:
all_vars = combine_vars(all_vars, item)
for group in host.get_groups():
if group.name in self._group_vars_files and group.name != 'all':
for data in self._group_vars_files[group.name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# then we merge in vars from the host specified in the inventory (INI or script)
all_vars = combine_vars(all_vars, host.get_vars())
# then we merge in the host_vars/<hostname> file, if it exists
host_name = host.get_name()
if host_name in self._host_vars_files:
for data in self._host_vars_files[host_name]:
data = preprocess_vars(data)
for item in data:
all_vars = combine_vars(all_vars, item)
# finally, the facts caches for this host, if it exists
try:
host_facts = self._fact_cache.get(host.name, dict())
for k in host_facts.keys():
if host_facts[k] is not None and not isinstance(host_facts[k], UnsafeProxy):
host_facts[k] = UnsafeProxy(host_facts[k])
all_vars = combine_vars(all_vars, host_facts)
except KeyError:
pass
if play:
all_vars = combine_vars(all_vars, play.get_vars())
for vars_file_item in play.get_vars_files():
try:
# create a set of temporary vars here, which incorporate the
# extra vars so we can properly template the vars_files entries
temp_vars = combine_vars(all_vars, self._extra_vars)
templar = Templar(loader=loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
# support "conditional includes" for vars_files, which mimics
# the with_first_found mechanism.
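# (illustrative note, not in the original source: a play might specify
#   vars_files:
#     - [ "vars/{{ ansible_os_family }}.yml", "vars/default.yml" ]
# and the first file in the inner list that exists is the one loaded,
# just as with_first_found would pick it)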
vars_file_list = templar.template(vars_file_item)
if not isinstance(vars_file_list, list):
vars_file_list = [ vars_file_list ]
# now we iterate through the (potential) files, and break out
# as soon as we read one from the list. If none are found, we
# raise an error, which is silently ignored at this point.
for vars_file in vars_file_list:
data = preprocess_vars(loader.load_from_file(vars_file))
if data is not None:
for item in data:
all_vars = combine_vars(all_vars, item)
break
else:
raise AnsibleError("vars file %s was not found" % vars_file_item)
except UndefinedError:
continue
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_vars())
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_vars())
all_vars = combine_vars(all_vars, task.get_vars())
if host:
all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
all_vars = combine_vars(all_vars, self._extra_vars)
# FIXME: make sure all special vars are here
# Finally, we create special vars
all_vars['playbook_dir'] = loader.get_basedir()
if host:
all_vars['groups'] = [group.name for group in host.get_groups()]
if self._inventory is not None:
all_vars['groups'] = self._inventory.groups_list()
if include_hostvars:
hostvars = HostVars(vars_manager=self, play=play, inventory=self._inventory, loader=loader)
all_vars['hostvars'] = hostvars
if task:
if task._role:
all_vars['role_path'] = task._role._role_path
if self._inventory is not None:
all_vars['inventory_dir'] = self._inventory.basedir()
if play:
# add the list of hosts in the play, as adjusted for limit/filters
# DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
# however this would take work in the templating engine, so for now
# we'll add both so we can give users something transitional to use
host_list = [x.name for x in self._inventory.get_hosts()]
all_vars['play_hosts'] = host_list
all_vars['ansible_play_hosts'] = host_list
# the 'omit' value allows params to be left out if the variable they are based on is undefined
all_vars['omit'] = self._omit_token
all_vars['ansible_version'] = CLI.version_info(gitinfo=False)
if 'hostvars' in all_vars and host:
all_vars['vars'] = all_vars['hostvars'][host.get_name()]
#CACHED_VARS[cache_entry] = all_vars
debug("done with get_vars()")
return all_vars
def _get_inventory_basename(self, path):
'''
Returns the basename minus the extension of the given path, so the
bare filename can be matched against host/group names later
'''
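# (illustrative note, not in the original source: a hypothetical path
# 'group_vars/webservers.yml' yields 'webservers', while a file with an
# unrecognised extension such as 'group_vars/webservers.txt' is returned
# as 'webservers.txt')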
(name, ext) = os.path.splitext(os.path.basename(path))
if ext not in ('.yml', '.yaml'):
return os.path.basename(path)
else:
return name
def _load_inventory_file(self, path, loader):
'''
helper function, which loads the file and gets the
basename of the file without the extension
'''
if loader.is_directory(path):
data = dict()
try:
names = loader.list_directory(path)
except os.error as err:
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
# evaluate files in a stable order rather than whatever
# order the filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
for p in paths:
_found, results = self._load_inventory_file(path=p, loader=loader)
if results is not None:
data = combine_vars(data, results)
else:
file_name, ext = os.path.splitext(path)
data = None
if not ext or ext not in C.YAML_FILENAME_EXTENSIONS:
for test_ext in C.YAML_FILENAME_EXTENSIONS:
new_path = path + test_ext
if loader.path_exists(new_path):
data = loader.load_from_file(new_path)
break
else:
if loader.path_exists(path):
data = loader.load_from_file(path)
name = self._get_inventory_basename(path)
return (name, data)
def add_host_vars_file(self, path, loader):
'''
Loads and caches a host_vars file in the _host_vars_files dict,
where the key to that dictionary is the basename of the file, minus
the extension, for matching against a given inventory host name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._host_vars_files:
self._host_vars_files[name] = []
self._host_vars_files[name].append(data)
return data
else:
return dict()
def add_group_vars_file(self, path, loader):
'''
Loads and caches a group_vars file in the _group_vars_files dict,
where the key to that dictionary is the basename of the file, minus
the extension, for matching against a given inventory group name
'''
(name, data) = self._load_inventory_file(path, loader)
if data:
if name not in self._group_vars_files:
self._group_vars_files[name] = []
self._group_vars_files[name].append(data)
return data
else:
return dict()
def set_host_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._fact_cache:
self._fact_cache[host.name] = facts
else:
try:
self._fact_cache[host.name].update(facts)
except KeyError:
self._fact_cache[host.name] = facts
def set_nonpersistent_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
assert isinstance(facts, dict)
if host.name not in self._nonpersistent_fact_cache:
self._nonpersistent_fact_cache[host.name] = facts
else:
try:
self._nonpersistent_fact_cache[host.name].update(facts)
except KeyError:
self._nonpersistent_fact_cache[host.name] = facts
def set_host_variable(self, host, varname, value):
'''
Sets a value in the vars_cache for a host.
'''
host_name = host.get_name()
if host_name not in self._vars_cache:
self._vars_cache[host_name] = dict()
self._vars_cache[host_name][varname] = value
|
netaccess0/kernel_lge_msm8974
|
refs/heads/L5
|
Documentation/target/tcm_mod_builder.py
|
4981
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
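# The loop above collects every line of target_core_fabric.h that contains a
# '(*' function-pointer declaration, e.g. lines such as:
#
#   char *(*get_fabric_name)(void);
#   int (*write_pending)(struct se_cmd *);
#
# Each matched line is appended verbatim to fabric_ops and later turned into a
# stub definition by tcm_mod_dump_fabric_ops().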
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
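# For an example module name of "tcm_nab5000" (a placeholder), the Kconfig
# fragment written above would read:
#
#   config TCM_NAB5000
#           tristate "TCM_NAB5000 fabric module"
#           depends on TARGET_CORE && CONFIGFS_FS
#           default n
#           ---help---
#           Say Y here to enable the TCM_NAB5000 fabric module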
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
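# Example invocation (the module name is a placeholder), run from the
# Documentation/target/ directory of a kernel tree so that tcm_dir resolves to
# the tree root:
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI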
|
chirilo/remo
|
refs/heads/master
|
vendor-local/lib/python/unidecode/x061.py
|
252
|
data = (
'Qiao ', # 0x00
'Chou ', # 0x01
'Bei ', # 0x02
'Xuan ', # 0x03
'Wei ', # 0x04
'Ge ', # 0x05
'Qian ', # 0x06
'Wei ', # 0x07
'Yu ', # 0x08
'Yu ', # 0x09
'Bi ', # 0x0a
'Xuan ', # 0x0b
'Huan ', # 0x0c
'Min ', # 0x0d
'Bi ', # 0x0e
'Yi ', # 0x0f
'Mian ', # 0x10
'Yong ', # 0x11
'Kai ', # 0x12
'Dang ', # 0x13
'Yin ', # 0x14
'E ', # 0x15
'Chen ', # 0x16
'Mou ', # 0x17
'Ke ', # 0x18
'Ke ', # 0x19
'Yu ', # 0x1a
'Ai ', # 0x1b
'Qie ', # 0x1c
'Yan ', # 0x1d
'Nuo ', # 0x1e
'Gan ', # 0x1f
'Yun ', # 0x20
'Zong ', # 0x21
'Sai ', # 0x22
'Leng ', # 0x23
'Fen ', # 0x24
'[?] ', # 0x25
'Kui ', # 0x26
'Kui ', # 0x27
'Que ', # 0x28
'Gong ', # 0x29
'Yun ', # 0x2a
'Su ', # 0x2b
'Su ', # 0x2c
'Qi ', # 0x2d
'Yao ', # 0x2e
'Song ', # 0x2f
'Huang ', # 0x30
'Ji ', # 0x31
'Gu ', # 0x32
'Ju ', # 0x33
'Chuang ', # 0x34
'Ni ', # 0x35
'Xie ', # 0x36
'Kai ', # 0x37
'Zheng ', # 0x38
'Yong ', # 0x39
'Cao ', # 0x3a
'Sun ', # 0x3b
'Shen ', # 0x3c
'Bo ', # 0x3d
'Kai ', # 0x3e
'Yuan ', # 0x3f
'Xie ', # 0x40
'Hun ', # 0x41
'Yong ', # 0x42
'Yang ', # 0x43
'Li ', # 0x44
'Sao ', # 0x45
'Tao ', # 0x46
'Yin ', # 0x47
'Ci ', # 0x48
'Xu ', # 0x49
'Qian ', # 0x4a
'Tai ', # 0x4b
'Huang ', # 0x4c
'Yun ', # 0x4d
'Shen ', # 0x4e
'Ming ', # 0x4f
'[?] ', # 0x50
'She ', # 0x51
'Cong ', # 0x52
'Piao ', # 0x53
'Mo ', # 0x54
'Mu ', # 0x55
'Guo ', # 0x56
'Chi ', # 0x57
'Can ', # 0x58
'Can ', # 0x59
'Can ', # 0x5a
'Cui ', # 0x5b
'Min ', # 0x5c
'Te ', # 0x5d
'Zhang ', # 0x5e
'Tong ', # 0x5f
'Ao ', # 0x60
'Shuang ', # 0x61
'Man ', # 0x62
'Guan ', # 0x63
'Que ', # 0x64
'Zao ', # 0x65
'Jiu ', # 0x66
'Hui ', # 0x67
'Kai ', # 0x68
'Lian ', # 0x69
'Ou ', # 0x6a
'Song ', # 0x6b
'Jin ', # 0x6c
'Yin ', # 0x6d
'Lu ', # 0x6e
'Shang ', # 0x6f
'Wei ', # 0x70
'Tuan ', # 0x71
'Man ', # 0x72
'Qian ', # 0x73
'She ', # 0x74
'Yong ', # 0x75
'Qing ', # 0x76
'Kang ', # 0x77
'Di ', # 0x78
'Zhi ', # 0x79
'Lou ', # 0x7a
'Juan ', # 0x7b
'Qi ', # 0x7c
'Qi ', # 0x7d
'Yu ', # 0x7e
'Ping ', # 0x7f
'Liao ', # 0x80
'Cong ', # 0x81
'You ', # 0x82
'Chong ', # 0x83
'Zhi ', # 0x84
'Tong ', # 0x85
'Cheng ', # 0x86
'Qi ', # 0x87
'Qu ', # 0x88
'Peng ', # 0x89
'Bei ', # 0x8a
'Bie ', # 0x8b
'Chun ', # 0x8c
'Jiao ', # 0x8d
'Zeng ', # 0x8e
'Chi ', # 0x8f
'Lian ', # 0x90
'Ping ', # 0x91
'Kui ', # 0x92
'Hui ', # 0x93
'Qiao ', # 0x94
'Cheng ', # 0x95
'Yin ', # 0x96
'Yin ', # 0x97
'Xi ', # 0x98
'Xi ', # 0x99
'Dan ', # 0x9a
'Tan ', # 0x9b
'Duo ', # 0x9c
'Dui ', # 0x9d
'Dui ', # 0x9e
'Su ', # 0x9f
'Jue ', # 0xa0
'Ce ', # 0xa1
'Xiao ', # 0xa2
'Fan ', # 0xa3
'Fen ', # 0xa4
'Lao ', # 0xa5
'Lao ', # 0xa6
'Chong ', # 0xa7
'Han ', # 0xa8
'Qi ', # 0xa9
'Xian ', # 0xaa
'Min ', # 0xab
'Jing ', # 0xac
'Liao ', # 0xad
'Wu ', # 0xae
'Can ', # 0xaf
'Jue ', # 0xb0
'Cu ', # 0xb1
'Xian ', # 0xb2
'Tan ', # 0xb3
'Sheng ', # 0xb4
'Pi ', # 0xb5
'Yi ', # 0xb6
'Chu ', # 0xb7
'Xian ', # 0xb8
'Nao ', # 0xb9
'Dan ', # 0xba
'Tan ', # 0xbb
'Jing ', # 0xbc
'Song ', # 0xbd
'Han ', # 0xbe
'Jiao ', # 0xbf
'Wai ', # 0xc0
'Huan ', # 0xc1
'Dong ', # 0xc2
'Qin ', # 0xc3
'Qin ', # 0xc4
'Qu ', # 0xc5
'Cao ', # 0xc6
'Ken ', # 0xc7
'Xie ', # 0xc8
'Ying ', # 0xc9
'Ao ', # 0xca
'Mao ', # 0xcb
'Yi ', # 0xcc
'Lin ', # 0xcd
'Se ', # 0xce
'Jun ', # 0xcf
'Huai ', # 0xd0
'Men ', # 0xd1
'Lan ', # 0xd2
'Ai ', # 0xd3
'Lin ', # 0xd4
'Yan ', # 0xd5
'Gua ', # 0xd6
'Xia ', # 0xd7
'Chi ', # 0xd8
'Yu ', # 0xd9
'Yin ', # 0xda
'Dai ', # 0xdb
'Meng ', # 0xdc
'Ai ', # 0xdd
'Meng ', # 0xde
'Dui ', # 0xdf
'Qi ', # 0xe0
'Mo ', # 0xe1
'Lan ', # 0xe2
'Men ', # 0xe3
'Chou ', # 0xe4
'Zhi ', # 0xe5
'Nuo ', # 0xe6
'Nuo ', # 0xe7
'Yan ', # 0xe8
'Yang ', # 0xe9
'Bo ', # 0xea
'Zhi ', # 0xeb
'Kuang ', # 0xec
'Kuang ', # 0xed
'You ', # 0xee
'Fu ', # 0xef
'Liu ', # 0xf0
'Mie ', # 0xf1
'Cheng ', # 0xf2
'[?] ', # 0xf3
'Chan ', # 0xf4
'Meng ', # 0xf5
'Lan ', # 0xf6
'Huai ', # 0xf7
'Xuan ', # 0xf8
'Rang ', # 0xf9
'Chan ', # 0xfa
'Ji ', # 0xfb
'Ju ', # 0xfc
'Huan ', # 0xfd
'She ', # 0xfe
'Yi ', # 0xff
)
|
mhoffma/micropython
|
refs/heads/master
|
tests/basics/andor.py
|
115
|
# test short circuit expressions outside if conditionals
print(() or 1)
print((1,) or 1)
print(() and 1)
print((1,) and 1)
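# Expected results, per standard Python short-circuit semantics ('or' returns
# the first truthy operand or else the last operand; 'and' returns the first
# falsy operand or else the last operand):
#   () or 1    -> 1
#   (1,) or 1  -> (1,)
#   () and 1   -> ()
#   (1,) and 1 -> 1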
|
gangadharkadam/saloon_erp_install
|
refs/heads/master
|
erpnext/setup/page/setup_wizard/test_setup_wizard.py
|
45
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.setup.page.setup_wizard.test_setup_data import args
from erpnext.setup.page.setup_wizard.setup_wizard import setup_account
import frappe.utils.scheduler
if __name__=="__main__":
frappe.connect()
frappe.local.form_dict = frappe._dict(args)
setup_account()
frappe.utils.scheduler.disable_scheduler()
|
MatthewShao/mitmproxy
|
refs/heads/master
|
test/mitmproxy/addons/test_termstatus.py
|
3
|
from mitmproxy import proxy
from mitmproxy.addons import termstatus
from mitmproxy.test import taddons
def test_configure():
ts = termstatus.TermStatus()
with taddons.context() as ctx:
ctx.master.server = proxy.DummyServer()
ctx.configure(ts, server=False)
ts.running()
assert not ctx.master.logs
ctx.configure(ts, server=True)
ts.running()
assert ctx.master.logs
|
pdellaert/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/ios/argspec/facts/facts.py
|
20
|
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the ios facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class FactsArgs(object):
""" The arg spec for the ios facts module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
'gather_subset': dict(default=['!config'], type='list'),
'gather_network_resources': dict(type='list'),
}
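# A minimal sketch (not part of this file) of how an argspec class like this is
# typically consumed when constructing the module instance; the surrounding
# ios_facts module code is assumed rather than shown here:
#
#   from ansible.module_utils.basic import AnsibleModule
#
#   module = AnsibleModule(argument_spec=FactsArgs.argument_spec,
#                          supports_check_mode=True)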
|
baozoumanhua/elk-rtf
|
refs/heads/master
|
kibana-4-darwin-x64/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py
|
505
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
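# For illustration (hypothetical values): 'main.cc' matches _unquoted and is
# printed bare, 'hello world' does not match and is printed as "hello world",
# and 'foo___bar' is printed quoted as "foo___bar" because _quoted takes
# precedence even though the string would otherwise qualify as unquoted.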
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile('^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
return re.sub('\$\((.*?)\)', '${\\1}', input_string)
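# Illustrative example (hypothetical input): ConvertVariablesToShellSyntax
# rewrites Xcode-style variable references into shell syntax, so
#   ConvertVariablesToShellSyntax('$(SRCROOT)/tool.sh $(CONFIGURATION)')
# returns
#   '${SRCROOT}/tool.sh ${CONFIGURATION}'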
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
             default: Optional.  If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
        project file structure is built.  At that point, ComputeIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError, 'Strong dict for key ' + key + ' in ' + \
self.__class__.__name__
else:
that._properties[key] = value.copy()
else:
raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError, \
self.__class__.__name__ + ' must implement Name'
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
      their objects.  By adding the length, it's far less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits.  Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError, \
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name())
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), and _ (underscore) is present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\Uxxxx", where xxxx is the four-digit
    # hexadecimal Unicode code point.  For example, character 14 (^N SO) is
    # encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError, property + ' not in ' + self.__class__.__name__
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError, \
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError, "Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError, key + ' not in ' + self.__class__.__name__
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list'
if not isinstance(value, property_type):
raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError, self.__class__.__name__ + ' requires ' + property
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
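# Illustrative sketch of how the _schema machinery above is typically used.
# This is kept as a comment so that nothing extra is defined at import time;
# XCExample is a hypothetical class, not part of this module:
#
#   class XCExample(XCObject):
#     _schema = XCObject._schema.copy()
#     _schema.update({
#       'name':     [0, str,      0, 1],   # single value, str, weak, required
#       'children': [1, XCObject, 1, 0],   # list of XCObject, strong, optional
#     })
#
# With that schema, XCExample({'name': 'demo'}) validates and stores the
# property, XCExample({'name': 3}) raises TypeError, and XCExample({}) is
# created successfully but fails VerifyHasRequiredProperties (and therefore
# Print) because 'name' is required and has no default.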
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
    a/b/f1 and the groups a and b are collapsed into a single group a/b, f1
    will have a single parent a/b.  If someone later adds a/f2 to the project
    file, a/b can no longer be collapsed, and f1 winds up with parent b and
    grandparent a.  That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
    other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
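# Illustrative sketch of the path handling above (comment only; PBXGroup and
# PBXFileReference are defined later in this module, and the paths are
# hypothetical):
#
#   chrome  = PBXGroup({'path': 'chrome'})
#   browser = PBXGroup({'path': 'browser'})
#   app     = PBXFileReference({'path': 'app.cc'})
#   chrome.AppendChild(browser)
#   browser.AppendChild(app)
#
# app.PathFromSourceTreeAndPath() is 'app.cc', while app.FullPath() walks up
# the parent chain and returns 'chrome/browser/app.cc'.  A path that names a
# source tree, such as '$(SDKROOT)/usr/lib/libz.dylib', is split into
# sourceTree 'SDKROOT' and path 'usr/lib/libz.dylib' by the constructor, and
# FullPath() stops walking once it sees the leading '$(...)'.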
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
    # It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
    # To add entropy, the hashable is extended with the names of all of its
    # children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError, 'Found multiple children with path ' + child_path
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path)
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
    # etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
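# Illustrative sketch of AddOrGetFileByPath (comment only, hypothetical paths):
#
#   group = PBXGroup({'name': 'Source'})
#   ref1 = group.AddOrGetFileByPath('a/b/main.cc', hierarchical=True)
#   ref2 = group.AddOrGetFileByPath('a/b/main.cc', hierarchical=True)
#
# ref1 is a PBXFileReference nested under intermediate PBXGroups "a" and "b",
# and ref2 is the very same object, because the existing reference is found
# and reused.  A variant path such as 'res/en.lproj/Localizable.strings'
# instead returns a PBXVariantGroup named 'Localizable.strings' (under a "res"
# group) that contains a PBXFileReference named 'en'.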
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'ttf': 'file',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
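# Illustrative examples of the file type mapping above (comment only,
# hypothetical paths):
#
#   PBXFileReference({'path': 'foo/bar.mm'})      -> name 'bar.mm',
#       lastKnownFileType 'sourcecode.cpp.objcpp'
#   PBXFileReference({'path': 'tools/build.gyp'}) -> explicitFileType
#       'sourcecode', because 'gyp' appears in prop_map
#   PBXFileReference({'path': 'assets/'})         -> trailing slash stripped,
#       lastKnownFileType 'folder'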
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError, name
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError, 'Variant values for ' + key
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
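# Illustrative sketch of build setting aggregation (comment only; the setting
# name is just an example):
#
#   xcl = XCConfigurationList()   # picks up the default Debug and Release
#   xcl.SetBuildSetting('GCC_OPTIMIZATION_LEVEL', '0')
#   xcl.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')    # 1: present, same value
#   xcl.ConfigurationNamed('Release').SetBuildSetting(
#       'GCC_OPTIMIZATION_LEVEL', '2')
#   xcl.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')    # -1: values now differ
#   xcl.GetBuildSetting('GCC_OPTIMIZATION_LEVEL')    # raises ValueError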
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError, \
self.__class__.__name__ + ' must implement FileGroup'
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError, 'Found multiple build files with path ' + path
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError, 'Found multiple build files for ' + \
xcfilelikeelement.Name()
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
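  # Editor's note: an illustrative, hedged usage sketch (not part of the
  # original gyp source; "phase" is a hypothetical concrete build phase, e.g.
  # a PBXSourcesBuildPhase already attached to a PBXProject hierarchy).
  # AddFile is the normal entry point; it builds the PBXBuildFile and keeps
  # _files_by_path and _files_by_xcfilelikeelement consistent:
  #
  #   phase.AddFile('source/widget.cc')
  #
  # Adding the same path a second time raises ValueError via _AddPathToDict,
  # while paths routed through a PBXVariantGroup (localized resources) reuse
  # the group's existing PBXBuildFile as described in _AddBuildFileToDicts.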
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
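  # Editor's note: an illustrative, hedged sketch (not from the original gyp
  # source; the script, paths, and "target" are hypothetical). A script phase
  # is built by passing schema properties directly; "shellScript" is the only
  # required property without a default, and "shellPath" defaults to /bin/sh:
  #
  #   script_phase = PBXShellScriptBuildPhase({
  #       'name': 'Generate Version Header',
  #       'shellScript': 'tools/gen_version.sh "${DERIVED_FILE_DIR}"',
  #       'inputPaths': ['tools/gen_version.sh'],
  #       'outputPaths': ['$(DERIVED_FILE_DIR)/version.h'],
  #   })
  #   target.AppendProperty('buildPhases', script_phase)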
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 10, # Frameworks
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError, 'Can\'t use path %s in a %s' % \
(path, self.__class__.__name__)
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
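  # Editor's note: illustrative mappings (not from the original source),
  # assuming "copy_phase" is a PBXCopyFilesBuildPhase instance:
  #
  #   copy_phase.SetDestination('$(BUILT_PRODUCTS_DIR)/PlugIns')
  #       # dstSubfolderSpec 16, dstPath 'PlugIns'
  #   copy_phase.SetDestination('$(SRCROOT)/staging')
  #       # unrecognized variable: dstSubfolderSpec 0, dstPath '$(SRCROOT)/staging'
  #   copy_phase.SetDestination('/usr/local/lib')
  #       # absolute path: dstSubfolderSpec 0, dstPath 'usr/local/lib'
  #   copy_phase.SetDestination('relative/path')   # raises ValueError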
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject, the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
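  # Editor's note: a hedged usage sketch (target names are hypothetical, not
  # from the original source). Within one project file,
  #
  #   app_target.AddDependency(lib_target)
  #
  # creates a PBXContainerItemProxy whose containerPortal is the shared
  # PBXProject. Across project files, the containerPortal is instead the
  # PBXFileReference returned by AddOrGetProjectReference, and the dependency
  # is recorded by name rather than by target.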
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
  #  suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
}
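  # Editor's note (illustrative, not from the original source): the table
  # above drives product naming in __init__ below. For example,
  # 'com.apple.product-type.library.static' maps to ['archive.ar', 'lib',
  # '.a'], so a target whose productName is 'base' gets a productReference
  # whose path is 'libbase.a' unless force_prefix or force_extension
  # overrides those pieces.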
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
# Extension override.
suffix = '.' + force_extension
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
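  # Editor's note: a hedged construction sketch (not from the original source;
  # "pbxproject" and the target name are hypothetical):
  #
  #   target = PBXNativeTarget(
  #       {'name': 'my_module',
  #        'productType': 'com.googlecode.gyp.xcode.bundle'},
  #       parent=pbxproject)
  #
  # With the fix-ups above, the target ends up as a dynamic library with
  # MACH_O_TYPE=mh_bundle, cleared DYLIB_*_VERSION settings, and a
  # productReference named 'my_module.so' in the project's Products group.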
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phase types may legitimately appear more than once in a
        # well-formed project file, but phases like PBXSourcesBuildPhase may
        # only appear once, and callers of this function rely on getting that
        # single instance back.  Loop over the entire list of phases and
        # assert if more than one of the desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
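  # Editor's note (illustrative, not from the original source): two typical
  # dispatches made by RootGroupForPath above:
  #
  #   project.RootGroupForPath('$(INTERMEDIATE_DIR)/generated.cc')
  #       # -> (IntermediatesGroup, True)
  #   project.RootGroupForPath('src/widget.cc')
  #       # -> (SourceGroup, True)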
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists.  Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
return [product_group, project_ref]
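  # Editor's note: a hedged usage sketch (variable names are hypothetical):
  #
  #   product_group, project_ref = \
  #       this_project.AddOrGetProjectReference(other_project)
  #
  # product_group ends up holding one PBXReferenceProxy per PBXNativeTarget
  # product in other_project; calling the method again returns the same pair
  # and only refreshes the proxies via _SetUpProductReferences.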
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItem to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y: CompareProducts(x, y, remote_products))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 45],
'rootObject': [0, PBXProject, 1, 1],
})
def SetXcodeVersion(self, version):
version_to_object_version = {
'2.4': 45,
'3.0': 45,
'3.1': 45,
'3.2': 46,
}
if not version in version_to_object_version:
supported_str = ', '.join(sorted(version_to_object_version.keys()))
raise Exception(
'Unsupported Xcode version %s (supported: %s)' %
( version, supported_str ) )
compatibility_version = 'Xcode %s' % version
self._properties['rootObject'].SetProperty('compatibilityVersion',
compatibility_version)
    self.SetProperty('objectVersion', version_to_object_version[version])
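  # Editor's note (illustrative, not from the original source):
  #
  #   project_file.SetXcodeVersion('3.2')
  #
  # sets objectVersion to 46 and the root PBXProject's compatibilityVersion to
  # 'Xcode 3.2'; an unsupported version string raises an Exception listing the
  # versions in version_to_object_version.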
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
# loop do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
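# Editor's note: a hedged end-to-end sketch of how this module is typically
# driven (not part of the original source; the paths and target name are
# hypothetical, and error handling is omitted):
#
#   pbxproject = PBXProject(path='sample.xcodeproj')
#   project_file = XCProjectFile({'rootObject': pbxproject})
#   target = PBXNativeTarget(
#       {'name': 'hello', 'productType': 'com.apple.product-type.tool'},
#       parent=pbxproject)
#   pbxproject.AppendProperty('targets', target)
#   target.SourcesPhase().AddFile('hello.cc')
#   project_file.ComputeIDs()
#   with open('sample.xcodeproj/project.pbxproj', 'w') as f:
#     project_file.Print(f)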
|
edunham/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/tests/__init__.py
|
621
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
asandyz/oppia
|
refs/heads/develop
|
extensions/triggers/trigger_classes.py
|
15
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for defining triggers.
Although this module is in extensions/, it is not provided as an extension
framework for third-party developers. This is because reacting to triggers
involves changes to core code.
"""
__author__ = 'Sean Lip'
from extensions import domain
class BaseTrigger(object):
"""Base trigger definition class.
This class is not meant to be user-editable. The only methods on it should
be get()-type methods.
"""
# Customization arg specifications for the trigger, including their
# descriptions, schemas and default values. Overridden in subclasses.
_customization_arg_specs = []
@classmethod
def get_trigger_type(cls):
return cls.__name__
@property
def customization_arg_specs(self):
return [
domain.CustomizationArgSpec(**cas)
for cas in self._customization_arg_specs]
class NthResubmission(BaseTrigger):
"""This trigger is invoked when an answer is submitted to the same state
for the nth time in succession, and the destination that would result due
to normal evaluation would cause a further loop-around to the same state.
"""
_customization_arg_specs = [{
'name': 'num_submits',
'description': (
'The number of submissions after which to react, if the last '
'submission would result in a further loop-around'),
'schema': {
'type': 'int'
},
'default_value': 3,
}]
class ClickButton(BaseTrigger):
"""The presence of this trigger adds a button to the UI. The trigger is
invoked when the learner clicks this button.
"""
_customization_arg_specs = [{
'name': 'button_text',
'description': 'The text of the button',
'schema': {
'type': 'unicode',
},
'default_value': 'Help, I\'m stuck',
}]
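# Editor's note: a hedged usage sketch (not part of the original module). It
# assumes domain.CustomizationArgSpec exposes its keyword arguments as
# attributes:
#
#   trigger = NthResubmission()
#   trigger.get_trigger_type()            # -> 'NthResubmission'
#   spec = trigger.customization_arg_specs[0]
#   spec.name, spec.default_value         # -> ('num_submits', 3)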
|
codeofdusk/ProjectMagenta
|
refs/heads/master
|
src/audio_services/__init__.py
|
3
|
from functools import wraps
def matches_url(url):
def url_setter(func):
@wraps(func)
def internal_url_setter(*args, **kwargs):
return func(*args, **kwargs)
internal_url_setter.url = url
return internal_url_setter
return url_setter
def find_url_transformer(url):
from audio_services import services
funcs = []
for i in dir(services):
possible = getattr(services, i)
if callable(possible) and hasattr(possible, 'url'):
funcs.append(possible)
for f in funcs:
if url.lower().startswith(f.url.lower()):
return f
return services.convert_generic_audio
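# Editor's note: a hedged sketch (not part of the original package) of how the
# decorator and lookup work together. It assumes a hypothetical transformer
# defined in audio_services/services.py:
#
#   @matches_url('https://example.com/audio/')
#   def convert_example(url):
#       return url + '?direct=1'
#
# find_url_transformer('https://example.com/audio/track1') would then return
# convert_example, while a URL with no matching prefix falls back to
# services.convert_generic_audio.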
|
annarev/tensorflow
|
refs/heads/master
|
tensorflow/python/training/saver.py
|
8
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables.
Symbols in this file are deprecated. See replacements in
tensorflow/python/training/trackable and tensorflow/python/training/saving.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import time
import numpy as np
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.core.protobuf import trackable_object_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training import training_util
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# TODO(allenl): Remove these aliases once all users are migrated off.
get_checkpoint_state = checkpoint_management.get_checkpoint_state
update_checkpoint_state = checkpoint_management.update_checkpoint_state
generate_checkpoint_state_proto = (
checkpoint_management.generate_checkpoint_state_proto)
latest_checkpoint = checkpoint_management.latest_checkpoint
checkpoint_exists = checkpoint_management.checkpoint_exists
get_checkpoint_mtimes = checkpoint_management.get_checkpoint_mtimes
remove_checkpoint = checkpoint_management.remove_checkpoint
class BaseSaverBuilder(object):
"""Base class for Savers.
Can be extended to create different Ops.
"""
SaveSpec = saveable_object.SaveSpec
SaveableObject = saveable_object.SaveableObject
# Aliases for code which was moved but still has lots of users.
VariableSaveable = saveable_object_util.ReferenceVariableSaveable
ResourceVariableSaveable = saveable_object_util.ResourceVariableSaveable
def __init__(self, write_version=saver_pb2.SaverDef.V2):
self._write_version = write_version
def save_op(self, filename_tensor, saveables):
"""Create an Op to save 'saveables'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
      An Operation that saves the variables.
Raises:
RuntimeError: (implementation detail) if "self._write_version" is an
unexpected value.
"""
# pylint: disable=protected-access
tensor_names = []
tensors = []
tensor_slices = []
for saveable in saveables:
for spec in saveable.specs:
tensor_names.append(spec.name)
tensors.append(spec.tensor)
tensor_slices.append(spec.slice_spec)
if self._write_version == saver_pb2.SaverDef.V1:
return io_ops._save(
filename=filename_tensor,
tensor_names=tensor_names,
tensors=tensors,
tensor_slices=tensor_slices)
elif self._write_version == saver_pb2.SaverDef.V2:
# "filename_tensor" is interpreted *NOT AS A FILENAME*, but as a prefix
# of a V2 checkpoint: e.g. "/fs/train/ckpt-<step>/tmp/worker<i>-<step>".
return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices,
tensors)
else:
raise RuntimeError("Unexpected write_version: " + self._write_version)
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
"""Restore all tensors contained in saveables.
By default, this issues separate calls to `restore_op` for each saveable.
Subclasses may override to load multiple saveables in a single call.
Args:
filename_tensor: String Tensor.
saveables: List of BaseSaverBuilder.SaveableObject objects.
preferred_shard: Int. Shard to open first when loading a sharded file.
restore_sequentially: Unused. Bool. If true, each restore is sequential.
Returns:
A list of Tensors resulting from reading 'saveable' from
'filename'.
"""
del restore_sequentially
all_tensors = []
for saveable in saveables:
if saveable.device:
device = saveable_object_util.set_cpu0(saveable.device)
else:
device = None
with ops.device(device):
all_tensors.extend(
self.restore_op(filename_tensor, saveable, preferred_shard))
return all_tensors
# pylint: disable=unused-argument
def restore_op(self, filename_tensor, saveable, preferred_shard):
"""Create ops to restore 'saveable'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
saveable: A BaseSaverBuilder.SaveableObject object.
preferred_shard: Int. Shard to open first when loading a sharded file.
Returns:
A list of Tensors resulting from reading 'saveable' from
'filename'.
"""
# pylint: disable=protected-access
tensors = []
for spec in saveable.specs:
tensors.append(
io_ops.restore_v2(filename_tensor, [spec.name], [spec.slice_spec],
[spec.dtype])[0])
return tensors
# pylint: enable=unused-argument
def sharded_filename(self, filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
def _AddSaveOps(self, filename_tensor, saveables):
"""Add ops to save variables that are on the same shard.
Args:
filename_tensor: String Tensor.
saveables: A list of SaveableObject objects.
Returns:
A tensor with the filename used to save.
"""
save = self.save_op(filename_tensor, saveables)
return control_flow_ops.with_dependencies([save], filename_tensor)
def _AddShardedSaveOpsForV2(self, checkpoint_prefix, per_device):
"""Add ops to save the params per shard, for the V2 format.
Note that the sharded save procedure for the V2 format is different from
V1: there is a special "merge" step that merges the small metadata produced
from each device.
Args:
checkpoint_prefix: scalar String Tensor. Interpreted *NOT AS A FILENAME*,
but as a prefix of a V2 checkpoint;
per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables, which, when evaluated, returns the prefix
"<user-fed prefix>" only and does not include the sharded spec suffix.
"""
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><_SHARDED_SUFFIX>.
    #   * If checkpoint_prefix is an S3 bucket path, ".part" is appended to it.
    #   * Otherwise, "_temp/part" is appended, normalized for the host OS.
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
    # Filesystems with eventual consistency (such as S3) don't need a
    # temporary location; using a temporary directory in those cases can
    # leave files unavailable during the copy.
#
# Users only need to interact with the user-specified prefix, which is
# "<train dir>/myckpt" in this case. Save() and Restore() work with the
# prefix directly, instead of any physical pathname. (On failure and
# subsequent restore, an outdated and orphaned temporary directory can be
# safely removed.)
with ops.device("CPU"):
_SHARDED_SUFFIX = array_ops.where(
string_ops.regex_full_match(checkpoint_prefix, "^s3://.*"),
constant_op.constant(".part"),
constant_op.constant(os.path.normpath("_temp/part")))
tmp_checkpoint_prefix = string_ops.string_join(
[checkpoint_prefix, _SHARDED_SUFFIX])
num_shards = len(per_device)
sharded_saves = []
sharded_prefixes = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
last_device = None
for shard, (device, saveables) in enumerate(per_device):
last_device = device
with ops.device(saveable_object_util.set_cpu0(device)):
sharded_filename = self.sharded_filename(tmp_checkpoint_prefix, shard,
num_shards_tensor)
sharded_prefixes.append(sharded_filename)
sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
with ops.control_dependencies([x.op for x in sharded_saves]):
# Co-locates the merge step with the last device.
with ops.device(saveable_object_util.set_cpu0(last_device)):
# V2 format write path consists of a metadata merge step. Once merged,
# attempts to delete the temporary directory, "<user-fed prefix>_temp".
merge_step = gen_io_ops.merge_v2_checkpoints(
sharded_prefixes, checkpoint_prefix, delete_old_dirs=True)
with ops.control_dependencies([merge_step]):
# Returns the prefix "<user-fed prefix>" only. DOES NOT include the
# sharded spec suffix.
return array_ops.identity(checkpoint_prefix)
def _AddShardedSaveOps(self, filename_tensor, per_device):
"""Add ops to save the params per shard.
Args:
filename_tensor: a scalar String Tensor.
per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables.
"""
if self._write_version == saver_pb2.SaverDef.V2:
return self._AddShardedSaveOpsForV2(filename_tensor, per_device)
num_shards = len(per_device)
sharded_saves = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_filename = self.sharded_filename(filename_tensor, shard,
num_shards_tensor)
sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
# Return the sharded name for the save path.
with ops.control_dependencies([x.op for x in sharded_saves]):
return gen_io_ops.sharded_filespec(filename_tensor, num_shards_tensor)
def _AddRestoreOps(self,
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=-1,
name="restore_all"):
"""Add operations to restore saveables.
Args:
filename_tensor: Tensor for the path of the file to load.
saveables: A list of SaveableObject objects.
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of the
corresponding variable.
preferred_shard: Shard to open first when loading a sharded file.
name: Name for the returned op.
Returns:
An Operation that restores the variables.
"""
all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard,
restore_sequentially)
assign_ops = []
idx = 0
# Load and optionally reshape on the CPU, as string tensors are not
# available on the GPU.
# TODO(touts): Re-enable restore on GPU when we can support annotating
# string tensors as "HostMemory" inputs.
for saveable in saveables:
shapes = None
if reshape:
# Compute the shapes, let the restore op decide if and how to do
# the reshape.
shapes = []
for spec in saveable.specs:
v = spec.tensor
shape = v.get_shape()
if not shape.is_fully_defined():
shape = array_ops.shape(v)
shapes.append(shape)
saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]
idx += len(saveable.specs)
assign_ops.append(saveable.restore(saveable_tensors, shapes))
# Create a Noop that has control dependencies from all the updates.
return control_flow_ops.group(*assign_ops, name=name)
def _AddShardedRestoreOps(self, filename_tensor, per_device,
restore_sequentially, reshape):
"""Add Ops to restore variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, SaveableObject) pairs, as returned by
_GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of the
corresponding variable.
Returns:
An Operation that restores the variables.
"""
sharded_restores = []
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_restores.append(
self._AddRestoreOps(
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=shard,
name="restore_shard"))
return control_flow_ops.group(*sharded_restores, name="restore_all")
def _GroupByDevices(self, saveables):
"""Group Variable tensor slices per device.
TODO(touts): Make sure that all the devices found are on different
job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
It can happen if the devices are unspecified.
Args:
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
A list of tuples: (device_name, BaseSaverBuilder.SaveableObject) tuples.
The list is sorted by ascending device_name.
Raises:
ValueError: If the tensors of a saveable are on different devices.
"""
per_device = collections.defaultdict(lambda: [])
for saveable in saveables:
canonical_device = set(
pydev.canonical_name(spec.device) for spec in saveable.specs)
if len(canonical_device) != 1:
raise ValueError("All tensors of a saveable object must be "
"on the same device: %s" % saveable.name)
per_device[canonical_device.pop()].append(saveable)
return sorted(per_device.items(), key=lambda t: t[0])
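  # Editor's note (illustrative, not from the original source): given
  # saveables whose specs live on two worker tasks, _GroupByDevices returns
  # something like
  #   [('/job:worker/task:0/device:CPU:0', [task-0 saveables]),
  #    ('/job:worker/task:1/device:CPU:0', [task-1 saveables])]
  # sorted by device name; a saveable whose specs span devices raises
  # ValueError.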
def build(self,
names_to_saveables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
filename="model"):
"""Builds save/restore graph nodes or runs save/restore in eager mode.
Args:
names_to_saveables: A dictionary mapping name to a Variable or
SaveableObject. Each name will be associated with the corresponding
variable in the checkpoint.
      reshape: If True, allow restoring parameters from a checkpoint where the
        parameters have a different shape. This is only needed when you try to
        restore from a Dist-Belief checkpoint, and only sometimes.
sharded: If True, shard the checkpoints, one per device that has Variable
nodes.
max_to_keep: Maximum number of checkpoints to keep. As new checkpoints
are created, old ones are deleted. If None or 0, no checkpoints are
deleted from the filesystem but only the last one is kept in the
`checkpoint` file. Presently the number is only roughly enforced. For
example in case of restarts more than max_to_keep checkpoints may be
kept.
keep_checkpoint_every_n_hours: How often checkpoints should be kept.
Defaults to 10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A Bool, which if true, causes restore of different
variables to happen sequentially within each device.
filename: If known at graph construction time, filename used for variable
loading/saving. If None, then the default name "model" will be used.
Returns:
A SaverDef proto.
Raises:
TypeError: If 'names_to_saveables' is not a dictionary mapping string
keys to variable Tensors.
ValueError: If any of the keys or values in 'names_to_saveables' is not
unique.
"""
return self._build_internal(
names_to_saveables=names_to_saveables,
reshape=reshape,
sharded=sharded,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
name=name,
restore_sequentially=restore_sequentially,
filename=filename)
def _build_internal(self,
names_to_saveables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
filename="model",
build_save=True,
build_restore=True):
"""build() with option to only perform save and restore."""
if not context.executing_eagerly() and (not build_save or
not build_restore):
raise ValueError("save and restore operations need to be built together "
" when eager execution is not enabled.")
saveables = saveable_object_util.validate_and_slice_inputs(
names_to_saveables)
if max_to_keep is None:
max_to_keep = 0
with ops.name_scope(name, "save",
[saveable.op for saveable in saveables]) as name:
# Add a placeholder string tensor for the filename.
filename_tensor = array_ops.placeholder_with_default(
filename or "model", shape=(), name="filename")
# Keep the name "Const" for backwards compatibility.
filename_tensor = array_ops.placeholder_with_default(
filename_tensor, shape=(), name="Const")
# Add the save ops.
if sharded:
per_device = self._GroupByDevices(saveables)
if build_save:
save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)
if build_restore:
restore_op = self._AddShardedRestoreOps(filename_tensor, per_device,
restore_sequentially, reshape)
else:
if build_save:
save_tensor = self._AddSaveOps(filename_tensor, saveables)
if build_restore:
restore_op = self._AddRestoreOps(filename_tensor, saveables,
restore_sequentially, reshape)
# In the following use case, it's possible to have restore_ops be called
# something else:
# - Build inference graph and export a meta_graph.
# - Import the inference meta_graph
# - Extend the inference graph to a train graph.
# - Export a new meta_graph.
# Now the second restore_op will be called "restore_all_1".
# As such, comment out the assert for now until we know whether supporting
# such usage model makes sense.
#
# assert restore_op.name.endswith("restore_all"), restore_op.name
if context.executing_eagerly():
# Store the tensor values to the tensor_names.
save_tensor_name = save_tensor.numpy() if build_save else ""
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.numpy(),
save_tensor_name=save_tensor_name,
restore_op_name="",
max_to_keep=max_to_keep,
sharded=sharded,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
version=self._write_version)
else:
graph = ops.get_default_graph()
# Do some sanity checking on collections containing
# PartitionedVariables. If a saved collection has a PartitionedVariable,
# the GraphDef needs to include concat ops to get the value (or there'll
# be a lookup error on load).
check_collection_list = graph.get_all_collection_keys()
for collection_type in check_collection_list:
for element in graph.get_collection(collection_type):
if isinstance(element, variables.PartitionedVariable):
try:
graph.get_operation_by_name(element.name)
except KeyError:
# Create a concat op for this PartitionedVariable. The user may
# not need it, but we'll try looking it up on MetaGraph restore
# since it's in a collection.
element.as_tensor()
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
max_to_keep=max_to_keep,
sharded=sharded,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
version=self._write_version)
class BulkSaverBuilder(BaseSaverBuilder):
"""SaverBuilder with support for bulk restoring multiple saveables."""
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
# Ignored: bulk restore is internally sequential.
del restore_sequentially
restore_specs = []
for saveable in saveables:
for spec in saveable.specs:
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
names, slices, dtypes = zip(*restore_specs)
# Load all tensors onto CPU 0 for compatibility with existing code.
with ops.device("cpu:0"):
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
def _get_saver_or_default():
"""Returns the saver from SAVERS collection, or creates a default one.
This method is used by other members of the training module, such as
`Scaffold`, or `CheckpointSaverHook`.
Returns:
`Saver`.
Raises:
RuntimeError: If the SAVERS collection already has more than one items.
"""
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if savers:
if len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
return savers[0]
saver = Saver(sharded=True, allow_empty=True)
if saver is not None:
ops.add_to_collection(collection_key, saver)
return saver
@tf_export(v1=["train.Saver"])
class Saver(object):
"""Saves and restores variables.
See [Variables](https://tensorflow.org/guide/variables)
for an overview of variables, saving and restoring.
The `Saver` class adds ops to save and restore variables to and from
*checkpoints*. It also provides convenience methods to run these ops.
Checkpoints are binary files in a proprietary format which map variable names
to tensor values. The best way to examine the contents of a checkpoint is to
load it using a `Saver`.
Savers can automatically number checkpoint filenames with a provided counter.
This lets you keep multiple checkpoints at different steps while training a
model. For example you can number the checkpoint filenames with the training
step number. To avoid filling up disks, savers manage checkpoint files
automatically. For example, they can keep only the N most recent files, or
one checkpoint for every N hours of training.
You number checkpoint filenames by passing a value to the optional
`global_step` argument to `save()`:
```python
saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
...
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'
```
Additionally, optional arguments to the `Saver()` constructor let you control
the proliferation of checkpoint files on disk:
* `max_to_keep` indicates the maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
no checkpoints are deleted from the filesystem but only the last one is
kept in the `checkpoint` file. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
* `keep_checkpoint_every_n_hours`: In addition to keeping the most recent
`max_to_keep` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
analyze how a model progressed during a long training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
Note that you still have to call the `save()` method to save the model.
Passing these arguments to the constructor will not save variables
automatically for you.
A training program that saves regularly looks like:
```python
...
# Create a saver.
saver = tf.compat.v1.train.Saver(...variables...)
# Launch the graph and train, saving the model every 1,000 steps.
sess = tf.compat.v1.Session()
for step in xrange(1000000):
sess.run(..training_op..)
if step % 1000 == 0:
# Append the step number to the checkpoint name:
saver.save(sess, 'my-model', global_step=step)
```
In addition to checkpoint files, savers keep a protocol buffer on disk with
the list of recent checkpoints. This is used to manage numbered checkpoint
files and by `latest_checkpoint()`, which makes it easy to discover the path
to the most recent checkpoint. That protocol buffer is stored in a file named
'checkpoint' next to the checkpoint files.
If you create several savers, you can specify a different filename for the
protocol buffer file in the call to `save()`.
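  To pick up training where it left off, the most recent checkpoint can be
  located with `latest_checkpoint()` and passed to `restore()`. A minimal
  sketch, assuming the checkpoints above were written into a (hypothetical)
  directory 'my-save-dir':

  ```python
  ckpt = tf.train.latest_checkpoint('my-save-dir')  # directory is an assumption
  if ckpt is not None:
    saver.restore(sess, ckpt)
  ```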
"""
def __init__(self,
var_list=None,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None,
defer_build=False,
allow_empty=False,
write_version=saver_pb2.SaverDef.V2,
pad_step_number=False,
save_relative_paths=False,
filename=None):
"""Creates a `Saver`.
The constructor adds ops to save and restore variables.
`var_list` specifies the variables that will be saved and restored. It can
be passed as a `dict` or a list:
* A `dict` of names to variables: The keys are the names that will be
used to save or restore the variables in the checkpoint files.
* A list of variables: The variables will be keyed with their op name in
the checkpoint files.
For example:
```python
v1 = tf.Variable(..., name='v1')
v2 = tf.Variable(..., name='v2')
# Pass the variables as a dict:
saver = tf.compat.v1.train.Saver({'v1': v1, 'v2': v2})
# Or pass them as a list.
saver = tf.compat.v1.train.Saver([v1, v2])
# Passing a list is equivalent to passing a dict with the variable op names
# as keys:
saver = tf.compat.v1.train.Saver({v.op.name: v for v in [v1, v2]})
```
Note: the newer `AutoTrackable` API is not supported by `Saver`. In this
case, the `tf.train.Checkpoint` class should be used.
The optional `reshape` argument, if `True`, allows restoring a variable from
a save file where the variable had a different shape, but the same number
of elements and type. This is useful if you have reshaped a variable and
want to reload it from an older checkpoint.
The optional `sharded` argument, if `True`, instructs the saver to shard
checkpoints per device.
Args:
var_list: A list of `Variable`/`SaveableObject`, or a dictionary mapping
names to `SaveableObject`s. If `None`, defaults to the list of all
saveable objects.
reshape: If `True`, allows restoring parameters from a checkpoint where
the variables have a different shape.
sharded: If `True`, shard the checkpoints, one per device.
max_to_keep: Maximum number of recent checkpoints to keep. Defaults to 5.
keep_checkpoint_every_n_hours: How often to keep checkpoints. Defaults to
10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A `Bool`, which if true, causes restore of different
variables to happen sequentially within each device. This can lower
memory usage when restoring very large models.
saver_def: Optional `SaverDef` proto to use instead of running the
builder. This is only useful for specialty code that wants to recreate a
`Saver` object for a previously built `Graph` that had a `Saver`. The
`saver_def` proto should be the one returned by the `as_saver_def()`
call of the `Saver` that was created for that `Graph`.
builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.
Defaults to `BulkSaverBuilder()`.
defer_build: If `True`, defer adding the save and restore ops to the
`build()` call. In that case `build()` should be called before
finalizing the graph or using the saver.
allow_empty: If `False` (default) raise an error if there are no variables
in the graph. Otherwise, construct the saver anyway and make it a no-op.
write_version: controls what format to use when saving checkpoints. It
also affects certain filepath matching logic. The V2 format is the
recommended choice: it is much more optimized than V1 in terms of memory
required and latency incurred during restore. Regardless of this
flag, the Saver is able to restore from both V2 and V1 checkpoints.
pad_step_number: if True, pads the global step number in the checkpoint
filepaths to some fixed width (8 by default). This is turned off by
default.
save_relative_paths: If `True`, will write relative paths to the
checkpoint state file. This is needed if the user wants to copy the
checkpoint directory and reload from the copied directory.
filename: If known at graph construction time, filename used for variable
loading/saving.
Raises:
TypeError: If `var_list` is invalid.
ValueError: If any of the keys or values in `var_list` are not unique.
      RuntimeError: If eager execution is enabled and `var_list` does not
        specify a list of variables to save.
@compatibility(eager)
When eager execution is enabled, `var_list` must specify a `list` or `dict`
of variables to save. Otherwise, a `RuntimeError` will be raised.
Although Saver works in some cases when executing eagerly, it is
fragile. Please switch to `tf.train.Checkpoint` or
`tf.keras.Model.save_weights`, which perform a more robust object-based
saving. These APIs will load checkpoints written by `Saver`.
@end_compatibility
"""
if defer_build and var_list:
raise ValueError(
"If `var_list` is provided then build cannot be deferred. "
"Either set defer_build=False or var_list=None.")
if context.executing_eagerly():
logging.warning(
"Saver is deprecated, please switch to tf.train.Checkpoint or "
"tf.keras.Model.save_weights for training checkpoints. When "
"executing eagerly variables do not necessarily have unique names, "
"and so the variable.name-based lookups Saver performs are "
"error-prone.")
if var_list is None:
raise RuntimeError(
"When eager execution is enabled, `var_list` must specify a list "
"or dict of variables to save")
self._var_list = var_list
self._reshape = reshape
self._sharded = sharded
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._name = name
self._restore_sequentially = restore_sequentially
self.saver_def = saver_def
self._builder = builder
self._is_built = False
self._allow_empty = allow_empty
self._is_empty = None
self._write_version = write_version
self._pad_step_number = pad_step_number
self._filename = filename
self._last_checkpoints = []
self._checkpoints_to_be_deleted = []
if context.executing_eagerly():
self._next_checkpoint_time = (
time.time() + self._keep_checkpoint_every_n_hours * 3600)
elif not defer_build:
self.build()
if self.saver_def:
self._check_saver_def()
self._write_version = self.saver_def.version
self._save_relative_paths = save_relative_paths
# For compatibility with object-based checkpoints, we may build a second
# Saver to read the renamed keys.
self._object_restore_saver = None
def build(self):
if context.executing_eagerly():
raise RuntimeError("Use save/restore instead of build in eager mode.")
self._build(self._filename, build_save=True, build_restore=True)
def _build_eager(self, checkpoint_path, build_save, build_restore):
self._build(
checkpoint_path, build_save=build_save, build_restore=build_restore)
def _build(self, checkpoint_path, build_save, build_restore):
"""Builds saver_def."""
if not context.executing_eagerly():
if self._is_built:
return
self._is_built = True
if not self.saver_def or context.executing_eagerly():
if self._builder is None:
self._builder = BulkSaverBuilder(self._write_version)
if self._var_list is None:
# pylint: disable=protected-access
self._var_list = variables._all_saveable_objects()
if not self._var_list:
if self._allow_empty:
self._is_empty = True
return
else:
raise ValueError("No variables to save")
self._is_empty = False
self.saver_def = self._builder._build_internal( # pylint: disable=protected-access
self._var_list,
reshape=self._reshape,
sharded=self._sharded,
max_to_keep=self._max_to_keep,
keep_checkpoint_every_n_hours=self._keep_checkpoint_every_n_hours,
name=self._name,
restore_sequentially=self._restore_sequentially,
filename=checkpoint_path,
build_save=build_save,
build_restore=build_restore)
elif self.saver_def and self._name:
# Since self._name is used as a name_scope by builder(), we are
# overloading the use of this field to represent the "import_scope" as
# well.
self.saver_def.filename_tensor_name = ops.prepend_name_scope(
self.saver_def.filename_tensor_name, self._name)
self.saver_def.save_tensor_name = ops.prepend_name_scope(
self.saver_def.save_tensor_name, self._name)
self.saver_def.restore_op_name = ops.prepend_name_scope(
self.saver_def.restore_op_name, self._name)
self._check_saver_def()
if not context.executing_eagerly():
# Updates next checkpoint time.
# Set in __init__ when executing eagerly.
self._next_checkpoint_time = (
time.time() + self.saver_def.keep_checkpoint_every_n_hours * 3600)
def _check_saver_def(self):
if not isinstance(self.saver_def, saver_pb2.SaverDef):
raise ValueError("saver_def must be a saver_pb2.SaverDef: %s" %
self.saver_def)
if not context.executing_eagerly():
if not self.saver_def.save_tensor_name:
raise ValueError("saver_def must specify the save_tensor_name: %s" %
str(self.saver_def))
if not self.saver_def.restore_op_name:
raise ValueError("saver_def must specify the restore_op_name: %s" %
str(self.saver_def))
def _CheckpointFilename(self, p):
"""Returns the checkpoint filename given a `(filename, time)` pair.
Args:
p: (filename, time) pair.
Returns:
Checkpoint file name.
"""
name, _ = p
return name
def _RecordLastCheckpoint(self, latest_save_path):
"""Manages the list of the latest checkpoints."""
if not self.saver_def.max_to_keep:
return
# Remove first from list if the same name was used before.
for p in self._last_checkpoints:
if latest_save_path == self._CheckpointFilename(p):
self._last_checkpoints.remove(p)
# Append new path to list
self._last_checkpoints.append((latest_save_path, time.time()))
# If more than max_to_keep, remove oldest.
if len(self._last_checkpoints) > self.saver_def.max_to_keep:
self._checkpoints_to_be_deleted.append(self._last_checkpoints.pop(0))
def _MaybeDeleteOldCheckpoints(self, meta_graph_suffix="meta"):
"""Deletes old checkpoints if necessary.
`self._checkpoints_to_be_deleted` is going to contain checkpoints that are
over `max_to_keep`. They are going to be deleted. If
`keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
every `N` hours. For example, if `N` is 0.5, an additional checkpoint is
kept for every 0.5 hours of training; if `N` is 10, an additional
checkpoint is kept for every 10 hours of training.
Args:
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
"""
if self._checkpoints_to_be_deleted:
p = self._checkpoints_to_be_deleted.pop(0)
      # Do not delete the file if keep_checkpoint_every_n_hours is set and we
      # have reached N hours of training.
should_keep = p[1] > self._next_checkpoint_time
if should_keep:
self._next_checkpoint_time += (
self.saver_def.keep_checkpoint_every_n_hours * 3600)
return
# Otherwise delete the files.
try:
checkpoint_management.remove_checkpoint(
self._CheckpointFilename(p), self.saver_def.version,
meta_graph_suffix)
except Exception as e: # pylint: disable=broad-except
logging.warning("Ignoring: %s", str(e))
def as_saver_def(self):
"""Generates a `SaverDef` representation of this saver.
Returns:
A `SaverDef` proto.
"""
return self.saver_def
def to_proto(self, export_scope=None):
"""Converts this `Saver` to a `SaverDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `SaverDef` protocol buffer.
"""
if export_scope is None:
return self.saver_def
if not (self.saver_def.filename_tensor_name.startswith(export_scope) and
self.saver_def.save_tensor_name.startswith(export_scope) and
self.saver_def.restore_op_name.startswith(export_scope)):
return None
saver_def = saver_pb2.SaverDef()
saver_def.CopyFrom(self.saver_def)
saver_def.filename_tensor_name = ops.strip_name_scope(
saver_def.filename_tensor_name, export_scope)
saver_def.save_tensor_name = ops.strip_name_scope(
saver_def.save_tensor_name, export_scope)
saver_def.restore_op_name = ops.strip_name_scope(saver_def.restore_op_name,
export_scope)
return saver_def
@staticmethod
def from_proto(saver_def, import_scope=None):
"""Returns a `Saver` object created from `saver_def`.
Args:
saver_def: a `SaverDef` protocol buffer.
import_scope: Optional `string`. Name scope to use.
Returns:
A `Saver` built from saver_def.
"""
return Saver(saver_def=saver_def, name=import_scope)
@property
def last_checkpoints(self):
"""List of not-yet-deleted checkpoint filenames.
You can pass any of the returned values to `restore()`.
Returns:
A list of checkpoint filenames, sorted from oldest to newest.
"""
return list(self._CheckpointFilename(p) for p in self._last_checkpoints)
def set_last_checkpoints(self, last_checkpoints):
"""DEPRECATED: Use set_last_checkpoints_with_time.
Sets the list of old checkpoint filenames.
Args:
last_checkpoints: A list of checkpoint filenames.
Raises:
AssertionError: If last_checkpoints is not a list.
"""
assert isinstance(last_checkpoints, list)
# We use a timestamp of +inf so that this checkpoint will never be
# deleted. This is both safe and backwards compatible to a previous
# version of the code which used s[1] as the "timestamp".
self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]
def set_last_checkpoints_with_time(self, last_checkpoints_with_time):
"""Sets the list of old checkpoint filenames and timestamps.
Args:
last_checkpoints_with_time: A list of tuples of checkpoint filenames and
timestamps.
Raises:
AssertionError: If last_checkpoints_with_time is not a list.
"""
assert isinstance(last_checkpoints_with_time, list)
self._last_checkpoints = last_checkpoints_with_time
def recover_last_checkpoints(self, checkpoint_paths):
"""Recovers the internal saver state after a crash.
This method is useful for recovering the "self._last_checkpoints" state.
Globs for the checkpoints pointed to by `checkpoint_paths`. If the files
exist, use their mtime as the checkpoint timestamp.
Args:
checkpoint_paths: a list of checkpoint paths.
"""
checkpoints_with_mtimes = []
for checkpoint_path in checkpoint_paths:
try:
mtime = checkpoint_management.get_checkpoint_mtimes([checkpoint_path])
except errors.NotFoundError:
# It's fine if some other thread/process is deleting some older
# checkpoint concurrently.
continue
if mtime:
checkpoints_with_mtimes.append((checkpoint_path, mtime[0]))
self.set_last_checkpoints_with_time(checkpoints_with_mtimes)
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False,
save_debug_info=False):
# pylint: disable=line-too-long
"""Saves variables.
This method runs the ops added by the constructor for saving variables.
It requires a session in which the graph was launched. The variables to
save must also have been initialized.
The method returns the path prefix of the newly created checkpoint files.
This string can be passed directly to a call to `restore()`.
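    A minimal sketch (the paths below are assumptions):

    ```python
    ckpt_prefix = saver.save(sess, '/tmp/train_ckpts/model', global_step=step)
    # later: saver.restore(sess, ckpt_prefix)
    ```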
Args:
sess: A Session to use to save the variables.
save_path: String. Prefix of filenames created for the checkpoint.
global_step: If provided the global step number is appended to `save_path`
to create the checkpoint filenames. The optional argument can be a
`Tensor`, a `Tensor` name or an integer.
      latest_filename: Optional name for the protocol buffer file that will
        contain the list of most recent checkpoints. That file, kept in the
same directory as the checkpoint files, is automatically managed by the
saver to keep track of recent checkpoints. Defaults to 'checkpoint'.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
write_meta_graph: `Boolean` indicating whether or not to write the meta
graph file.
write_state: `Boolean` indicating whether or not to write the
`CheckpointStateProto`.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
save_debug_info: If `True`, save the GraphDebugInfo to a separate file,
        which is in the same directory as save_path and with `_debug` added
        before the file extension. This is only enabled when
        `write_meta_graph` is `True`.
Returns:
A string: path prefix used for the checkpoint files. If the saver is
sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'
is the number of shards created.
If the saver is empty, returns None.
Raises:
TypeError: If `sess` is not a `Session`.
ValueError: If `latest_filename` contains path components, or if it
collides with `save_path`.
RuntimeError: If save and restore ops weren't built.
"""
# pylint: enable=line-too-long
if not self._is_built and not context.executing_eagerly():
raise RuntimeError(
"`build()` should be called before save if defer_build==True")
if latest_filename is None:
latest_filename = "checkpoint"
if self._write_version != saver_pb2.SaverDef.V2:
logging.warning("*******************************************************")
logging.warning("TensorFlow's V1 checkpoint format has been deprecated.")
logging.warning("Consider switching to the more efficient V2 format:")
logging.warning(" `tf.train.Saver(write_version=tf.train.SaverDef.V2)`")
logging.warning("now on by default.")
logging.warning("*******************************************************")
if os.path.split(latest_filename)[0]:
raise ValueError("'latest_filename' must not contain path components")
save_path = compat.as_str(save_path)
if global_step is not None:
if not isinstance(global_step, compat.integral_types):
global_step = training_util.global_step(sess, global_step)
checkpoint_file = "%s-%d" % (save_path, global_step)
if self._pad_step_number:
# Zero-pads the step numbers, so that they are sorted when listed.
checkpoint_file = "%s-%s" % (save_path, "{:08d}".format(global_step))
else:
checkpoint_file = save_path
if os.path.basename(save_path) == latest_filename and not self._sharded:
# Guard against collision between data file and checkpoint state file.
raise ValueError(
"'latest_filename' collides with 'save_path': '%s' and '%s'" %
(latest_filename, save_path))
if (not context.executing_eagerly() and
not isinstance(sess, session.SessionInterface)):
raise TypeError("'sess' must be a Session; %s" % sess)
save_path_parent = os.path.dirname(save_path)
if not self._is_empty:
try:
if context.executing_eagerly():
self._build_eager(
checkpoint_file, build_save=True, build_restore=False)
model_checkpoint_path = self.saver_def.save_tensor_name
else:
model_checkpoint_path = sess.run(
self.saver_def.save_tensor_name,
{self.saver_def.filename_tensor_name: checkpoint_file})
model_checkpoint_path = compat.as_str(model_checkpoint_path)
if write_state:
self._RecordLastCheckpoint(model_checkpoint_path)
checkpoint_management.update_checkpoint_state_internal(
save_dir=save_path_parent,
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=self.last_checkpoints,
latest_filename=latest_filename,
save_relative_paths=self._save_relative_paths)
self._MaybeDeleteOldCheckpoints(meta_graph_suffix=meta_graph_suffix)
except (errors.FailedPreconditionError, errors.NotFoundError) as exc:
if not gfile.IsDirectory(save_path_parent):
exc = ValueError(
"Parent directory of {} doesn't exist, can't save.".format(
save_path))
raise exc
if write_meta_graph:
meta_graph_filename = checkpoint_management.meta_graph_filename(
checkpoint_file, meta_graph_suffix=meta_graph_suffix)
if not context.executing_eagerly():
with sess.graph.as_default():
self.export_meta_graph(
meta_graph_filename,
strip_default_attrs=strip_default_attrs,
save_debug_info=save_debug_info)
if self._is_empty:
return None
else:
return model_checkpoint_path
def export_meta_graph(self,
filename=None,
collection_list=None,
as_text=False,
export_scope=None,
clear_devices=False,
clear_extraneous_savers=False,
strip_default_attrs=False,
save_debug_info=False):
# pylint: disable=line-too-long
"""Writes `MetaGraphDef` to save_path/filename.
Args:
filename: Optional meta_graph filename including the path.
collection_list: List of string keys to collect.
as_text: If `True`, writes the meta_graph as an ASCII proto.
export_scope: Optional `string`. Name scope to remove.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during export.
clear_extraneous_savers: Remove any Saver-related information from the
graph (both Save/Restore ops and SaverDefs) that are not associated with
this Saver.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
save_debug_info: If `True`, save the GraphDebugInfo to a separate file,
        which is in the same directory as filename and with `_debug` added
        before the file extension.
Returns:
A `MetaGraphDef` proto.
"""
# pylint: enable=line-too-long
return export_meta_graph(
filename=filename,
graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
saver_def=self.saver_def,
collection_list=collection_list,
as_text=as_text,
export_scope=export_scope,
clear_devices=clear_devices,
clear_extraneous_savers=clear_extraneous_savers,
strip_default_attrs=strip_default_attrs,
save_debug_info=save_debug_info)
def restore(self, sess, save_path):
"""Restores previously saved variables.
This method runs the ops added by the constructor for restoring variables.
It requires a session in which the graph was launched. The variables to
restore do not have to have been initialized, as restoring is itself a way
to initialize variables.
The `save_path` argument is typically a value previously returned from a
`save()` call, or a call to `latest_checkpoint()`.
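    A minimal usage sketch (the checkpoint directory `/tmp/train_ckpts` is an
    assumption; `saver` is the `Saver` built for the current graph):

    ```python
    ckpt = tf.train.latest_checkpoint('/tmp/train_ckpts')
    saver.restore(sess, ckpt)  # no initializer needs to run for these variables
    ```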
Args:
sess: A `Session` to use to restore the parameters. None in eager mode.
save_path: Path where parameters were previously saved.
Raises:
ValueError: If save_path is None or not a valid checkpoint.
"""
if self._is_empty:
return
if save_path is None:
raise ValueError("Can't load save_path when it is None.")
checkpoint_prefix = compat.as_text(save_path)
if not checkpoint_management.checkpoint_exists_internal(checkpoint_prefix):
raise ValueError("The passed save_path is not a valid checkpoint: " +
checkpoint_prefix)
logging.info("Restoring parameters from %s", checkpoint_prefix)
try:
if context.executing_eagerly():
self._build_eager(save_path, build_save=False, build_restore=True)
else:
sess.run(self.saver_def.restore_op_name,
{self.saver_def.filename_tensor_name: save_path})
except errors.NotFoundError as err:
# There are three common conditions that might cause this error:
# 0. The file is missing. We ignore here, as this is checked above.
# 1. This is an object-based checkpoint trying name-based loading.
# 2. The graph has been altered and a variable or other name is missing.
      # Handle case 1: the name-based load failed, so try to parse the
      # checkpoint as an object-based checkpoint instead.
try:
names_to_keys = object_graph_key_mapping(save_path)
except errors.NotFoundError:
# 2. This is not an object-based checkpoint, which likely means there
# is a graph mismatch. Re-raise the original error with
# a helpful message (b/110263146)
raise _wrap_restore_error_with_msg(
err, "a Variable name or other graph key that is missing")
# This is an object-based checkpoint. We'll print a warning and then do
# the restore.
logging.warning(
"Restoring an object-based checkpoint using a name-based saver. This "
"may be somewhat fragile, and will re-build the Saver. Instead, "
"consider loading object-based checkpoints using "
"tf.train.Checkpoint().")
self._object_restore_saver = saver_from_object_based_checkpoint(
checkpoint_path=save_path,
var_list=self._var_list,
builder=self._builder,
names_to_keys=names_to_keys,
cached_saver=self._object_restore_saver)
self._object_restore_saver.restore(sess=sess, save_path=save_path)
except errors.InvalidArgumentError as err:
# There is a mismatch between the graph and the checkpoint being loaded.
# We add a more reasonable error message here to help users (b/110263146)
raise _wrap_restore_error_with_msg(
err, "a mismatch between the current graph and the graph")
@staticmethod
def _add_collection_def(meta_graph_def, key, export_scope=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
export_scope: Optional `string`. Name scope to remove.
"""
meta_graph.add_collection_def(
meta_graph_def, key, export_scope=export_scope)
@tf_export(v1=["train.import_meta_graph"])
def import_meta_graph(meta_graph_or_file,
clear_devices=False,
import_scope=None,
**kwargs):
"""Recreates a Graph saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates all the collections, and returns a saver
constructed from the `saver_def` field.
In combination with `export_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
```Python
...
# Create a saver.
saver = tf.compat.v1.train.Saver(...variables...)
# Remember the training_op we want to run by adding it to a collection.
tf.compat.v1.add_to_collection('train_op', train_op)
sess = tf.compat.v1.Session()
for step in xrange(1000000):
sess.run(train_op)
if step % 1000 == 0:
# Saves checkpoint, which by default also exports a meta_graph
# named 'my-model-global_step.meta'.
saver.save(sess, 'my-model', global_step=step)
```
Later we can continue training from this saved `meta_graph` without building
the model from scratch.
```Python
with tf.Session() as sess:
new_saver =
tf.train.import_meta_graph('my-save-dir/my-model-10000.meta')
new_saver.restore(sess, 'my-save-dir/my-model-10000')
# tf.get_collection() returns a list. In this example we only want
# the first one.
train_op = tf.get_collection('train_op')[0]
for step in xrange(1000000):
sess.run(train_op)
```
NOTE: Restarting training from saved `meta_graph` only works if the
device assignments have not changed.
Example:
Variables, placeholders, and independent operations can also be stored, as
shown in the following example.
```Python
# Saving contents and operations.
v1 = tf.placeholder(tf.float32, name="v1")
v2 = tf.placeholder(tf.float32, name="v2")
v3 = tf.math.multiply(v1, v2)
vx = tf.Variable(10.0, name="vx")
v4 = tf.add(v3, vx, name="v4")
saver = tf.train.Saver([vx])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(vx.assign(tf.add(vx, vx)))
result = sess.run(v4, feed_dict={v1:12.0, v2:3.3})
print(result)
saver.save(sess, "./model_ex1")
```
Later this model can be restored and contents loaded.
```Python
# Restoring variables and running operations.
saver = tf.train.import_meta_graph("./model_ex1.meta")
sess = tf.Session()
saver.restore(sess, "./model_ex1")
result = sess.run("v4:0", feed_dict={"v1:0": 12.0, "v2:0": 3.3})
print(result)
```
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during import.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
**kwargs: Optional keyed arguments.
Returns:
A saver constructed from `saver_def` in `MetaGraphDef` or None.
A None value is returned if no variables exist in the `MetaGraphDef`
(i.e., there are no variables to restore).
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Exporting/importing meta graphs is not supported. No graph exists when eager
execution is enabled.
@end_compatibility
""" # pylint: disable=g-doc-exception
return _import_meta_graph_with_return_elements(meta_graph_or_file,
clear_devices, import_scope,
**kwargs)[0]
def _import_meta_graph_with_return_elements(meta_graph_or_file,
clear_devices=False,
import_scope=None,
return_elements=None,
**kwargs):
"""Import MetaGraph, and return both a saver and returned elements."""
if context.executing_eagerly():
raise RuntimeError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled. No graph exists when eager "
"execution is enabled.")
if not isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file)
else:
meta_graph_def = meta_graph_or_file
imported_vars, imported_return_elements = (
meta_graph.import_scoped_meta_graph_with_return_elements(
meta_graph_def,
clear_devices=clear_devices,
import_scope=import_scope,
return_elements=return_elements,
**kwargs))
saver = _create_saver_from_imported_meta_graph(meta_graph_def, import_scope,
imported_vars)
return saver, imported_return_elements
def _create_saver_from_imported_meta_graph(meta_graph_def, import_scope,
imported_vars):
"""Return a saver for restoring variable values to an imported MetaGraph."""
if meta_graph_def.HasField("saver_def"):
# Infer the scope that is prepended by `import_scoped_meta_graph`.
scope = import_scope
var_names = list(imported_vars.keys())
if var_names:
sample_key = var_names[0]
sample_var = imported_vars[sample_key]
scope = sample_var.name[:-len(sample_key)]
return Saver(saver_def=meta_graph_def.saver_def, name=scope)
else:
if variables._all_saveable_objects(scope=import_scope): # pylint: disable=protected-access
# Return the default saver instance for all graph variables.
return Saver()
else:
# If no graph variables exist, then a Saver cannot be constructed.
logging.info("Saver not created because there are no variables in the"
" graph to restore")
return None
@tf_export(v1=["train.export_meta_graph"])
def export_meta_graph(filename=None,
meta_info_def=None,
graph_def=None,
saver_def=None,
collection_list=None,
as_text=False,
graph=None,
export_scope=None,
clear_devices=False,
clear_extraneous_savers=False,
strip_default_attrs=False,
save_debug_info=False,
**kwargs):
# pylint: disable=line-too-long
"""Returns `MetaGraphDef` proto.
Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
`MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
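  A minimal sketch (the output path '/tmp/my-model.meta' is an assumption):

  ```python
  meta_graph_def = tf.compat.v1.train.export_meta_graph(
      filename='/tmp/my-model.meta', clear_devices=True)
  ```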
Args:
filename: Optional filename including the path for writing the generated
`MetaGraphDef` protocol buffer.
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
graph: The `Graph` to export. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract the
      subgraph. The scope name will be stripped from the node definitions for
easy import later into new name scopes. If `None`, the whole graph is
exported. graph_def and export_scope cannot both be specified.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during export.
clear_extraneous_savers: Remove any Saver-related information from the graph
(both Save/Restore ops and SaverDefs) that are not associated with the
provided SaverDef.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
save_debug_info: If `True`, save the GraphDebugInfo to a separate file,
      which is in the same directory as filename and with `_debug` added before
      the file extension.
**kwargs: Optional keyed arguments.
Returns:
A `MetaGraphDef` proto.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Exporting/importing meta graphs is not supported unless both `graph_def` and
`graph` are provided. No graph exists when eager execution is enabled.
@end_compatibility
"""
# pylint: enable=line-too-long
if context.executing_eagerly() and not (graph_def is not None and
graph is not None):
raise RuntimeError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled. No graph exists when eager "
"execution is enabled.")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
filename=filename,
meta_info_def=meta_info_def,
graph_def=graph_def,
saver_def=saver_def,
collection_list=collection_list,
as_text=as_text,
graph=graph,
export_scope=export_scope,
clear_devices=clear_devices,
clear_extraneous_savers=clear_extraneous_savers,
strip_default_attrs=strip_default_attrs,
save_debug_info=save_debug_info,
**kwargs)
return meta_graph_def
def _wrap_restore_error_with_msg(err, extra_verbiage):
err_msg = ("Restoring from checkpoint failed. This is most likely "
"due to {} from the checkpoint. Please ensure that you "
"have not altered the graph expected based on the checkpoint. "
"Original error:\n\n{}").format(extra_verbiage, err.message)
return err.__class__(err.node_def, err.op, err_msg)
ops.register_proto_function(
ops.GraphKeys.SAVERS,
proto_type=saver_pb2.SaverDef,
to_proto=Saver.to_proto,
from_proto=Saver.from_proto)
def object_graph_key_mapping(checkpoint_path):
"""Return name to key mappings from the checkpoint.
Args:
checkpoint_path: string, path to object-based checkpoint
Returns:
Dictionary mapping tensor names to checkpoint keys.
"""
reader = py_checkpoint_reader.NewCheckpointReader(checkpoint_path)
object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
names_to_keys = {}
for node in object_graph_proto.nodes:
for attribute in node.attributes:
names_to_keys[attribute.full_name] = attribute.checkpoint_key
return names_to_keys
def saver_from_object_based_checkpoint(checkpoint_path,
var_list=None,
builder=None,
names_to_keys=None,
cached_saver=None):
"""Return a `Saver` which reads from an object-based checkpoint.
This function validates that all variables in the variables list are remapped
in the object-based checkpoint (or `names_to_keys` dict if provided). A
saver will be created with the list of remapped variables.
The `cached_saver` argument allows the user to pass in a previously created
  saver, so multiple `saver.restore()` calls don't pollute the graph when
  building the graph. This assumes that keys are consistent, meaning that the
1) `checkpoint_path` checkpoint, and
2) checkpoint used to create the `cached_saver`
are the same type of object-based checkpoint. If this argument is set, this
function will simply validate that all variables have been remapped by the
checkpoint at `checkpoint_path`.
Note that in general, `tf.train.Checkpoint` should be used to restore/save an
object-based checkpoint.
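  A minimal sketch (the checkpoint prefix '/tmp/object_ckpt' is an assumption;
  it stands for a checkpoint written with `tf.train.Checkpoint` or
  `tf.keras.Model.save_weights`):

  ```python
  saver = saver_from_object_based_checkpoint('/tmp/object_ckpt')
  saver.restore(sess, '/tmp/object_ckpt')
  ```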
Args:
checkpoint_path: string, path to object-based checkpoint
var_list: list of `Variables` that appear in the checkpoint. If `None`,
`var_list` will be set to all saveable objects.
builder: a `BaseSaverBuilder` instance. If `None`, a new `BulkSaverBuilder`
will be created.
names_to_keys: dict mapping string tensor names to checkpoint keys. If
`None`, this dict will be generated from the checkpoint file.
cached_saver: Cached `Saver` object with remapped variables.
Returns:
`Saver` with remapped variables for reading from an object-based checkpoint.
Raises:
    ValueError: If the checkpoint provided is not an object-based checkpoint.
NotFoundError: If one of the variables in `var_list` can not be found in the
checkpoint. This could mean the checkpoint or `names_to_keys` mapping is
missing the variable.
"""
if names_to_keys is None:
try:
names_to_keys = object_graph_key_mapping(checkpoint_path)
except errors.NotFoundError:
raise ValueError("Checkpoint in %s not an object-based checkpoint." %
checkpoint_path)
if var_list is None:
var_list = variables._all_saveable_objects() # pylint: disable=protected-access
if builder is None:
builder = BulkSaverBuilder()
saveables = saveable_object_util.validate_and_slice_inputs(var_list)
current_names = set()
for saveable in saveables:
for spec in saveable.specs:
current_names.add(spec.name)
previous_names = set(names_to_keys.keys())
missing_names = current_names - previous_names
if missing_names:
extra_names = previous_names - current_names
intersecting_names = previous_names.intersection(current_names)
raise errors.NotFoundError(
None,
None,
message=(
"\n\nExisting variables not in the checkpoint: %s\n\n"
"Variables names when this checkpoint was written which don't "
"exist now: %s\n\n"
"(%d variable name(s) did match)\n\n"
"Could not find some variables in the checkpoint (see names "
"above). Saver was attempting to load an object-based checkpoint "
"(saved using tf.train.Checkpoint or tf.keras.Model.save_weights) "
"using variable names. If the checkpoint was written with eager "
"execution enabled, it's possible that variable names have "
"changed (for example missing a '_1' suffix). It's also "
"possible that there are new variables which did not exist "
"when the checkpoint was written. You can construct a "
"Saver(var_list=...) with only the variables which previously "
"existed, and if variable names have changed you may need to "
"make this a dictionary with the old names as keys. If you're "
"using an Estimator, you'll need to return a tf.train.Saver "
"inside a tf.train.Scaffold from your model_fn.") %
(", ".join(sorted(missing_names)), ", ".join(
sorted(extra_names)), len(intersecting_names)))
for saveable in saveables:
for spec in saveable.specs:
spec.name = names_to_keys[spec.name]
if cached_saver is None:
return Saver(saveables)
return cached_saver
|
orbitfp7/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/232_drop_dump_tables.py
|
47
|
# Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Table
def upgrade(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
'instance_actions_events', 'instance_faults', 'migrations']
for table_name in table_names:
table = Table('dump_' + table_name, meta)
table.drop(checkfirst=True)
def downgrade(migrate_engine):
pass
|
hbldh/pymetawear
|
refs/heads/master
|
examples/led.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`led`
==================
Created by hbldh <henrik.blidh@nedomkull.com>
Created on 2016-04-02
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import time
from pymetawear.discover import select_device
from pymetawear.client import MetaWearClient
address = select_device()
c = MetaWearClient(str(address), debug=True)
print("New client created: {0}".format(c))
print("Blinking 10 times with green LED...")
pattern = c.led.load_preset_pattern('blink', repeat_count=10)
c.led.write_pattern(pattern, 'g')
c.led.play()
time.sleep(5.0)
c.disconnect()
|
myzj/dop
|
refs/heads/master
|
dop/dop/debug_settings.py
|
1
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# DATABASES = {
# 'default': {
# #'ENGINE': 'django.db.backends.sqlite3',#
# #'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'myzj',
# 'USER':'root',
# 'PASSWORD':'111222',
# 'HOST':'127.0.0.1',
# 'PORT':'3306',
# }
# }
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'postdb',
# 'USER': 'root',
# 'PASSWORD': '123456',
# 'HOST': '192.168.60.84',
# 'PORT': 3306,
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dop',
'USER': 'db_dev',
'PASSWORD': 'db_dev',
'HOST': '192.168.100.160',
'PORT': 3306,
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'postdb',
# 'USER': 'root',
# 'PASSWORD': '111222',
# 'HOST': '127.0.0.1',
# 'PORT': 3306,
# }
# }
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "web/static"),
)
|
piotroxp/scibibscan
|
refs/heads/master
|
scib/lib/python3.5/site-packages/numpy/f2py/f2py_testing.py
|
148
|
from __future__ import division, absolute_import, print_function
import sys
import re
from numpy.testing.utils import jiffies, memusage
def cmdline():
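    """Parse command line arguments for the f2py test helpers.

    A purely numeric argument sets the repeat count; all remaining
    arguments are joined into the f2py options string.

    Returns a ``(repeat, f2py_opts)`` tuple.
    """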
m = re.compile(r'\A\d+\Z')
args = []
repeat = 1
for a in sys.argv[1:]:
if m.match(a):
            repeat = int(a)  # the regex guarantees a purely numeric argument
else:
args.append(a)
f2py_opts = ' '.join(args)
return repeat, f2py_opts
def run(runtest, test_functions, repeat=1):
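    """Run each test function ``repeat`` times.

    Reports memory usage changes between iterations (when ``memusage`` is
    available) and the total elapsed time measured with ``jiffies``.
    """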
l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions]
start_memusage = memusage()
diff_memusage = None
start_jiffies = jiffies()
i = 0
while i < repeat:
i += 1
for t, fname in l:
runtest(t)
if start_memusage is None:
continue
if diff_memusage is None:
diff_memusage = memusage() - start_memusage
else:
diff_memusage2 = memusage() - start_memusage
if diff_memusage2 != diff_memusage:
print('memory usage change at step %i:' % i,
diff_memusage2 - diff_memusage,
fname)
diff_memusage = diff_memusage2
current_memusage = memusage()
print('run', repeat * len(test_functions), 'tests',
'in %.2f seconds' % ((jiffies() - start_jiffies) / 100.0))
if start_memusage:
print('initial virtual memory size:', start_memusage, 'bytes')
print('current virtual memory size:', current_memusage, 'bytes')
|
sdpython/cvxpy
|
refs/heads/master
|
examples/extensions/ncvx/boolean.py
|
12
|
from cvxpy.expressions.variables import Variable
from cvxpy.expressions.constants import Parameter
import cvxopt
import numpy as np
class Boolean(Variable):
def __init__(self, rows=1, cols=1, *args, **kwargs):
self._LB = Parameter(rows, cols)
self._LB.value = cvxopt.matrix(0,(rows, cols), tc='d')
self._UB = Parameter(rows, cols)
self._UB.value = cvxopt.matrix(1,(rows, cols), tc='d')
self._fix_values = cvxopt.matrix(False,(rows, cols))
super(Boolean, self).__init__(rows, cols, *args, **kwargs)
def round(self):
self.LB = cvxopt.matrix(self._rounded, self.size)
self.UB = cvxopt.matrix(self._rounded, self.size)
def relax(self):
# if fix_value is true, do not change LB and UB
for i in range(self.size[0]):
for j in range(self.size[1]):
if not self.fix_values[i, j]:
self.LB[i, j] = 0
self.UB[i, j] = 1
def set(self, value):
        if not isinstance(value, bool):
            raise TypeError("Must set to boolean value")
self.LB = cvxopt.matrix(value, self.size)
self.UB = cvxopt.matrix(value, self.size)
self.fix_values = cvxopt.matrix(True, self.size)
def unset(self):
self.fix_values = cvxopt.matrix(False, self.size)
@property
def _rounded(self):
# WARNING: attempts to access self.value
if self.size == (1, 1):
return round(self.value)
else:
return np.around(self.value)
@property
def LB(self):
return self._LB.value
@LB.setter
def LB(self, value):
self._LB.value = value
@property
def UB(self):
return self._UB.value
@UB.setter
def UB(self, value):
self._UB.value = value
@property
def fix_values(self):
return self._fix_values
@fix_values.setter
def fix_values(self, value):
self._fix_values = value
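# A minimal usage sketch (not part of the original module): it exercises only
# the methods defined above and assumes cvxpy/cvxopt versions matching this
# older API are installed.
if __name__ == "__main__":
    b = Boolean(2, 2)
    b.set(True)   # fix every entry to 1 and freeze the bounds
    print(b.LB)
    print(b.UB)
    b.unset()     # allow the entries to be relaxed again
    b.relax()     # reset bounds of non-fixed entries to [0, 1]
    print(b.LB)
    print(b.UB)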
|
Sweetgrassbuffalo/ReactionSweeGrass-v2
|
refs/heads/master
|
.meteor/local/dev_bundle/python/Lib/encodings/tis_620.py
|
593
|
""" Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='tis-620',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\ufffe'
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
desirable-objects/hotwire-shell
|
refs/heads/master
|
hotwire_ui/completion.py
|
2
|
# This file is part of the Hotwire Shell user interface.
#
# Copyright (C) 2007 Colin Walters <walters@verbum.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os, sys, re, logging
import gtk, gobject, pango
import hotwire_ui.widgets as hotwidgets
from hotwire.command import PipelineLanguageRegistry
from hotwire.completion import Completion, CompletionSystem, CompletionResults
from hotwire.util import markup_for_match
from hotwire_ui.pixbufcache import PixbufCache
from hotwire.state import History
from hotwire.builtin import Builtin
from hotwire.logutil import log_except
from hotwire.cmdalias import Alias
from hotwire.sysdep.fs import File, Filesystem
from hotwire.sysdep.proc import Process
_logger = logging.getLogger("hotwire.ui.Completion")
class MatchView(gtk.VBox):
def __init__(self, title, maxcount=500, keybinding=None):
super(MatchView, self).__init__()
self.__maxcount = maxcount
headerhbox = gtk.HBox()
self.__label = gtk.Label()
self.__label.set_alignment(0.0, 0.5)
headerhbox.add(self.__label)
if keybinding:
self.__keybinding_label = gtk.Label()
self.__keybinding_label.set_markup(_('Key: <tt>%s</tt>') % (keybinding,))
self.__keybinding_label.set_alignment(1.0, 0.5)
headerhbox.add(self.__keybinding_label)
self.__title = title
self.__keybinding = keybinding
self.pack_start(headerhbox, expand=False)
self.__scroll = gtk.ScrolledWindow()
# FIXME - we should really be using a combo box here
self.__scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_NEVER)
self.__model = gtk.ListStore(gobject.TYPE_PYOBJECT)
self.__view = gtk.TreeView(self.__model)
self.__selection = self.__view.get_selection()
self.__selection.set_mode(gtk.SELECTION_SINGLE)
self.__selection.connect('changed', self.__on_selection_changed)
self.__view.set_headers_visible(False)
if maxcount > 1:
self.__scroll.add(self.__view)
self.add(self.__scroll)
else:
self.add(self.__view)
colidx = self.__view.insert_column_with_data_func(-1, '',
hotwidgets.CellRendererText(),
self._render_item)
self.__none_label = gtk.Label()
self.__none_label.set_alignment(0.0, 0.5)
self.__none_label.set_no_show_all(True)
self.__none_label.set_markup('<i>%s</i>' % (_('(No matches)'),))
self.pack_start(self.__none_label, expand=False)
def get_view(self):
return self.__view
def prepare_max_size_request(self):
self.__scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_NEVER)
def finish_max_size_request(self):
self.__scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
def get_model(self):
return self.__model
def get_selection(self):
return self.__selection
def set_content(self, results, uniquify=False, reverse=True, do_select=True):
model = gtk.ListStore(gobject.TYPE_PYOBJECT)
overmax = False
uniqueresults = set()
i = 0
for completion in results:
if i >= self.__maxcount:
overmax = True
break
if uniquify and completion in uniqueresults:
continue
uniqueresults.add(completion)
i += 1
if reverse:
itr = model.prepend([completion])
else:
itr = model.append([completion])
self.__model = model
self.__view.set_model(model)
nchildren = self.__model.iter_n_children(None)
if results and do_select:
self.__selection.unselect_all()
itr = self.__model.iter_nth_child(None, nchildren-1)
self.__selection.select_iter(itr)
if results:
self.__none_label.hide()
else:
self.__none_label.show()
self.set_total(nchildren)
def set_total(self, total):
self.__label.set_markup(_(' %s - <b>%d</b> total ') % \
(gobject.markup_escape_text(self.__title),
total))
def iter_matches(self):
i = self.__model.iter_n_children(None)-1
while i >= 0:
yield self.__model[i][0]
i -= 1
def __vadjust(self, pos, full):
adjustment = self.__scroll.get_vadjustment()
if not full:
val = self.__scroll.get_vadjustment().page_increment
if not pos:
                val = -val
newval = adjustment.value + val
else:
if pos:
newval = adjustment.upper
else:
newval = adjustment.lower
newval = max(min(newval, adjustment.upper-adjustment.page_size), adjustment.lower)
adjustment.value = newval
    def page_up(self):
        self.__vadjust(False, False)
    def page_down(self):
        self.__vadjust(True, False)
def get_total(self):
return self.__model.iter_n_children(None)
@log_except(_logger)
def __on_selection_changed(self, sel):
(model, itr) = sel.get_selected()
_logger.debug("selection changed: %r %r", model, itr)
if itr is not None:
path = model.get_path(itr)
_logger.debug("scrolling to path: %r", path)
self.__view.scroll_to_cell(path)
class MatchPopup(hotwidgets.TransientPopup):
__gsignals__ = {
"item-selected" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
def __init__(self, title, viewklass, entry, window, context=None, **kwargs):
super(MatchPopup, self).__init__(entry, window, **kwargs)
self.__entry = entry
self.__window = window
self.__maxcount = 10
self.__view = viewklass(title)
self.__selection = self.__view.get_selection()
self.get_box().pack_start(self.__view, expand=True)
self.__miniview = viewklass(title, maxcount=1)
self.__view.get_view().connect("row-activated", self.__on_row_activated)
self.__morelabel = gtk.Label()
self.__morelabel.set_no_show_all(True)
self.get_box().pack_start(self.__morelabel, expand=False)
self.__none_label = gtk.Label()
self.__none_label.set_alignment(0.0, 0.5)
self.__none_label.set_no_show_all(True)
self.__none_label.set_markup('<i>%s</i>' % (_('(No matches)'),))
self.get_box().pack_start(self.__none_label, expand=False)
def _get_view(self):
return self.__view
def get_miniview(self):
return self.__miniview
def set_content(self, results, **kwargs):
self.__view.set_content(results, **kwargs)
self.__miniview.set_content(results, do_select=False, **kwargs)
self.__miniview.set_total(self.__view.get_total())
if results:
self.__none_label.hide()
else:
self.__none_label.show()
def set_matchtext(self, matchtext):
self.__view.set_matchtext(matchtext)
self.__miniview.set_matchtext(matchtext)
def iter_matches(self, *args, **kwargs):
for x in self.__view.iter_matches(*args, **kwargs):
yield x
def get_total(self):
return self.__view.get_total()
def _set_size_request(self):
(ref_x, ref_y, ref_w, ref_h, bits) = self.__entry.get_parent_window().get_geometry()
_logger.debug("setting size request width to %d*0.75", ref_w)
#self.set_size_request((int(ref_w*0.75)), -1)
def get_selected_path(self):
(model, itr) = self.__selection.get_selected()
return itr and model.get_path(itr)
def select_next(self):
path = self.get_selected_path()
if not path:
return
previdx = path[-1]-1
if previdx < 0:
return
model = self.__view.get_model()
previter = model.iter_nth_child(None, previdx)
if not previter:
return
self.__selection.select_iter(previter)
def select_prev(self):
path = self.get_selected_path()
if not path:
return
model = self.__view.get_model()
seliter = model.get_iter(path)
iternext = model.iter_next(seliter)
if not iternext:
return
self.__selection.select_iter(iternext)
def page_up(self):
self.__view.page_up()
def page_down(self):
self.__view.page_down()
def emit_itemselected(self):
(model, itr) = self.__selection.get_selected()
if not itr:
self.emit('item-selected', None)
return
self.emit('item-selected', model.get_value(itr, 0))
def __on_row_activated(self, tv, path, vc):
_logger.debug("row activated: %s", path)
model = self.__view.get_model()
itr = model.get_iter(path)
self.emit('item-selected', model.get_value(itr, 0))
class MatchingHistoryView(MatchView):
def __init__(self, *args, **kwargs):
super(MatchingHistoryView, self).__init__(*args, **kwargs)
self.__matchtext = None
self.get_view().insert_column_with_data_func(0, '',
gtk.CellRendererPixbuf(),
self.__render_item_icon)
def set_matchtext(self, text):
self.__matchtext = text
self.get_model().foreach(gtk.TreeModel.row_changed)
def _render_item(self, col, cell, model, itr):
(lang, histitem) = model.get_value(itr, 0)
if self.__matchtext:
idx = histitem.find(self.__matchtext)
if idx >= 0:
markup = markup_for_match(histitem, idx, idx+len(self.__matchtext))
cell.set_property('markup', markup)
return
cell.set_property('text', histitem)
@log_except(_logger)
def __render_item_icon(self, col, cell, model, itr):
(lang, histitem) = model.get_value(itr, 0)
langs = PipelineLanguageRegistry.getInstance()
pbcache = PixbufCache.getInstance()
pixbuf = pbcache.get(langs[lang].icon, size=16, trystock=True, stocksize=gtk.ICON_SIZE_MENU)
cell.set_property('pixbuf', pixbuf)
class TabCompletionView(MatchView):
def __init__(self, *args, **kwargs):
super(TabCompletionView, self).__init__(*args, **kwargs)
self.__fs = Filesystem.getInstance()
colidx = self.get_view().insert_column_with_data_func(0, '',
gtk.CellRendererPixbuf(),
self.__render_icon)
def __get_icon_func_for_klass(self, klass):
if isinstance(klass, File):
return lambda x: x.icon
elif isinstance(klass, Builtin):
return lambda x: 'hotwire'
elif isinstance(klass, Alias):
return lambda x: 'gtk-convert'
elif isinstance(klass, Process):
return lambda x: 'gtk-execute'
else:
return None
def __render_icon(self, col, cell, model, itr):
compl = model.get_value(itr, 0)
icon_name = compl.icon
if (not icon_name) and compl.target:
ifunc = self.__get_icon_func_for_klass(compl.target)
if ifunc:
icon_name = ifunc(compl.target)
if icon_name:
if icon_name.startswith(os.sep):
pixbuf = PixbufCache.getInstance().get(icon_name)
cell.set_property('pixbuf', pixbuf)
else:
cell.set_property('icon-name', icon_name)
else:
cell.set_property('icon-name', None)
    def __findobj(self, obj):
        model = self.get_model()
        itr = model.get_iter_first()
        while itr:
            val = model.get_value(itr, 0)
            if val is obj:
                return itr
            itr = model.iter_next(itr)
def _render_item(self, col, cell, model, itr):
compl = model.get_value(itr, 0)
if compl.matchbase:
cell.set_property('text', compl.matchbase)
else:
cell.set_property('text', compl.suffix)
class CompletionStatusDisplay(hotwidgets.TransientPopup):
__gsignals__ = {
"histitem-selected" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"completion-selected" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"completions-loaded" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
}
def __init__(self, entry, window, context=None, tabhistory=[], **kwargs):
super(CompletionStatusDisplay, self).__init__(entry, window, **kwargs)
self.__entry = entry
self.__window = window
self.__context = context
self.__tabhistory = tabhistory
self.__token = None
self.__completer = None
self.__complsys = CompletionSystem()
self.__current_completion = None
self.__current_history = None
self.__pending_completion_load = False
self.__completion_display = MatchPopup(_('Completions (%s)') % ('TAB',),
TabCompletionView,
self.__entry, self.__window, self.__context)
self.__completion_display.connect('item-selected', self.__on_completion_selected)
self.__tab_history_display = MatchPopup(_('Tab History'),
MatchingHistoryView,
self.__entry, self.__window, self.__context)
self.__tab_history_display.connect('item-selected', self.__on_histitem_selected)
self.__global_history_display = MatchPopup(_('Global History Search (%s)') % ('Ctrl-R',),
MatchingHistoryView,
self.__entry, self.__window, self.__context)
self.__global_history_display.connect('item-selected', self.__on_histitem_selected)
self.__overview_visible = False
self.__completion_visible = False
self.__tab_history_visible = False
self.__global_history_visible = False
self.get_box().pack_start(self.__completion_display.get_miniview(), expand=True)
self.get_box().pack_start(gtk.VSeparator(), expand=False)
self.get_box().pack_start(self.__global_history_display.get_miniview(), expand=True)
def __on_histitem_selected(self, th, histitem):
self.emit('histitem-selected', histitem)
def __on_completion_selected(self, ac, compl):
self.emit('completion-selected', compl)
def invalidate(self):
self.__token = None
self.__completer = None
self.__current_completion = None
self.__pending_completion_load = False
self.hide_all()
def hide_all(self):
if self.__completion_visible:
self.__completion_display.hide()
self.__completion_visible = False
if self.__tab_history_visible:
self.__tab_history_display.hide()
self.__tab_history_visible = False
if self.__global_history_visible:
self.__global_history_display.hide()
self.__global_history_visible = False
if self.__overview_visible:
super(CompletionStatusDisplay, self).hide()
self.__overview_visible = False
def set_completion(self, completer, text, context):
if text == self.__token and completer == self.__completer:
return
_logger.debug("new completion: %s", text)
self.invalidate()
self.__token = text
self.__completer = completer
if completer:
self.__complsys.async_complete(completer, text, context.get_cwd(), self.__completions_result)
def completion_request(self):
if self.__current_completion is not None:
if not self.__completion_visible:
self.hide_all()
self.__completion_visible = True
self.__completion_display.show()
self.__completion_display.reposition()
self.__completion_display.queue_reposition()
return self.__current_completion
if self.__completer:
self.hide_all()
self.__pending_completion_load = True
return True
return None
def show(self):
self.__overview_visible = True
super(CompletionStatusDisplay, self).show()
self.reposition()
self.queue_reposition()
def hide(self):
self.__overview_visible = False
super(CompletionStatusDisplay, self).hide()
def __completions_result(self, completer, text, results):
if not (text == self.__token and completer == self.__completer):
_logger.debug("stale completion result")
return
self.__current_completion = results
self.__completion_display.set_content(self.__current_completion.results)
if self.__pending_completion_load:
self.__current_completion = results
self.emit('completions-loaded')
self.__pending_completion_load = False
else:
if self.__current_completion.results or self.__current_history:
self.show()
self.queue_reposition()
def _set_size_request(self):
(ref_x, ref_y, ref_w, ref_h, bits) = self.__entry.get_parent_window().get_geometry()
_logger.debug("setting size request width to %d*0.75", ref_w)
self.set_size_request((int(ref_w*0.75)), -1)
def set_history_search(self, lang_uuid, histsearch):
histitems = map(lambda result: (lang_uuid,result), self.__context.history.search_commands(lang_uuid, histsearch))
self.__current_history = not not histitems
self.__global_history_display.set_content(histitems, uniquify=True)
self.__global_history_display.set_matchtext(histsearch)
def popup_tab_history(self):
if self.__tab_history_visible:
return
_logger.debug("doing tab history popup")
self.hide()
self.__tab_history_display.set_content(self.__tabhistory, uniquify=False)
self.__tab_history_display.reposition()
self.__tab_history_display.queue_reposition()
self.__tab_history_visible = True
self.__tab_history_display.show()
def popup_global_history(self):
if self.__global_history_visible:
return
self.hide()
self.__global_history_display.reposition()
self.__global_history_display.queue_reposition()
self.__global_history_visible = True
self.__global_history_display.show()
def get_state(self):
if self.__tab_history_visible:
return 'tabhistory'
elif self.__global_history_visible:
return 'globalhistory'
elif self.__completion_visible:
return 'completions'
return None
def select_next(self):
if self.__tab_history_visible:
self.__tab_history_display.select_next()
return True
elif self.__global_history_visible:
self.__global_history_display.select_next()
return True
elif self.__completion_visible:
self.__completion_display.select_next()
return True
return False
def select_prev(self):
if self.__tab_history_visible:
self.__tab_history_display.select_prev()
return True
elif self.__global_history_visible:
self.__global_history_display.select_prev()
return True
elif self.__completion_visible:
self.__completion_display.select_prev()
return True
return False
def page_up(self):
if self.__tab_history_visible:
self.__tab_history_display.page_up()
return True
elif self.__global_history_visible:
self.__global_history_display.page_up()
return True
elif self.__completion_visible:
self.__completion_display.page_up()
return True
return False
def page_down(self):
if self.__tab_history_visible:
self.__tab_history_display.page_down()
return True
elif self.__global_history_visible:
self.__global_history_display.page_down()
return True
elif self.__completion_visible:
self.__completion_display.page_down()
return True
return False
def activate_selected(self):
if self.__tab_history_visible:
self.__tab_history_display.emit_itemselected()
return True
elif self.__global_history_visible:
self.__global_history_display.emit_itemselected()
return True
elif self.__completion_visible:
self.__completion_display.emit_itemselected()
return True
return False
|
Teagan42/home-assistant
|
refs/heads/dev
|
tests/components/zha/test_lock.py
|
4
|
"""Test zha lock."""
from unittest.mock import patch
import zigpy.zcl.clusters.closures as closures
import zigpy.zcl.clusters.general as general
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.lock import DOMAIN
from homeassistant.const import STATE_LOCKED, STATE_UNAVAILABLE, STATE_UNLOCKED
from .common import (
async_enable_traffic,
async_init_zigpy_device,
find_entity_id,
make_attribute,
make_zcl_header,
)
from tests.common import mock_coro
LOCK_DOOR = 0
UNLOCK_DOOR = 1
async def test_lock(hass, config_entry, zha_gateway):
"""Test zha lock platform."""
# create zigpy device
zigpy_device = await async_init_zigpy_device(
hass,
[closures.DoorLock.cluster_id, general.Basic.cluster_id],
[],
None,
zha_gateway,
)
# load up lock domain
await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
await hass.async_block_till_done()
cluster = zigpy_device.endpoints.get(1).door_lock
zha_device = zha_gateway.get_device(zigpy_device.ieee)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
# test that the lock was created and that it is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, zha_gateway, [zha_device])
# test that the state has changed from unavailable to unlocked
assert hass.states.get(entity_id).state == STATE_UNLOCKED
# set state to locked
attr = make_attribute(0, 1)
hdr = make_zcl_header(zcl_f.Command.Report_Attributes)
cluster.handle_message(hdr, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_LOCKED
# set state to unlocked
attr.value.value = 2
cluster.handle_message(hdr, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNLOCKED
# lock from HA
await async_lock(hass, cluster, entity_id)
# unlock from HA
await async_unlock(hass, cluster, entity_id)
async def async_lock(hass, cluster, entity_id):
"""Test lock functionality from hass."""
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([zcl_f.Status.SUCCESS])
):
# lock via UI
await hass.services.async_call(
DOMAIN, "lock", {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.call_args[0][0] is False
assert cluster.request.call_args[0][1] == LOCK_DOOR
async def async_unlock(hass, cluster, entity_id):
"""Test lock functionality from hass."""
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([zcl_f.Status.SUCCESS])
):
        # unlock via UI
await hass.services.async_call(
DOMAIN, "unlock", {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.call_args[0][0] is False
assert cluster.request.call_args[0][1] == UNLOCK_DOOR
|
ForgottenKahz/CloudOPC
|
refs/heads/master
|
venv/Lib/site-packages/werkzeug/posixemulation.py
|
148
|
# -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
    follows POSIX semantics, e.g. if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
from ._compat import to_unicode
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
src = to_unicode(src, sys.getfilesystemencoding())
dst = to_unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
|
ehudmagal/robotqcapp
|
refs/heads/master
|
dojango/decorators.py
|
6
|
from django.http import HttpResponseNotAllowed, HttpResponseServerError
from django.utils import simplejson as json
from util import to_json_response
from util import to_dojo_data
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
def expect_post_request(func):
"""Allow only POST requests to come in, throw an exception otherwise.
This relieves from checking every time that the request is
really a POST request, which it should be when using this
decorator.
"""
def _ret(*args, **kwargs):
ret = func(*args, **kwargs)
request = args[0]
if not request.method=='POST':
return HttpResponseNotAllowed(['POST'])
return ret
return _ret
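# Hypothetical usage sketch (the view name is illustrative only):
#
#     @expect_post_request
#     def update_profile(request):
#         ...  # only reached for POST requests; others get a 405 response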
def add_request_getdict(func):
"""Add the method getdict() to the request object.
This works just like getlist() only that it decodes any nested
JSON encoded object structure.
Since sending deep nested structures is not possible via
GET/POST by default, this enables it. Of course you need to
make sure that on the JavaScript side you are also sending
the data properly, which dojango.send() automatically does.
Example:
this is being sent:
one:1
two:{"three":3, "four":4}
using
request.POST.getdict('two')
returns a dict containing the values sent by the JavaScript.
"""
def _ret(*args, **kwargs):
args[0].POST.__class__.getdict = __getdict
ret = func(*args, **kwargs)
return ret
return _ret
def __getdict(self, key):
ret = self.get(key)
try:
ret = json.loads(ret)
except ValueError: # The value was not JSON encoded :-)
raise Exception('"%s" was not JSON encoded as expected (%s).' % (key, str(ret)))
return ret
def json_response(func):
"""
A simple json response decorator. Use it on views, where a python data object should be converted
to a json response:
@json_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret)
return wraps(func)(inner)
def jsonp_response_custom(callback_param_name):
"""
A jsonp (JSON with Padding) response decorator, where you can define your own callbackParamName.
    It acts like the json_response decorator, with the difference that it
wraps the returned json string into a client-specified function name (that is the Padding).
You can add this decorator to a function like that:
@jsonp_response_custom("my_callback_param")
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
    You can now access this view from a foreign URL using JSONP.
An example with Dojo looks like that:
dojo.io.script.get({ url:"http://example.com/my_url/",
callbackParamName:"my_callback_param",
load: function(response){
console.log(response);
}
});
Note: the callback_param_name in the decorator and in your JavaScript JSONP call must be the same.
"""
def decorator(func):
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, callback_param_name=callback_param_name)
return wraps(func)(inner)
return decorator
jsonp_response = jsonp_response_custom("jsonp_callback")
jsonp_response.__doc__ = "A predefined jsonp response decorator using 'jsonp_callback' as a fixed callback_param_name."
def json_iframe_response(func):
"""
    A simple json response decorator that wraps the json response into an HTML page.
    It helps when doing a json request using an iframe (e.g. file up-/download):
    @json_iframe_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, use_iframe=True)
return wraps(func)(inner)
def __prepare_json_ret(request, ret, callback_param_name=None, use_iframe=False):
if ret==False:
ret = {'success':False}
elif ret==None: # Sometimes there is no return.
ret = {}
    # Add 'success': True, since it was obviously not set yet and we got valid data (no exception).
func_name = None
if callback_param_name:
func_name = request.GET.get(callback_param_name, "callbackParamName")
try:
if not ret.has_key('success'):
ret['success'] = True
except AttributeError, e:
raise Exception("The returned data of your function must be a dictionary!")
json_ret = ""
try:
# Sometimes the serialization fails, i.e. when there are too deeply nested objects or even classes inside
json_ret = to_json_response(ret, func_name, use_iframe)
except Exception, e:
print '\n\n===============Exception=============\n\n'+str(e)+'\n\n'
print ret
print '\n\n'
return HttpResponseServerError(content=str(e))
return json_ret
|
xrg/django-static-gitified
|
refs/heads/master
|
django/contrib/staticfiles/storage.py
|
66
|
from __future__ import with_statement
import hashlib
import os
import posixpath
import re
from urllib import unquote
from urlparse import urlsplit, urlunsplit, urldefrag
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode, smart_str
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class CachedFilesMixin(object):
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
r"""(@import\s*["']\s*(.*?)["'])""",
)),
)
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.cache = get_cache('staticfiles')
except InvalidCacheBackendError:
# Use the default backend
self.cache = default_cache
self._patterns = SortedDict()
for extension, patterns in self.patterns:
for pattern in patterns:
compiled = re.compile(pattern)
self._patterns.setdefault(extension, []).append(compiled)
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
# Get the MD5 hash of the file
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
md5sum = md5.hexdigest()[:12]
hashed_name = os.path.join(path, u"%s.%s%s" %
(root, md5sum, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
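    # Illustrative result of hashed_name() -- the hash value below is made up:
    #   'css/styles.css' -> 'css/styles.55e7cbb9ba48.css'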
def cache_key(self, name):
return u'staticfiles:%s' % hashlib.md5(smart_str(name)).hexdigest()
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
cache_key = self.cache_key(name)
hashed_name = self.cache.get(cache_key)
if hashed_name is None:
hashed_name = self.hashed_name(clean_name).replace('\\', '/')
# set the cache if there was a miss
# (e.g. if cache server goes down)
self.cache.set(cache_key, hashed_name)
final_url = super(CachedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name):
"""
Returns the custom URL converter for the given file name.
"""
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return 'url("%s")' % unquote(relative_url)
return converter
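    # Illustrative conversion performed by the converter above (hash made up):
    #   url(../img/logo.png) -> url("../img/logo.55e7cbb9ba48.png")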
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given list of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_paths = {}
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read()
converter = self.url_converter(name)
for patterns in self._patterns.values():
for pattern in patterns:
content = pattern.sub(converter, content)
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(smart_str(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_unicode(saved_name.replace('\\', '/'))
# and then set the cache accordingly
hashed_paths[self.cache_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally set the cache
self.cache.set_many(hashed_paths)
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
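# For example, this backend is typically enabled via the settings module:
#   STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'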
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
prefix = None
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is the actual app module
mod = import_module(app)
mod_path = os.path.dirname(mod.__file__)
location = os.path.join(mod_path, self.source_dir)
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
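    # Illustrative example (the app name is hypothetical): for an installed
    # app 'myapp', this storage serves files from its 'myapp/static/' directory.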
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
|
asanfilippo7/osf.io
|
refs/heads/develop
|
scripts/github/migrate_to_external_accounts.py
|
18
|
import logging
import os
import sys
import urlparse
from modularodm import Q
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from website import settings
from website.app import init_app
from website.models import User, Node
from website.oauth.models import ExternalAccount
from website.addons.github.api import GitHubClient
from website.addons.github import settings as github_settings
from website.addons.github.utils import make_hook_secret
from website.addons.github.exceptions import GitHubError, ApiError
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
PROVIDER = 'github'
PROVIDER_NAME = 'GitHub'
HOOK_DOMAIN = github_settings.HOOK_DOMAIN or settings.DOMAIN
# mapping of {ExternalAccount._id: (user_settings _id, oauth_settings _id)}
# for accounts with invalid credentials, for logging purposes
invalid_oauth_creds = {}
# list of (node_settings['_id'], external_account._id) tuples without a
# hook_secret, whether they were repairable or not, for logging purposes
settings_need_repair = []
def verify_user_and_oauth_settings_documents(user_document, oauth_document):
try:
assert('_id' in user_document)
assert('oauth_settings' in user_document)
assert('deleted' in user_document)
assert('owner' in user_document)
assert('_id' in oauth_document)
assert('github_user_id' in oauth_document)
assert('github_user_name' in oauth_document)
assert('oauth_access_token' in oauth_document)
assert(user_document.get('owner', None))
assert(user_document['oauth_settings'] == oauth_document['github_user_id'])
except AssertionError:
return False
else:
return True
def verify_node_settings_document(document, account):
try:
assert('_id' in document)
assert('deleted' in document)
assert('repo' in document)
assert('user' in document)
assert('registration_data' in document)
assert('owner' in document)
assert(document.get('owner', None))
assert('user_settings' in document)
except AssertionError:
return False
try:
assert('hook_id' in document)
assert('hook_secret' in document)
except AssertionError:
settings_need_repair.append((document['_id'], account._id))
logger.info(
'Making GH API request attempting to repair node settings<_id: {}> with ExternalAccount<_id: {}>'.format(document['_id'], account._id)
)
add_hook_to_old_node_settings(document, account)
return True
def add_hook_to_old_node_settings(document, account):
connect = GitHubClient(external_account=account)
secret = make_hook_secret()
hook = None
try:
hook = connect.add_hook(
document['user'], document['repo'],
'web',
{
'url': urlparse.urljoin(
HOOK_DOMAIN,
os.path.join(
Node.load(document['owner']).api_url, 'github', 'hook/'
)
),
'content_type': github_settings.HOOK_CONTENT_TYPE,
'secret': secret,
},
events=github_settings.HOOK_EVENTS,
)
except ApiError:
pass
if hook:
database['addongithubnodesettings'].find_and_modify(
{'_id': document['_id']},
{
'$set': {
'hook_id': hook.id,
'hook_secret': secret
}
}
)
def migrate_to_external_account(user_settings_document, oauth_settings_document):
if not oauth_settings_document.get('oauth_access_token'):
return (None, None, None)
try:
user_info = GitHubClient(access_token=oauth_settings_document['oauth_access_token']).user()
except (GitHubError, ApiError):
user_id = oauth_settings_document['github_user_id']
profile_url = None
display_name = oauth_settings_document['github_user_name']
else:
user_id = user_info.id
profile_url = user_info.html_url
display_name = user_info.login
new = False
user = User.load(user_settings_document['owner'])
try:
external_account = ExternalAccount.find(Q('provider_id', 'eq', user_id))[0]
logger.info('Duplicate account use found: User {0} with github_user_id {1}'.format(user.username, user_id))
except IndexError:
new = True
external_account = ExternalAccount(
provider=PROVIDER,
provider_name=PROVIDER_NAME,
provider_id=user_id,
profile_url=profile_url,
oauth_key=oauth_settings_document['oauth_access_token'],
display_name=display_name,
)
external_account.save()
if not profile_url:
invalid_oauth_creds[external_account._id] = (user_settings_document['_id'], oauth_settings_document['_id'])
logger.info("Created ExternalAccount<_id:{0}> with invalid oauth credentials.".format(
external_account._id
))
user.external_accounts.append(external_account)
user.save()
return external_account, user, new
def make_new_user_settings(user):
# kill backrefs to old models
database['user'].find_and_modify(
{'_id': user._id},
{
'$unset': {
'__backrefs.addons.addongithubusersettings': ''
}
}
)
user.reload()
return user.get_or_add_addon('github', override=True)
def make_new_node_settings(node, node_settings_document, external_account=None, user_settings_instance=None):
# kill backrefs to old models
database['node'].find_and_modify(
{'_id': node._id},
{
'$unset': {
'__backrefs.addons.addongithubnodesettings': ''
}
}
)
node.reload()
node_settings_instance = node.get_or_add_addon('github', auth=None, override=True, log=False)
node_settings_instance.repo = node_settings_document['repo']
node_settings_instance.user = node_settings_document['user']
node_settings_instance.hook_id = node_settings_document.get('hook_id', None)
node_settings_instance.hook_secret = node_settings_document.get('hook_secret', None)
node_settings_instance.registration_data = node_settings_document['registration_data']
node_settings_instance.save()
if external_account and user_settings_instance:
node_settings_instance.set_auth(
external_account,
user_settings_instance.owner,
log=False
)
return node_settings_instance
def migrate(dry_run=True):
user_settings_list = list(database['addongithubusersettings'].find())
# get in-memory versions of collections and collection sizes
old_user_settings_collection = database['addongithubusersettings']
old_user_settings_count = old_user_settings_collection.count()
old_node_settings_collection = database['addongithubnodesettings']
old_node_settings_count = old_node_settings_collection.count()
old_oauth_settings_collection = database['addongithuboauthsettings']
old_oauth_settings_count = old_oauth_settings_collection.count()
# Lists of IDs for logging purposes
external_accounts_created = []
migrated_user_settings = []
migrated_node_settings = []
user_no_oauth_settings = []
deleted_user_settings = []
broken_user_or_oauth_settings = []
no_oauth_creds = []
inactive_user_or_no_owner = []
unverifiable_node_settings = []
deleted_node_settings = []
nodeless_node_settings = []
for user_settings_document in user_settings_list:
oauth_settings_document = None
try:
if user_settings_document.get('oauth_settings', None):
oauth_settings_document = old_oauth_settings_collection.find_one({'github_user_id': user_settings_document['oauth_settings']})
except KeyError:
pass
if not oauth_settings_document:
logger.info(
"Found addongithubusersettings document (id:{0}) with no associated oauth_settings. It will not be migrated.".format(user_settings_document['_id'])
)
user_no_oauth_settings.append(user_settings_document['_id'])
continue
if user_settings_document['deleted']:
logger.info(
"Found addongithubusersettings document (id:{0}) that is marked as deleted.".format(user_settings_document['_id'])
)
deleted_user_settings.append(user_settings_document['_id'])
continue
if not verify_user_and_oauth_settings_documents(user_settings_document, oauth_settings_document):
logger.info(
"Found broken addongithubusersettings document (id:{0}) that could not be fixed.".format(user_settings_document['_id'])
)
broken_user_or_oauth_settings.append((user_settings_document['_id'], oauth_settings_document['_id']))
continue
external_account, user, new = migrate_to_external_account(user_settings_document, oauth_settings_document)
if not external_account:
logger.info("AddonGitHubUserSettings<_id:{0}> has no oauth credentials and will not be migrated.".format(
user_settings_document['_id']
))
no_oauth_creds.append(user_settings_document['_id'])
continue
else:
if new:
external_accounts_created.append(external_account._id)
linked_node_settings_documents = old_node_settings_collection.find({
'user_settings': user_settings_document['_id']
})
if not user or not user.is_active:
if linked_node_settings_documents.count() and not user.is_merged:
logger.warn("AddonGitHubUserSettings<_id:{0}> has no owner, but is used by AddonGitHubNodeSettings: {1}.".format(
user_settings_document['_id'],
', '.join([each['_id'] for each in linked_node_settings_documents])
))
raise RuntimeError("This should never happen.")
else:
logger.info("AddonGitHubUserSettings<_id:{0}> either has no owner or the owner's account is not active, and will not be migrated.".format(
user_settings_document['_id']
))
inactive_user_or_no_owner.append(user_settings_document['_id'])
continue
else:
user_settings_instance = make_new_user_settings(user)
for node_settings_document in linked_node_settings_documents:
if not verify_node_settings_document(node_settings_document, external_account):
logger.info(
"Found addongithubnodesettings document (id:{0}) that could not be verified. It will not be migrated.".format(
node_settings_document['_id'],
)
)
unverifiable_node_settings.append((node_settings_document['_id'], external_account._id))
continue
if node_settings_document['deleted']:
logger.info(
"Found addongithubnodesettings document (id:{0}) that is marked as deleted.".format(
node_settings_document['_id'],
)
)
deleted_node_settings.append(node_settings_document['_id'])
continue
node = Node.load(node_settings_document['owner'])
if not node:
logger.info("AddonGitHubNodeSettings<_id:{0}> has no associated Node, and will not be migrated.".format(
node_settings_document['_id']
))
nodeless_node_settings.append(node_settings_document['_id'])
continue
else:
node_settings_document = database['addongithubnodesettings'].find_one({'_id': node_settings_document['_id']})
make_new_node_settings(
node,
node_settings_document,
external_account,
user_settings_instance
)
migrated_node_settings.append(node_settings_document['_id'])
migrated_user_settings.append(user_settings_document['_id'])
logger.info(
"Created {0} new external accounts from {1} old oauth settings documents:\n{2}".format(
len(external_accounts_created), old_oauth_settings_count, [e for e in external_accounts_created]
)
)
logger.info(
"Successfully migrated {0} user settings from {1} old user settings documents:\n{2}".format(
len(migrated_user_settings), old_user_settings_count, [e for e in migrated_user_settings]
)
)
logger.info(
"Successfully migrated {0} node settings from {1} old node settings documents:\n{2}".format(
len(migrated_node_settings), old_node_settings_count, [e for e in migrated_node_settings]
)
)
if user_no_oauth_settings:
logger.warn(
"Skipped {0} user settings due to a lack of associated oauth settings:\n{1}".format(
len(user_no_oauth_settings), [e for e in user_no_oauth_settings]
)
)
if deleted_user_settings:
logger.warn(
"Skipped {0} deleted user settings: {1}".format(
len(deleted_user_settings), [e for e in deleted_user_settings]
)
)
if broken_user_or_oauth_settings:
logger.warn(
"Skipped {0} (user, oauth) settings tuples because they could not be verified:\n{1}".format(
len(broken_user_or_oauth_settings), ['({}, {})'.format(e, f) for e, f in broken_user_or_oauth_settings]
)
)
if invalid_oauth_creds:
logger.warn(
"Created {0} invalid ExternalAccounts from (user, oauth) settings tuples due to invalid oauth credentials:\n{1}".format(
len(invalid_oauth_creds), ['{}: ({}, {})'.format(e, invalid_oauth_creds[e][0], invalid_oauth_creds[e][1]) for e in invalid_oauth_creds.keys()]
)
)
if inactive_user_or_no_owner:
logger.warn(
"Skipped {0} user settings due to an inactive or null owner:\n{1}".format(
len(inactive_user_or_no_owner), [e for e in inactive_user_or_no_owner]
)
)
if no_oauth_creds:
logger.warn(
"Skipped {0} user settings due a lack of oauth credentials:\n{1}".format(
len(no_oauth_creds), [e for e in no_oauth_creds]
)
)
if settings_need_repair:
logger.warn(
"Made GH API calls for {0} node settings documents with external accounts because they needed to be repaired:\n{1}".format(
len(settings_need_repair), ['({}, {})'.format(e, f) for e, f in settings_need_repair]
)
)
if unverifiable_node_settings:
logger.warn(
"Skipped {0} (node settings, external_account) tuples because they could not be verified or repaired:\n{1}".format(
len(unverifiable_node_settings), ['({}, {})'.format(e, f) for e, f in unverifiable_node_settings]
)
)
if deleted_node_settings:
logger.warn(
"Skipped {0} deleted node settings:\n{1}".format(
len(deleted_node_settings), [e for e in deleted_node_settings]
)
)
if nodeless_node_settings:
logger.warn(
"Skipped {0} node settings without an associated node:\n{1}".format(
len(nodeless_node_settings), [e for e in nodeless_node_settings]
)
)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
def main():
dry_run = False
remove_old = True
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with TokuTransaction():
migrate(dry_run=dry_run)
if __name__ == "__main__":
main()
|
OptiPop/external_chromium_org
|
refs/heads/opti-5.1
|
chrome/browser/web_dev_style/html_checker.py
|
36
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Presubmit for Chromium HTML resources. See chrome/browser/PRESUBMIT.py.
"""
import regex_check
class HtmlChecker(object):
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
self.file_filter = file_filter
def LabelCheck(self, line_number, line):
return regex_check.RegexCheck(self.input_api.re, line_number, line,
"(for=)",
"Avoid 'for' attribute on <label>. Place the input within the <label>, "
"or use aria-labelledby for <select>.")
def RunChecks(self):
"""Check for violations of the Chromium web development style guide. See
http://chromium.org/developers/web-development-style-guide
"""
results = []
affected_files = self.input_api.change.AffectedFiles(
file_filter=self.file_filter, include_deletes=False)
for f in affected_files:
errors = []
for line_number, line in f.ChangedContents():
error = self.LabelCheck(line_number, line)
if error:
errors.append(error)
if errors:
abs_local_path = f.AbsoluteLocalPath()
file_indicator = 'Found HTML style issues in %s' % abs_local_path
prompt_msg = file_indicator + '\n\n' + '\n'.join(errors) + '\n'
results.append(self.output_api.PresubmitPromptWarning(prompt_msg))
return results
|
beni55/olympia
|
refs/heads/master
|
apps/pages/tests.py
|
14
|
from django.conf import settings
from nose.tools import eq_
import amo
import amo.tests
from amo.urlresolvers import reverse
class TestPages(amo.tests.TestCase):
def _check(self, url, status):
resp = self.client.get(reverse(url))
eq_(resp.status_code, status)
def test_status(self):
pages = ['pages.about', 'pages.credits', 'pages.faq',
'pages.acr_firstrun', 'pages.dev_faq', 'pages.review_guide',
'pages.sunbird']
for page in pages:
self._check(page, 200)
class TestRedirects(amo.tests.TestCase):
def _check(self, pages):
for old, new in pages.iteritems():
if new.startswith('http'):
r = self.client.get(old)
eq_(r['Location'], new)
else:
r = self.client.get(old, follow=True)
self.assertRedirects(r, new, 301)
def test_app_pages(self):
self._check({
'/en-US/firefox/pages/compatibility_firstrun':
reverse('pages.acr_firstrun'),
'/en-US/firefox/pages/validation': settings.VALIDATION_FAQ_URL,
})
def test_nonapp_pages(self):
self._check({
'/en-US/pages/developer_faq': reverse('pages.dev_faq'),
'/en-US/pages/review_guide': reverse('pages.review_guide'),
'/en-US/pages/developer_agreement': reverse(
'devhub.docs', args=['policies/agreement']),
})
|
LUTAN/tensorflow
|
refs/heads/master
|
tensorflow/python/training/server_lib_multiple_containers_test.py
|
133
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class MultipleContainersTest(test.TestCase):
# Verifies behavior of tf.Session.reset() with multiple containers using
# tf.container.
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
def testMultipleContainers(self):
with ops.container("test0"):
v0 = variables.Variable(1.0, name="v0")
with ops.container("test1"):
v1 = variables.Variable(2.0, name="v0")
server = server_lib.Server.create_local_server()
sess = session.Session(server.target)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
# Resets container. Session aborts.
session.Session.reset(server.target, ["test0"])
with self.assertRaises(errors_impl.AbortedError):
sess.run(v1)
# Connects to the same target. Device memory for the v0 would have
# been released, so it will be uninitialized. But v1 should still
# be valid.
sess = session.Session(server.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(v0)
self.assertAllEqual(2.0, sess.run(v1))
if __name__ == "__main__":
test.main()
|
ChameleonCloud/horizon
|
refs/heads/chameleoncloud/train
|
openstack_dashboard/test/selenium/selenium_tests.py
|
7
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon.test import helpers as test
class BrowserTests(test.SeleniumTestCase):
def test_splash(self):
self.selenium.get(self.live_server_url)
button = self.selenium.find_element_by_id("loginBtn")
# Ensure button has something; must be language independent.
self.assertGreater(len(button.text), 0)
|
CrazyGamerGR/CrazySuperKernel-CM14.1-G5
|
refs/heads/master
|
tools/perf/util/setup.py
|
989
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
mmagnus/rna-pdb-tools
|
refs/heads/master
|
rna_tools/SecondaryStructure.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Secondary structure analysis"""
import os
import tempfile
import shutil
import subprocess
from rna_tools.rna_tools_config import VARNA_JAR_NAME, VARNA_PATH
class ExceptionOpenPairsProblem(Exception):
pass
def draw_ss(title, seq, ss, img_out, resolution=4, verbose=False):
"""Draw Secondary Structure using VARNA (you need correct configuration for this).
If everything is OK, return None, if an error (=exception) return stderr.
Usage::
>>> seq = 'GGAAACC'
>>> ss = '((...))'
>>> img_out = 'output/demo.png'
>>> draw_ss('rna', seq, ss, img_out)
>>> print('Made %s' % img_out)
Made output/demo.png
.. image:: ../../rna_tools/output/demo.png
:scale: 25 %
Can be used with http://geekbook.readthedocs.io/en/latest/rna.html"""
curr = os.getcwd()
os.chdir(VARNA_PATH) # VARNAv3-93-src')
if verbose:
print(VARNA_PATH)
t = tempfile.NamedTemporaryFile(delete=False)
t.name += '.png'
cmd = 'java -cp ' + VARNA_JAR_NAME + ' fr.orsay.lri.varna.applications.VARNAcmd -sequenceDBN ' + seq + \
" -structureDBN '" + ss + "' -o " + t.name + " -title '" + \
title + "' -resolution '" + str(resolution) + "'"
if verbose:
print(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
out = p.stderr.read().decode().strip()
os.chdir(curr)
if out.find('Exception') > -1:
return out
else:
if verbose:
print(t.name)
shutil.move(t.name, img_out)
def parse_vienna_to_pairs(ss, remove_gaps_in_ss=False):
"""Parse Vienna (dot-bracket notation) to get pairs.
Args:
        ss (str): secondary structure in Vienna (dot-bracket) notation
        remove_gaps_in_ss (bool): whether to remove '-' from ss; designed for DCA (tpp case
        ``ss = "(((((((((.((((.(((.....))))))......------)....."``
        works with pseudoknots (pk) of the first level, ``[[]]``
Returns:
list of two lists: (pairs, pairs_pk)
Examples::
>>> parse_vienna_to_pairs('((..))')
([[1, 6], [2, 5]], [])
>>> parse_vienna_to_pairs('(([[))]]')
([[1, 6], [2, 5]], [[3, 8], [4, 7]])
>>> parse_vienna_to_pairs('((--))')
([[1, 6], [2, 5]], [])
>>> parse_vienna_to_pairs('((--))', remove_gaps_in_ss=True)
([[1, 4], [2, 3]], [])
>>> parse_vienna_to_pairs('((((......')
Traceback (most recent call last):
File "/usr/lib/python2.7/doctest.py", line 1315, in __run
compileflags, 1) in test.globs
File "<doctest __main__.parse_vienna_to_pairs[4]>", line 1, in <module>
parse_vienna_to_pairs('((((......')
File "./SecondaryStructure.py", line 106, in parse_vienna_to_pairs
raise ExceptionOpenPairsProblem('Too many open pairs (()) in structure')
ExceptionOpenPairsProblem: Too many open pairs (()) in structure
"""
if remove_gaps_in_ss:
ss = ss.replace('-', '')
stack = []
pairs = []
pairs_pk = []
stack_pk = []
for c, s in enumerate(ss):
if s == '(':
stack.append(c + 1)
if s == ')':
pairs.append([stack.pop(), c + 1])
if s == '[':
stack_pk.append(c + 1)
if s == ']':
pairs_pk.append([stack_pk.pop(), c + 1])
if stack:
raise ExceptionOpenPairsProblem('Too many open pairs (()) in structure')
if stack_pk:
raise ExceptionOpenPairsProblem('Too many open pairs [[]] in structure')
pairs.sort()
pairs_pk.sort()
return(pairs, pairs_pk)
# main
if __name__ == '__main__':
import doctest
doctest.testmod()
|
lordzuko/DeepEduVision
|
refs/heads/master
|
classroom_analyst/urls.py
|
1
|
"""DeepEduVision URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from classroom_analyst import views
urlpatterns = [
url(r'^mood_analysis/$', views.class_mood, name='class_mood'),
url(r'profiles/(?P<user_id>[0-9]+?)/$', views.profile, name='profile'),
url(r'profiles/(?P<user_id>[0-9]+?)/edit/$', views.edit_profile, name='edit-profile'),
]
|
lento/cortex
|
refs/heads/master
|
test/IECore/All.py
|
2
|
##########################################################################
#
# Copyright (c) 2007-2014, Image Engine Design Inc. All rights reserved.
#
# Copyright (c) 2010, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import warnings
import sys
import IECore
warnings.simplefilter( "error", DeprecationWarning )
from ClassLoader import *
from BlindDataHolder import *
from CompoundData import *
from CompoundObject import *
from Imath import *
from ImathVectorData import *
from IndexedIO import *
from KDTree import *
from BoundedKDTree import *
from MessageHandler import *
from ObjectIO import *
from Object import *
from ObjectReader import *
from ObjectWriter import *
from ParameterParser import *
from Parameterised import *
from Parameters import *
from PDCReader import *
from PDCWriter import *
from SimpleTypedData import *
from TypedDataAsObject import *
from VectorData import *
from FileSequence import *
from EXRImageReader import *
from EXRImageWriter import *
from PointsPrimitive import *
from ImagePrimitive import *
from PerlinNoise import *
from Turbulence import *
from MeshPrimitive import *
from Shader import *
from SearchPath import *
from CachedReader import *
from Reader import *
from RunTimeTyped import *
from Op import *
from MemoryUsage import *
from FileSequenceParameter import *
from WrapperToPython import *
from RemovePrimitiveVariables import *
from RenamePrimitiveVariables import *
from WrapperGarbageCollection import *
from FormattedParameterHelp import *
from MotionPrimitive import *
from Transform import *
from Group import *
from NamespacePollution import *
from OptionalCompoundParameter import *
from ObjectInterpolation import *
from TransformationMatrixData import *
from ReversedFrameList import *
from BinaryFrameList import *
from PointsExpressionOp import *
from FrameList import *
from FrameListParameter import *
from Struct import *
from Enum import *
from HeaderGenerator import *
from Camera import *
from NURBS import *
from Curry import *
from Menus import *
from DataCastOp import *
from DataPromoteOp import *
from MatrixMultiplyOp import *
from PointBoundsOp import *
from PrimitiveEvaluator import *
from MeshPrimitiveEvaluator import *
from InternedStringTest import InternedStringTest
from Writer import *
from TriangulateOp import *
from SpherePrimitiveEvaluator import *
from SearchReplaceOp import *
from CINImageReader import *
from CINImageWriter import *
from DPXImageReader import *
from DPXImageWriter import *
from InverseDistanceWeightedInterpolation import *
from ImageCropOp import *
from MeshPrimitiveShrinkWrapOp import *
from ImagePrimitiveEvaluator import *
from CapturingMessageHandler import *
from Math import *
from FileSequenceVectorParameter import *
from TriangleAlgoTest import *
from ColorTransformOpTest import *
from TransformOpTest import *
from LineSegmentTest import *
from CubicBasisTest import *
from CurvesPrimitiveTest import *
from ImageDiffOp import *
from TriangulatorTest import *
from BezierAlgoTest import *
from MeshNormalsOpTest import *
from PrimitiveTest import *
from MeshMergeOpTest import *
from UnicodeToStringTest import *
from RadixSortTest import *
from ImathRootsTest import *
from AngleConversionTest import *
from LuminanceOpTest import *
from SummedAreaOpTest import *
from GradeTest import *
from MedianCutSamplerTest import *
from EnvMapSamplerTest import *
from RandomTest import *
from MeshVertexReorderOpTest import *
from SplineTest import *
from SplineDataTest import *
from TypeIdTest import *
from LayeredDictTest import *
from SplineParameterTest import *
from AttributeStateTest import *
from CoordinateSystemTest import *
from SplineToImageTest import *
from DisplayTest import *
from MeshTangentsOpTest import *
from CubeColorLookupTest import *
from CubeColorLookupDataTest import *
from CubeColorTransformOpTest import *
from CompoundVectorParameterTest import *
from UVDistortOpTest import *
from ObjectVectorTest import *
from ImagePremultiplyOpTest import *
from ImageUnpremultiplyOpTest import *
from ImageCompositeOpTest import *
from ImageSequenceCompositeOpTest import *
from YUVImageWriter import *
from OversamplesCalculatorTest import *
from DateTimeDataTest import *
from DateTimeParameterTest import *
from SequenceLsOpTest import *
from SGIImageReaderTest import *
from TimeDurationDataTest import *
from TimePeriodDataTest import *
from PatchMeshPrimitiveTest import *
from CurveExtrudeOp import *
from ParameterisedProceduralTest import *
from LevenbergMarquardtTest import *
from TypedDataTest import *
from DataTraitsTest import *
from ColorSpaceTransformOpTest import *
from TGAImageReaderTest import *
from TGAImageWriterTest import *
from NParticleReader import *
from OBJReaderTest import TestOBJReader
from FaceAreaOpTest import FaceAreaOpTest
from CurvesMergeOpTest import CurvesMergeOpTest
from CurvesPrimitiveEvaluatorTest import CurvesPrimitiveEvaluatorTest
from SubstitutedDictTest import SubstitutedDictTest
from PointDistributionTest import PointDistributionTest
from CurveTracerTest import CurveTracerTest
from ImageThinnerTest import ImageThinnerTest
from CurveLineariserTest import CurveLineariserTest
from IDXReaderTest import IDXReaderTest
from ThreadingTest import ThreadingTest
from StringUtilTest import *
from ClassParameterTest import ClassParameterTest
from ClassVectorParameterTest import ClassVectorParameterTest
from CurveTangentsOpTest import CurveTangentsOpTest
from SmoothSkinningDataTest import *
from IgnoredExceptionsTest import IgnoredExceptionsTest
from PrimitiveVariableTest import PrimitiveVariableTest
from FaceVaryingPromotionOpTest import FaceVaryingPromotionOpTest
from MeshDistortionsOpTest import TestMeshDistortionsOp
from PointVelocityDisplaceOp import *
from HexConversionTest import HexConversionTest
from CompressAndDecompressSmoothSkinningDataOpsTest import CompressAndDecompressSmoothSkinningDataOpsTest
from BasicPreset import TestBasicPreset
from RelativePreset import TestRelativePreset
from ReorderSmoothSkinningInfluencesOpTest import ReorderSmoothSkinningInfluencesOpTest
from NormalizeSmoothSkinningWeightsOpTest import NormalizeSmoothSkinningWeightsOpTest
from LimitSmoothSkinningInfluencesOpTest import LimitSmoothSkinningInfluencesOpTest
from MixSmoothSkinningWeightsOpTest import MixSmoothSkinningWeightsOpTest
from SmoothSmoothSkinningWeightsOpTest import SmoothSmoothSkinningWeightsOpTest
from PointSmoothSkinningOpTest import PointSmoothSkinningOpTest
from AddAndRemoveSmoothSkinningInfluencesOpTest import AddAndRemoveSmoothSkinningInfluencesOpTest
from LookupTest import LookupTest
from ParameterAlgoTest import ParameterAlgoTest
from PointsPrimitiveEvaluatorTest import PointsPrimitiveEvaluatorTest
from PointsMotionOpTest import PointsMotionOpTest
from CamelCaseTest import CamelCaseTest
from CapturingRendererTest import CapturingRendererTest
from LightTest import LightTest
from ContrastSmoothSkinningWeightsOpTest import ContrastSmoothSkinningWeightsOpTest
from CameraControllerTest import CameraControllerTest
from PointDistributionOpTest import PointDistributionOpTest
from LRUCacheTest import LRUCacheTest
from DataInterleaveOpTest import DataInterleaveOpTest
from DataConvertOpTest import DataConvertOpTest
from DeepPixelTest import DeepPixelTest
from ConfigLoaderTest import ConfigLoaderTest
from MurmurHashTest import MurmurHashTest
from BoolVectorData import BoolVectorDataTest
from CompoundParameterTest import CompoundParameterTest
from DiskPrimitiveTest import DiskPrimitiveTest
from ClampOpTest import ClampOpTest
from SWAReaderTest import SWAReaderTest
from ImfTest import *
from TimeCodeDataTest import TimeCodeDataTest
from TimeCodeParameterTest import TimeCodeParameterTest
from OptionsTest import OptionsTest
from NullObjectTest import NullObjectTest
from SceneCacheTest import SceneCacheTest
from LinkedSceneTest import LinkedSceneTest
from StandardRadialLensModelTest import StandardRadialLensModelTest
from LensDistortOpTest import LensDistortOpTest
from ObjectPoolTest import ObjectPoolTest
from RefCountedTest import RefCountedTest
from ExternalProceduralTest import ExternalProceduralTest
from ClippingPlaneTest import ClippingPlaneTest
if IECore.withDeepEXR() :
from EXRDeepImageReaderTest import EXRDeepImageReaderTest
from EXRDeepImageWriterTest import EXRDeepImageWriterTest
if IECore.withASIO() :
from DisplayDriverTest import *
if IECore.withTIFF() :
from TIFFImageReader import *
from TIFFImageWriter import *
if IECore.withJPEG() :
from JPEGImageReader import *
from JPEGImageWriter import *
if IECore.withFreeType() :
from FontTest import *
if IECore.withPNG() :
from PNGImageReader import TestPNGReader
unittest.TestProgram(
testRunner = unittest.TextTestRunner(
stream = IECore.CompoundStream(
[
sys.stderr,
open( "test/IECore/resultsPython.txt", "w" )
]
),
verbosity = 2
)
)
|
nhicher/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_sshkey_facts.py
|
52
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: digital_ocean_sshkey_facts
short_description: DigitalOcean SSH keys facts
description:
- Fetch DigitalOcean SSH keys facts.
version_added: "2.5"
author: "Patrick Marques (@pmarques)"
extends_documentation_fragment: digital_ocean.documentation
notes:
- Version 2 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- digital_ocean_sshkey_facts:
oauth_token: "{{ my_do_key }}"
- set_fact:
pubkey: "{{ item.public_key }}"
with_items: "{{ ssh_keys|json_query(ssh_pubkey) }}"
vars:
ssh_pubkey: "[?name=='ansible_ctrl']"
- debug:
msg: "{{ pubkey }}"
'''
RETURN = '''
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#list-all-keys
data:
description: List of SSH keys on DigitalOcean
returned: success and no resource constraint
type: dict
sample: {
"ssh_keys": [
{
"id": 512189,
"fingerprint": "3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa",
"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example",
"name": "My SSH Public Key"
}
],
"links": {
},
"meta": {
"total": 1
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
def core(module):
rest = DigitalOceanHelper(module)
response = rest.get("account/keys")
status_code = response.status_code
json = response.json
if status_code == 200:
module.exit_json(changed=False, ansible_facts=json)
else:
module.fail_json(msg='Error fetching facts [{0}: {1}]'.format(
status_code, response.json['message']))
def main():
module = AnsibleModule(
argument_spec=DigitalOceanHelper.digital_ocean_argument_spec(),
supports_check_mode=False,
)
core(module)
if __name__ == '__main__':
main()
|
midori1/midorinoblog
|
refs/heads/master
|
site-packages/django/db/backends/postgresql_psycopg2/introspection.py
|
81
|
from __future__ import unicode_literals
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
ignored_tables = []
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [row[0] for row in cursor.fetchall() if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
null_map = dict(cursor.fetchall())
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6] + (null_map[force_text(line[0])] == 'YES',)))
for line in cursor.description]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT con.conkey, con.confkey, c2.relname
FROM pg_constraint con, pg_class c1, pg_class c2
WHERE c1.oid = con.conrelid
AND c2.oid = con.confrelid
AND c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
# row[0] and row[1] are single-item lists, so grab the single item.
relations[row[0][0] - 1] = (row[1][0] - 1, row[2])
return relations
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute("""
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text FROM information_schema.constraint_column_usage WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
ORDER BY kc.ordinal_position ASC
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
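# Illustrative sketch (not part of Django): how the type-code map defined above
# translates PostgreSQL type OIDs into Django field names. The __main__ guard keeps
# importing this module unchanged; the OIDs are taken from data_types_reverse.
if __name__ == '__main__':
    print(DatabaseIntrospection.data_types_reverse[1043])  # varchar     -> 'CharField'
    print(DatabaseIntrospection.data_types_reverse[1184])  # timestamptz -> 'DateTimeField'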
|
ClearCorp-dev/odoo-clearcorp
|
refs/heads/9.0
|
TODO-9.0/report_xls_template/ir_actions_report_xml.py
|
2
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
REPORT_TYPES = [('qweb-xls', 'XLS'), ('qweb-ods', 'ODS')]
class ReportAction(models.Model):
_inherit = 'ir.actions.report.xml'
def _lookup_report(self, cr, name):
"""
Look up a report definition.
"""
cr.execute(
'SELECT * FROM ir_act_report_xml WHERE report_name=%s',
(name,))
r = cr.dictfetchone()
if r:
# Check if the report type fits with xls or ods reports
if r['report_type'] in ['qweb-xls', 'qweb-ods']:
# Return tuple (report name, report_type, module name)
return (r['report_name'],
r['report_type'],
'report_xls_template')
return super(ReportAction, self)._lookup_report(cr, name)
def render_report(self, cr, uid, res_ids, name, data, context=None):
"""
Look up a report definition and render the report for the provided IDs.
"""
new_report = self._lookup_report(cr, name)
if isinstance(new_report, tuple): # Check the type of object
# Check if the module is report_xls_template
if new_report[2] == 'report_xls_template':
# Check report type
if new_report[1] == 'qweb-xls':
return self.pool['report'].get_xls(
cr, uid, res_ids, new_report[0],
data=data, context=context), 'xls'
elif new_report[1] == 'qweb-ods':
return self.pool['report'].get_ods(
cr, uid, res_ids, new_report[0],
                        data=data, context=context), 'ods'
return super(ReportAction, self).render_report(
cr, uid, res_ids, name, data, context=context)
report_type = fields.Selection(selection_add=REPORT_TYPES)
|
mdkent/percona-xtrabackup
|
refs/heads/master
|
test/kewpie/lib/util/mysqlBaseTestCase.py
|
19
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import os
import time
import difflib
import subprocess
import MySQLdb
# silence annoying errors
from warnings import filterwarnings
filterwarnings('ignore', category = MySQLdb.Warning)
servers = None
class mysqlBaseTestCase(unittest.TestCase):
def setUp(self):
""" If we need to do anything pre-test, we do it here.
Any code here is executed before any test method we
may execute
"""
self.servers = servers
return
def tearDown(self):
#server_manager.reset_servers(test_executor.name)
queries = ["DROP SCHEMA IF EXISTS test"
,"CREATE SCHEMA IF NOT EXISTS test"
]
for server in self.servers:
retcode, result = self.execute_queries(queries, server, schema='mysql')
self.assertEqual(retcode,0,result)
# Begin our utility code here
# This is where we add methods that enable a test to do magic : )
def execute_cmd(self, cmd, stdout_path, exec_path=None, get_output=False):
stdout_file = open(stdout_path,'w')
cmd_subproc = subprocess.Popen( cmd
, shell=True
, cwd=exec_path
, stdout = stdout_file
, stderr = subprocess.STDOUT
)
cmd_subproc.wait()
retcode = cmd_subproc.returncode
stdout_file.close()
if get_output:
data_file = open(stdout_path,'r')
output = ''.join(data_file.readlines())
else:
output = None
return retcode, output
def get_tables(self, server, schema):
""" Return a list of the tables in the
schema on the server
"""
results = []
query = "SHOW TABLES IN %s" %(schema)
retcode, table_set = self.execute_query(query, server)
for table_data in table_set:
table_name = table_data[0]
results.append(table_name)
return results
def check_slaves_by_query( self
, master_server
, other_servers
, query
, expected_result = None
):
""" We execute the query across all servers
and return a dict listing any diffs found,
None if all is good.
If a user provides an expected_result, we
will skip executing against the master
This is done as it is assumed the expected
result has been generated / tested against
the master
"""
comp_results = {}
if expected_result:
pass # don't bother getting it
else:
# run against master for 'good' value
retcode, expected_result = self.execute_query(query, master_server)
for server in other_servers:
retcode, slave_result = self.execute_query(query, server)
#print "%s: expected_result= %s | slave_result= %s" % ( server.name
# , expected_result
# , slave_result_
# )
if not expected_result == slave_result:
comp_data = "%s: expected_result= %s | slave_result= %s" % ( server.name
, expected_result
, slave_result
)
if comp_results.has_key(server.name):
comp_results[server.name].append(comp_data)
else:
comp_results[server.name]=[comp_data]
if comp_results:
return comp_results
return None
def check_slaves_by_checksum( self
, master_server
, other_servers
, schemas=['test']
, tables=[]
):
""" We compare the specified tables (default = all)
from the specified schemas between the 'master'
and the other servers provided (via list)
via CHECKSUM
We return a dictionary listing the server
and any tables that differed
"""
comp_results = {}
logging = master_server.logging
for server in other_servers:
for schema in schemas:
for table in self.get_tables(master_server, schema):
query = "CHECKSUM TABLE %s.%s" %(schema, table)
retcode, master_checksum = self.execute_query(query, master_server)
retcode, slave_checksum = self.execute_query(query, server)
logging.test_debug ("%s: master_checksum= %s | slave_checksum= %s" % ( table
, master_checksum
, slave_checksum
))
logging.test_debug( '#'*80)
if not master_checksum == slave_checksum:
comp_data = "%s: master_checksum= %s | slave_checksum= %s" % ( table
, master_checksum
, slave_checksum
)
if comp_results.has_key(server.name):
comp_results[server.name].append(comp_data)
else:
comp_results[server.name]=[comp_data]
if comp_results:
return comp_results
return None
def take_mysqldump( self
, server
, databases=[]
, tables=[]
, dump_path = None
, cmd_root = None):
""" Take a mysqldump snapshot of the given
server, storing the output to dump_path
"""
if not dump_path:
dump_path = os.path.join(server.vardir, 'dumpfile.dat')
if cmd_root:
dump_cmd = cmd_root
else:
dump_cmd = "%s --no-defaults --user=root --port=%d --host=127.0.0.1 --protocol=tcp --result-file=%s" % ( server.mysqldump
, server.master_port
, dump_path
)
if databases:
if len(databases) > 1:
# We have a list of db's that are to be dumped so we handle things
dump_cmd = ' '.join([dump_cmd, '--databases', ' '.join(databases)])
else:
dump_cmd = ' '.join([dump_cmd, databases[0], ' '.join(tables)])
self.execute_cmd(dump_cmd, os.devnull)
def diff_dumpfiles(self, orig_file_path, new_file_path):
""" diff two dumpfiles useful for comparing servers """
orig_file = open(orig_file_path,'r')
restored_file = open(new_file_path,'r')
orig_file_data = []
rest_file_data = []
orig_file_data= self.filter_data(orig_file.readlines(),'Dump completed')
rest_file_data= self.filter_data(restored_file.readlines(),'Dump completed')
server_diff = difflib.unified_diff( orig_file_data
, rest_file_data
, fromfile=orig_file_path
, tofile=new_file_path
)
diff_output = []
for line in server_diff:
diff_output.append(line)
output = '\n'.join(diff_output)
orig_file.close()
restored_file.close()
return (diff_output==[]), output
def filter_data(self, input_data, filter_text ):
return_data = []
for line in input_data:
if filter_text in line.strip():
pass
else:
return_data.append(line)
return return_data
def execute_query( self
, query
, server
, password=None
, schema='test'):
try:
if server.client_init_command:
if password:
conn = MySQLdb.connect( host = '127.0.0.1'
, port = server.master_port
, user = 'root'
, passwd=password
, db = schema
, init_command = server.client_init_command)
else:
conn = MySQLdb.connect( host = '127.0.0.1'
, port = server.master_port
, user = 'root'
, db = schema
, init_command=server.client_init_command)
else:
if password:
conn = MySQLdb.connect( host = '127.0.0.1'
, port = server.master_port
, user = 'root'
, passwd=password
, db = schema)
else:
conn = MySQLdb.connect( host = '127.0.0.1'
, port = server.master_port
, user = 'root'
, db = schema)
cursor = conn.cursor()
cursor.execute(query)
result_set = cursor.fetchall()
cursor.close()
except MySQLdb.Error, e:
return 1, ("Error %d: %s" %(e.args[0], e.args[1]))
conn.commit()
conn.close()
return 0, result_set
def execute_queries( self
, query_list
, server
, schema= 'test'):
""" Execute a set of queries as a single transaction """
results = {}
retcode = 0
try:
if server.client_init_command:
conn = MySQLdb.connect( host = '127.0.0.1'
, port = server.master_port
, user = 'root'
, db = schema
, init_command = server.client_init_command)
else:
conn = MySQLdb.connect( host = '127.0.0.1'
, port = server.master_port
, user = 'root'
, db = schema)
cursor = conn.cursor()
for idx, query in enumerate(query_list):
try:
cursor.execute(query)
result_set = cursor.fetchall()
except MySQLdb.Error, e:
result_set = "Error %d: %s" %(e.args[0], e.args[1])
retcode = 1
finally:
results[query+str(idx)] = result_set
conn.commit()
cursor.close()
conn.close()
except Exception, e:
retcode = 1
results = (Exception, e)
finally:
return retcode, results
def execute_randgen(self, test_cmd, test_executor, server, schema='test'):
randgen_outfile = os.path.join(test_executor.logdir,'randgen.out')
randgen_output = open(randgen_outfile,'w')
server_type = test_executor.master_server.type
if server_type in ['percona','galera']:
# it is mysql for dbd::perl purposes
server_type = 'mysql'
dsn = "--dsn=dbi:%s:host=127.0.0.1:port=%d:user=root:password="":database=%s" %( server_type
, server.master_port
, schema)
randgen_cmd = " ".join([test_cmd, dsn])
randgen_subproc = subprocess.Popen( randgen_cmd
, shell=True
, cwd=test_executor.system_manager.randgen_path
, env=test_executor.working_environment
, stdout = randgen_output
, stderr = subprocess.STDOUT
)
randgen_subproc.wait()
retcode = randgen_subproc.returncode
randgen_output.close()
randgen_file = open(randgen_outfile,'r')
output = ''.join(randgen_file.readlines())
randgen_file.close()
if retcode == 0:
if not test_executor.verbose:
output = None
return retcode, output
def get_randgen_process( self
, cmd_sequence
, test_executor
, server
, schema='test'
, randgen_outfile=None
, shell_flag = False):
""" There are times when we want finer grained control over our process
and perhaps to kill it so it doesn't waste time running to completion
for those cases, we have this function
"""
if not randgen_outfile:
randgen_outfile = os.path.join(test_executor.logdir,'randgen.out')
randgen_output = open(randgen_outfile,'w')
server_type = test_executor.master_server.type
if server_type in ['percona','galera']:
# it is mysql for dbd::perl purposes
server_type = 'mysql'
dsn = "--dsn=dbi:%s:host=127.0.0.1:port=%d:user=root:password="":database=%s" %( server_type
, server.master_port
, schema)
cmd_sequence.append(dsn)
# if we use shell=True, we need to supply a string vs. a seq.
if shell_flag:
cmd_sequence = " ".join(cmd_sequence)
randgen_subproc = subprocess.Popen( cmd_sequence
, cwd=test_executor.system_manager.randgen_path
, env=test_executor.working_environment
, shell=shell_flag
, stdout = randgen_output
, stderr = subprocess.STDOUT
)
return randgen_subproc
def find_backup_path(self, output):
""" Determine xtrabackup directory from output """
backup_path = None
output = output.split('\n')
flag_string = "Backup created in directory"
for line in output:
if flag_string in line:
backup_path = line.split(flag_string)[1].strip().replace("'",'')
return backup_path
def wait_slaves_ready(self, master_server, slave_servers, cycles = 30):
""" Utility func to pause until the slaves are 'ready'
The definition of 'ready' will vary upon server
implementation
"""
while slave_servers and cycles:
for idx, slave_server in enumerate(slave_servers):
if slave_server.slave_ready():
slave_servers.pop(idx)
cycles -= 1
# short sleep to avoid polling slaves in busy loop
time.sleep(0.5)
if cycles == 0 and slave_servers:
raise Exception("Max cycles reached when waiting for slave servers to start")
|
eli-b/pytest
|
refs/heads/master
|
_pytest/compat.py
|
4
|
"""
python version compatibility code
"""
import sys
import inspect
import types
import re
import functools
import py
import _pytest
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
NoneType = type(None)
NOTSET = object()
PY36 = sys.version_info[:2] >= (3, 6)
MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError'
if hasattr(inspect, 'signature'):
def _format_args(func):
return str(inspect.signature(func))
else:
def _format_args(func):
return inspect.formatargspec(*inspect.getargspec(func))
isfunction = inspect.isfunction
isclass = inspect.isclass
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function.
    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid importing asyncio directly,
    which in turn also initializes the "logging" module as a side-effect (see issue #8).
"""
return (getattr(func, '_is_coroutine', False) or
(hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func)))
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
if sys.version_info[:2] == (2, 6):
def isclass(object):
""" Return true if the object is a class. Overrides inspect.isclass for
python 2.6 because it will return True for objects which always return
something on __getattr__ calls (see #1035).
Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
"""
return isinstance(object, (type, types.ClassType))
if _PY3:
import codecs
STRING_TYPES = bytes, str
def _escape_strings(val):
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if isinstance(val, bytes):
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode('ascii')
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ''
else:
return val.encode('unicode_escape').decode('ascii')
else:
STRING_TYPES = bytes, str, unicode
def _escape_strings(val):
"""In py2 bytes and str are the same type, so return if it's a bytes
object, return it unchanged if it is a full ascii string,
otherwise escape it into its binary form.
If it's a unicode string, change the unicode characters into
unicode escapes.
"""
if isinstance(val, bytes):
try:
return val.encode('ascii')
except UnicodeDecodeError:
return val.encode('string-escape')
else:
return val.encode('unicode-escape')
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
def _is_unittest_unexpected_success_a_failure():
"""Return if the test suite should fail if a @expectedFailure unittest test PASSES.
From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
Changed in version 3.4: Returns False if there were any
unexpectedSuccesses from tests marked with the expectedFailure() decorator.
"""
return sys.version_info >= (3, 4)
if _PY3:
def safe_str(v):
"""returns v as string"""
return str(v)
else:
def safe_str(v):
"""returns v as string, converting to ascii if necessary"""
try:
return str(v)
except UnicodeError:
errors = 'replace'
return v.encode('ascii', errors)
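# Illustrative sketch (not part of the original _pytest/compat.py): what
# _escape_strings produces for non-ascii input on Python 3; the expected values
# follow the docstring above. Guarded so importing this module is unaffected.
if _PY3 and __name__ == '__main__':
    print(_escape_strings(b'\xc3\xb4'))  # escaped bytes:    \xc3\xb4
    print(_escape_strings(u'\xf4'))      # unicode escapes:  \xf4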
|
ar7z1/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_gslbapplicationpersistenceprofile.py
|
20
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbapplicationpersistenceprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of GslbApplicationPersistenceProfile Avi RESTful Object
description:
- This module is used to configure GslbApplicationPersistenceProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- Field introduced in 17.1.1.
name:
description:
- A user-friendly name for the persistence profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the persistence profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbApplicationPersistenceProfile object
avi_gslbapplicationpersistenceprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbapplicationpersistenceprofile
"""
RETURN = '''
obj:
description: GslbApplicationPersistenceProfile (api/gslbapplicationpersistenceprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'gslbapplicationpersistenceprofile',
set([]))
if __name__ == '__main__':
main()
|
bernardopires/django-tenant-schemas
|
refs/heads/master
|
tenant_schemas/models.py
|
3
|
from django.core.management import call_command
from django.db import connection, models
from tenant_schemas.postgresql_backend.base import _check_schema_name
from tenant_schemas.signals import post_schema_sync
from tenant_schemas.utils import get_public_schema_name, schema_exists
class TenantQueryset(models.QuerySet):
"""
QuerySet for instances that inherit from the TenantMixin.
"""
def delete(self):
"""
Make sure we call the delete method of each object in the queryset so
that safety checks and schema deletion (if requested) are executed
even when using bulk delete.
"""
counter, counter_dict = 0, {}
for obj in self:
result = obj.delete()
if result is not None:
current_counter, current_counter_dict = result
counter += current_counter
counter_dict.update(current_counter_dict)
if counter:
return counter, counter_dict
class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
domain_url = models.CharField(max_length=128, unique=True)
schema_name = models.CharField(max_length=63, unique=True,
validators=[_check_schema_name])
objects = TenantQueryset.as_manager()
class Meta:
abstract = True
def save(self, verbosity=1, *args, **kwargs):
is_new = self.pk is None
if is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super(TenantMixin, self).save(*args, **kwargs)
if is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
except:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
else:
post_schema_sync.send(sender=TenantMixin, tenant=self)
def delete(self, force_drop=False, *args, **kwargs):
"""
        Deletes this row. Drops the tenant's schema if the attribute
        auto_drop_schema is set to True.
"""
if connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
cursor = connection.cursor()
cursor.execute('DROP SCHEMA IF EXISTS %s CASCADE' % self.schema_name)
return super(TenantMixin, self).delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
if sync_schema:
call_command('migrate_schemas',
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
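# Illustrative sketch (not part of tenant_schemas.models): a minimal concrete tenant
# model as it would appear in a project's own models.py. The class and field names
# below are assumptions for the example; it is shown as a comment because a concrete
# model must live in an installed app, not in this abstract base module.
#
#     class Client(TenantMixin):
#         name = models.CharField(max_length=100)
#         created_on = models.DateField(auto_now_add=True)
#
# Saving a new Client from the public schema creates the schema named in
# ``schema_name`` and migrates it, because auto_create_schema defaults to True.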
|
sergio-incaser/odoo
|
refs/heads/8.0
|
addons/mass_mailing/models/mail_thread.py
|
66
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
import re
from openerp.addons.mail.mail_message import decode
from openerp.addons.mail.mail_thread import decode_header
from openerp.osv import osv
_logger = logging.getLogger(__name__)
class MailThread(osv.AbstractModel):
""" Update MailThread to add the feature of bounced emails and replied emails
in message_process. """
_name = 'mail.thread'
_inherit = ['mail.thread']
def message_route_check_bounce(self, cr, uid, message, context=None):
""" Override to verify that the email_to is the bounce alias. If it is the
case, log the bounce, set the parent and related document as bounced and
return False to end the routing process. """
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
# 0. Verify whether this is a bounced email (wrong destination,...) -> use it to collect data, such as dead leads
if bounce_alias and bounce_alias in email_to:
# Bounce regex
# Typical form of bounce is bounce_alias-128-crm.lead-34@domain
# group(1) = the mail ID; group(2) = the model (if any); group(3) = the record ID
bounce_re = re.compile("%s-(\d+)-?([\w.]+)?-?(\d+)?" % re.escape(bounce_alias), re.UNICODE)
bounce_match = bounce_re.search(email_to)
if bounce_match:
bounced_model, bounced_thread_id = None, False
bounced_mail_id = bounce_match.group(1)
stat_ids = self.pool['mail.mail.statistics'].set_bounced(cr, uid, mail_mail_ids=[bounced_mail_id], context=context)
for stat in self.pool['mail.mail.statistics'].browse(cr, uid, stat_ids, context=context):
bounced_model = stat.model
bounced_thread_id = stat.res_id
_logger.info('Routing mail from %s to %s with Message-Id %s: bounced mail from mail %s, model: %s, thread_id: %s',
email_from, email_to, message_id, bounced_mail_id, bounced_model, bounced_thread_id)
if bounced_model and bounced_model in self.pool and hasattr(self.pool[bounced_model], 'message_receive_bounce') and bounced_thread_id:
self.pool[bounced_model].message_receive_bounce(cr, uid, [bounced_thread_id], mail_id=bounced_mail_id, context=context)
return False
return True
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
if not self.message_route_check_bounce(cr, uid, message, context=context):
return []
return super(MailThread, self).message_route(cr, uid, message, message_dict, model, thread_id, custom_values, context)
def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
"""Called by ``message_process`` when a bounce email (such as Undelivered
Mail Returned to Sender) is received for an existing thread. The default
behavior is to check if an integer ``message_bounce`` column exists.
If it is the case, its content is incremented. """
if 'message_bounce' in self._fields:
for obj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
""" Override to update the parent mail statistics. The parent is found
by using the References header of the incoming message and looking for
matching message_id in mail.mail.statistics. """
if message.get('References'):
message_ids = [x.strip() for x in decode(message['References']).split()]
self.pool['mail.mail.statistics'].set_replied(cr, uid, mail_message_ids=message_ids, context=context)
return super(MailThread, self).message_route_process(cr, uid, message, message_dict, routes, context=context)
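# --- Editor's note (hedged, standalone demo) ----------------------------------
# message_route_check_bounce above parses bounce addresses of the form
# <bounce_alias>-<mail id>-<model>-<record id>@<domain>. The snippet below only
# exercises the same regular expression on a made-up address (alias and domain
# are hypothetical); it does not touch the ORM or the routing logic.
if __name__ == '__main__':
    _alias = 'bounce'
    _bounce_re = re.compile("%s-(\d+)-?([\w.]+)?-?(\d+)?" % re.escape(_alias), re.UNICODE)
    _match = _bounce_re.search('bounce-128-crm.lead-34@example.com')
    # Expected captures: mail id '128', model 'crm.lead', record id '34'
    assert _match.groups() == ('128', 'crm.lead', '34')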
|
namhyung/uftrace
|
refs/heads/master
|
tests/t106_report_time.py
|
2
|
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'sleep', """
Total time Self time Calls Function
========== ========== ========== ====================================
2.103 ms 0.910 us 1 main
2.102 ms 18.787 us 1 foo
2.084 ms 4.107 us 1 bar
2.080 ms 2.080 ms 1 usleep
""", sort='report')
def prepare(self):
self.subcmd = 'record'
return self.runcmd()
def setup(self):
self.subcmd = 'report'
self.option = '-t 1ms'
|
nickvandewiele/RMG-Py
|
refs/heads/master
|
rmgpy/molecule/group.py
|
3
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module provides classes and methods for working with molecular substructure
groups. These enable molecules to be searched for common motifs (e.g.
reaction sites).
"""
import cython
from .graph import Vertex, Edge, Graph
from .atomtype import atomTypes, AtomType
################################################################################
class ActionError(Exception):
"""
An exception class for errors that occur while applying reaction recipe
actions. Pass a string describing the circumstances that caused the
exceptional behavior.
"""
pass
################################################################################
class GroupAtom(Vertex):
"""
An atom group. This class is based on the :class:`Atom` class, except that
it uses :ref:`atom types <atom-types>` instead of elements, and all
attributes are lists rather than individual values. The attributes are:
=================== =================== ====================================
Attribute Type Description
=================== =================== ====================================
`atomType` ``list`` The allowed atom types (as :class:`AtomType` objects)
`radicalElectrons` ``list`` The allowed numbers of radical electrons (as short integers)
`charge` ``list`` The allowed formal charges (as short integers)
`label` ``str`` A string label that can be used to tag individual atoms
`lonePairs` ``list`` The number of lone electron pairs
=================== =================== ====================================
Each list represents a logical OR construct, i.e. an atom will match the
group if it matches *any* item in the list. However, the
`radicalElectrons` and `charge` attributes are linked
such that an atom must match values from the same index in each of these in
order to match.
"""
def __init__(self, atomType=None, radicalElectrons=None, charge=None, label='', lonePairs=None):
Vertex.__init__(self)
self.atomType = atomType or []
for index in range(len(self.atomType)):
if isinstance(self.atomType[index], str):
self.atomType[index] = atomTypes[self.atomType[index]]
self.radicalElectrons = radicalElectrons or []
self.charge = charge or []
self.label = label
self.lonePairs = lonePairs or []
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
d = {
'edges': self.edges,
'connectivity1': self.connectivity1,
'connectivity2': self.connectivity2,
'connectivity3': self.connectivity3,
'sortingLabel': self.sortingLabel,
}
atomType = self.atomType
if atomType is not None:
atomType = [a.label for a in atomType]
return (GroupAtom, (atomType, self.radicalElectrons, self.charge, self.label, self.lonePairs), d)
def __setstate__(self, d):
"""
A helper function used when unpickling an object.
"""
self.edges = d['edges']
self.connectivity1 = d['connectivity1']
self.connectivity2 = d['connectivity2']
self.connectivity3 = d['connectivity3']
self.sortingLabel = d['sortingLabel']
def __str__(self):
"""
Return a human-readable string representation of the object.
"""
return '[{0}]'.format(','.join([repr(a.label) for a in self.atomType]))
def __repr__(self):
"""
Return a representation that can be used to reconstruct the object.
"""
return "<GroupAtom {0!s}>".format(self)
@property
def bonds(self): return self.edges
def copy(self):
"""
Return a deep copy of the :class:`GroupAtom` object. Modifying the
attributes of the copy will not affect the original.
"""
return GroupAtom(self.atomType[:], self.radicalElectrons[:], self.charge[:], self.label, self.lonePairs[:])
def __changeBond(self, order):
"""
Update the atom group as a result of applying a CHANGE_BOND action,
where `order` specifies whether the bond is incremented or decremented
in bond order, and should be 1 or -1.
"""
atomType = []
for atom in self.atomType:
if order == 1:
atomType.extend(atom.incrementBond)
elif order == -1:
atomType.extend(atom.decrementBond)
else:
raise ActionError('Unable to update GroupAtom due to CHANGE_BOND action: Invalid order "{0}".'.format(order))
if len(atomType) == 0:
raise ActionError('Unable to update GroupAtom due to CHANGE_BOND action: Unknown atom type produced from set "{0}".'.format(self.atomType))
# Set the new atom types, removing any duplicates
self.atomType = list(set(atomType))
def __formBond(self, order):
"""
Update the atom group as a result of applying a FORM_BOND action,
where `order` specifies the order of the forming bond, and should be
'S' (since we only allow forming of single bonds).
"""
if order != 'S':
raise ActionError('Unable to update GroupAtom due to FORM_BOND action: Invalid order "{0}".'.format(order))
atomType = []
for atom in self.atomType:
atomType.extend(atom.formBond)
if len(atomType) == 0:
raise ActionError('Unable to update GroupAtom due to FORM_BOND action: Unknown atom type produced from set "{0}".'.format(self.atomType))
# Set the new atom types, removing any duplicates
self.atomType = list(set(atomType))
def __breakBond(self, order):
"""
Update the atom group as a result of applying a BREAK_BOND action,
where `order` specifies the order of the breaking bond, and should be
'S' (since we only allow breaking of single bonds).
"""
if order != 'S':
raise ActionError('Unable to update GroupAtom due to BREAK_BOND action: Invalid order "{0}".'.format(order))
atomType = []
for atom in self.atomType:
atomType.extend(atom.breakBond)
if len(atomType) == 0:
raise ActionError('Unable to update GroupAtom due to BREAK_BOND action: Unknown atom type produced from set "{0}".'.format(self.atomType))
# Set the new atom types, removing any duplicates
self.atomType = list(set(atomType))
def __gainRadical(self, radical):
"""
Update the atom group as a result of applying a GAIN_RADICAL action,
where `radical` specifies the number of radical electrons to add.
"""
radicalElectrons = []
if any([len(atomType.incrementRadical) == 0 for atomType in self.atomType]):
raise ActionError('Unable to update GroupAtom due to GAIN_RADICAL action: Unknown atom type produced from set "{0}".'.format(self.atomType))
for electron in self.radicalElectrons:
radicalElectrons.append(electron + radical)
# Set the new radical electron counts
self.radicalElectrons = radicalElectrons
def __loseRadical(self, radical):
"""
Update the atom group as a result of applying a LOSE_RADICAL action,
where `radical` specifies the number of radical electrons to remove.
"""
radicalElectrons = []
pairs = set()
if any([len(atomType.decrementRadical) == 0 for atomType in self.atomType]):
raise ActionError('Unable to update GroupAtom due to LOSE_RADICAL action: Unknown atom type produced from set "{0}".'.format(self.atomType))
for electron in self.radicalElectrons:
electron = electron - radical
if electron < 0:
raise ActionError('Unable to update GroupAtom due to LOSE_RADICAL action: Invalid radical electron set "{0}".'.format(self.radicalElectrons))
radicalElectrons.append(electron)
# Set the new radical electron counts
self.radicalElectrons = radicalElectrons
def __gainPair(self, pair):
"""
Update the atom group as a result of applying a GAIN_PAIR action,
where `pair` specifies the number of lone electron pairs to add.
"""
lonePairs = []
if any([len(atomType.incrementLonePair) == 0 for atomType in self.atomType]):
raise ActionError('Unable to update GroupAtom due to GAIN_PAIR action: Unknown atom type produced from set "{0}".'.format(self.atomType))
for lonePair in self.lonePairs:
lonePairs.append(lonePair + pair)
# Set the new lone electron pair count
self.lonePairs = lonePairs
def __losePair(self, pair):
"""
Update the atom group as a result of applying a LOSE_PAIR action,
where `pair` specifies the number of lone electron pairs to remove.
"""
lonePairs = []
if any([len(atomType.decrementLonePair) == 0 for atomType in self.atomType]):
raise ActionError('Unable to update GroupAtom due to LOSE_PAIR action: Unknown atom type produced from set "{0}".'.format(self.atomType))
for lonePair in self.lonePairs:
if lonePair - pair < 0:
raise ActionError('Unable to update GroupAtom due to LOSE_PAIR action: Invalid lone electron pairs set "{0}".'.format(self.lonePairs))
lonePairs.append(lonePair - pair)
# Set the new lone electron pair count
self.lonePairs = lonePairs
def applyAction(self, action):
"""
Update the atom group as a result of applying `action`, a tuple
containing the name of the reaction recipe action along with any
required parameters. The available actions can be found
:ref:`here <reaction-recipe-actions>`.
"""
act = action[0].upper()
if act == 'CHANGE_BOND':
self.__changeBond(action[2])
elif act == 'FORM_BOND':
self.__formBond(action[2])
elif act == 'BREAK_BOND':
self.__breakBond(action[2])
elif act == 'GAIN_RADICAL':
self.__gainRadical(action[2])
elif act == 'LOSE_RADICAL':
self.__loseRadical(action[2])
elif act == 'GAIN_PAIR':
self.__gainPair(action[2])
elif act == 'LOSE_PAIR':
self.__losePair(action[2])
else:
raise ActionError('Unable to update GroupAtom: Invalid action {0}".'.format(action))
def equivalent(self, other):
"""
Returns ``True`` if `other` is equivalent to `self` or ``False`` if not,
where `other` can be either an :class:`Atom` or an :class:`GroupAtom`
object. When comparing two :class:`GroupAtom` objects, this function
respects wildcards, e.g. ``R!H`` is equivalent to ``C``.
"""
cython.declare(group=GroupAtom)
if not isinstance(other, GroupAtom):
# Let the equivalent method of other handle it
# We expect self to be an Atom object, but can't test for it here
# because that would create an import cycle
return other.equivalent(self)
group=other
cython.declare(atomType1=AtomType, atomType2=AtomType, radical1=cython.short, radical2=cython.short,
lp1=cython.short, lp2=cython.short, charge1=cython.short, charge2=cython.short)
# Compare two atom groups for equivalence
# Each atom type in self must have an equivalent in other (and vice versa)
for atomType1 in self.atomType:
for atomType2 in group.atomType:
if atomType1.equivalent(atomType2): break
else:
return False
for atomType1 in group.atomType:
for atomType2 in self.atomType:
if atomType1.equivalent(atomType2): break
else:
return False
# Each free radical electron state in self must have an equivalent in other (and vice versa)
for radical1 in self.radicalElectrons:
if group.radicalElectrons: # Only check if the list is non-empty. An empty list indicates a wildcard.
for radical2 in group.radicalElectrons:
if radical1 == radical2: break
else:
return False
for radical1 in group.radicalElectrons:
if self.radicalElectrons:
for radical2 in self.radicalElectrons:
if radical1 == radical2: break
else:
return False
for lp1 in self.lonePairs:
if group.lonePairs:
for lp2 in group.lonePairs:
if lp1 == lp2: break
else:
return False
#Each charge in self must have an equivalent in other (and vice versa)
for charge1 in self.charge:
if group.charge:
for charge2 in group.charge:
if charge1 == charge2: break
else:
return False
for charge1 in group.charge:
if self.charge:
for charge2 in self.charge:
if charge1 == charge2: break
else:
return False
# Otherwise the two atom groups are equivalent
return True
def isSpecificCaseOf(self, other):
"""
Returns ``True`` if `other` is the same as `self` or is a more
specific case of `self`. Returns ``False`` if some of `self` is not
included in `other` or they are mutually exclusive.
"""
cython.declare(group=GroupAtom)
if not isinstance(other, GroupAtom):
# Let the isSpecificCaseOf method of other handle it
# We expect self to be an Atom object, but can't test for it here
# because that would create an import cycle
return other.isSpecificCaseOf(self)
group=other
cython.declare(atomType1=AtomType, atomType2=AtomType, radical1=cython.short, radical2=cython.short,
lp1=cython.short, lp2=cython.short, charge1=cython.short, charge2=cython.short)
# Compare two atom groups for equivalence
# Each atom type in self must have an equivalent in other (and vice versa)
for atomType1 in self.atomType: # all these must match
for atomType2 in group.atomType: # can match any of these
if atomType1.isSpecificCaseOf(atomType2): break
else:
return False
# Each free radical electron state in self must have an equivalent in other (and vice versa)
if self.radicalElectrons:
for radical1 in self.radicalElectrons:
if group.radicalElectrons:
for radical2 in group.radicalElectrons:
if radical1 == radical2: break
else:
return False
else:
if group.radicalElectrons: return False
if self.lonePairs:
for lp1 in self.lonePairs:
if group.lonePairs:
for lp2 in group.lonePairs:
if lp1 == lp2: break
else:
return False
else:
if group.lonePairs: return False
#Each charge in self must have an equivalent in other
if self.charge:
for charge1 in self.charge:
if group.charge:
for charge2 in group.charge:
if charge1 == charge2: break
else:
return False
else:
if group.charge: return False
# Otherwise self is in fact a specific case of other
return True
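# --- Editor's note (hedged, standalone sketch) ---------------------------------
# The GroupAtom docstring above describes each attribute list as a logical OR:
# an atom matches if it matches *any* entry, and an empty list acts as a
# wildcard. The minimal stand-in below uses plain Python values (not the RMG-Py
# API) just to make that matching rule concrete.
_allowed_types = ['Cs', 'Cd', 'CO']   # hypothetical allowed atom-type labels
_allowed_radicals = [0, 1]            # hypothetical allowed radical counts
def _matches(atom_type, radicals):
    # Empty lists behave as wildcards; non-empty lists are OR-ed over.
    return (not _allowed_types or atom_type in _allowed_types) and \
           (not _allowed_radicals or radicals in _allowed_radicals)
assert _matches('Cd', 1)      # matches one entry from each list
assert not _matches('Ct', 0)  # atom type not in the allowed set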
################################################################################
class GroupBond(Edge):
"""
A bond group. This class is based on the :class:`Bond` class, except that
all attributes are lists rather than individual values. The allowed bond
types are given :ref:`here <bond-types>`. The attributes are:
=================== =================== ====================================
Attribute Type Description
=================== =================== ====================================
`order` ``list`` The allowed bond orders (as character strings)
=================== =================== ====================================
Each list represents a logical OR construct, i.e. a bond will match the
group if it matches *any* item in the list.
"""
def __init__(self, atom1, atom2, order=None):
Edge.__init__(self, atom1, atom2)
self.order = order or []
def __str__(self):
"""
Return a human-readable string representation of the object.
"""
return str(self.order)
def __repr__(self):
"""
Return a representation that can be used to reconstruct the object.
"""
return "<GroupBond {0!r}>".format(self.order)
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (GroupBond, (self.vertex1, self.vertex2, self.order))
def copy(self):
"""
Return a deep copy of the :class:`GroupBond` object. Modifying the
attributes of the copy will not affect the original.
"""
return GroupBond(self.vertex1, self.vertex2, self.order[:])
def __changeBond(self, order):
"""
Update the bond group as a result of applying a CHANGE_BOND action,
where `order` specifies whether the bond is incremented or decremented
in bond order, and should be 1 or -1.
"""
newOrder = []
for bond in self.order:
if order == 1:
if bond == 'S': newOrder.append('D')
elif bond == 'D': newOrder.append('T')
else:
raise ActionError('Unable to update GroupBond due to CHANGE_BOND action: Invalid bond order "{0}" in set {1}".'.format(bond, self.order))
elif order == -1:
if bond == 'D': newOrder.append('S')
elif bond == 'T': newOrder.append('D')
else:
raise ActionError('Unable to update GroupBond due to CHANGE_BOND action: Invalid bond order "{0}" in set {1}".'.format(bond, self.order))
else:
raise ActionError('Unable to update GroupBond due to CHANGE_BOND action: Invalid order "{0}".'.format(order))
# Set the new bond orders, removing any duplicates
self.order = list(set(newOrder))
def applyAction(self, action):
"""
Update the bond group as a result of applying `action`, a tuple
containing the name of the reaction recipe action along with any
required parameters. The available actions can be found
:ref:`here <reaction-recipe-actions>`.
"""
if action[0].upper() == 'CHANGE_BOND':
self.__changeBond(action[2])
else:
raise ActionError('Unable to update GroupBond: Invalid action {0}".'.format(action))
def equivalent(self, other):
"""
Returns ``True`` if `other` is equivalent to `self` or ``False`` if not,
where `other` can be either an :class:`Bond` or an :class:`GroupBond`
object.
"""
cython.declare(gb=GroupBond)
if not isinstance(other, GroupBond):
# Let the equivalent method of other handle it
# We expect self to be a Bond object, but can't test for it here
# because that would create an import cycle
return other.equivalent(self)
gb = other
cython.declare(order1=str, order2=str)
# Compare two bond groups for equivalence
# Each bond order in self must have an equivalent in other (and vice versa)
for order1 in self.order:
for order2 in gb.order:
if order1 == order2: break
else:
return False
for order1 in gb.order:
for order2 in self.order:
if order1 == order2: break
else:
return False
# Otherwise the two bond groups are equivalent
return True
def isSpecificCaseOf(self, other):
"""
Returns ``True`` if `other` is the same as `self` or is a more
specific case of `self`. Returns ``False`` if some of `self` is not
included in `other` or they are mutually exclusive.
"""
cython.declare(gb=GroupBond)
if not isinstance(other, GroupBond):
# Let the isSpecificCaseOf method of other handle it
# We expect self to be a Bond object, but can't test for it here
# because that would create an import cycle
return other.isSpecificCaseOf(self)
gb = other
cython.declare(order1=str, order2=str)
# Compare two bond groups for equivalence
# Each bond order in self must have an equivalent in other
for order1 in self.order: # all these must match
for order2 in gb.order: # can match any of these
if order1 == order2: break
else:
return False
# Otherwise self is in fact a specific case of other
return True
################################################################################
class Group(Graph):
"""
A representation of a molecular substructure group using a graph data
type, extending the :class:`Graph` class. The `atoms` and `bonds` attributes
are aliases for the `vertices` and `edges` attributes, and store
:class:`GroupAtom` and :class:`GroupBond` objects, respectively.
Corresponding alias methods have also been provided.
"""
def __init__(self, atoms=None, multiplicity=None):
Graph.__init__(self, atoms)
self.multiplicity = multiplicity if multiplicity else []
self.update()
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (Group, (self.vertices,))
def __getAtoms(self): return self.vertices
def __setAtoms(self, atoms): self.vertices = atoms
atoms = property(__getAtoms, __setAtoms)
def addAtom(self, atom):
"""
Add an `atom` to the graph. The atom is initialized with no bonds.
"""
return self.addVertex(atom)
def addBond(self, bond):
"""
Add a `bond` to the graph as an edge connecting the two atoms `atom1`
and `atom2`.
"""
return self.addEdge(bond)
def getBonds(self, atom):
"""
Return a list of the bonds involving the specified `atom`.
"""
return self.getEdges(atom)
def getBond(self, atom1, atom2):
"""
Returns the bond connecting atoms `atom1` and `atom2`.
"""
return self.getEdge(atom1, atom2)
def hasAtom(self, atom):
"""
Returns ``True`` if `atom` is an atom in the graph, or ``False`` if
not.
"""
return self.hasVertex(atom)
def hasBond(self, atom1, atom2):
"""
Returns ``True`` if atoms `atom1` and `atom2` are connected
by a bond, or ``False`` if not.
"""
return self.hasEdge(atom1, atom2)
def removeAtom(self, atom):
"""
Remove `atom` and all bonds associated with it from the graph. Does
not remove atoms that no longer have any bonds as a result of this
removal.
"""
return self.removeVertex(atom)
def removeBond(self, bond):
"""
Remove the bond between atoms `atom1` and `atom2` from the graph.
Does not remove atoms that no longer have any bonds as a result of
this removal.
"""
return self.removeEdge(bond)
def sortAtoms(self):
"""
Sort the atoms in the graph. This can make certain operations, e.g.
the isomorphism functions, much more efficient.
"""
return self.sortVertices()
def copy(self, deep=False):
"""
Create a copy of the current graph. If `deep` is ``True``, a deep copy
is made: copies of the vertices and edges are used in the new graph.
If `deep` is ``False`` or not specified, a shallow copy is made: the
original vertices and edges are used in the new graph.
"""
other = cython.declare(Group)
g = Graph.copy(self, deep)
other = Group(g.vertices)
return other
def update(self):
self.updateConnectivityValues()
self.updateFingerprint()
def merge(self, other):
"""
Merge two groups so as to store them in a single
:class:`Group` object. The merged :class:`Group`
object is returned.
"""
g = Graph.merge(self, other)
molecule = Group(atoms=g.vertices)
return molecule
def split(self):
"""
Convert a single :class:`Group` object containing two or more
unconnected groups into separate :class:`Group` objects.
"""
graphs = Graph.split(self)
molecules = []
for g in graphs:
molecule = Group(atoms=g.vertices)
molecules.append(molecule)
return molecules
def clearLabeledAtoms(self):
"""
Remove the labels from all atoms in the molecular group.
"""
for atom in self.vertices:
atom.label = ''
def containsLabeledAtom(self, label):
"""
Return ``True`` if the group contains an atom with the label
`label` and ``False`` otherwise.
"""
for atom in self.vertices:
if atom.label == label: return True
return False
def getLabeledAtom(self, label):
"""
Return the atom in the group that is labeled with the given `label`.
Raises :class:`ValueError` if no atom in the group has that label.
"""
for atom in self.vertices:
if atom.label == label: return atom
raise ValueError('No atom in the functional group has the label "{0}".'.format(label))
def getLabeledAtoms(self):
"""
Return the labeled atoms as a ``dict`` with the keys being the labels
and the values the atoms themselves. If two or more atoms have the
same label, the value is converted to a list of these atoms.
"""
labeled = {}
for atom in self.vertices:
if atom.label != '':
if atom.label in labeled:
if isinstance(labeled[atom.label],list):
labeled[atom.label].append(atom)
else:
labeled[atom.label] = [labeled[atom.label]]
labeled[atom.label].append(atom)
else:
labeled[atom.label] = atom
return labeled
def fromAdjacencyList(self, adjlist):
"""
Convert a string adjacency list `adjlist` to a molecular structure.
"""
from .adjlist import fromAdjacencyList
self.vertices, multiplicity = fromAdjacencyList(adjlist, group=True)
if multiplicity is not None:
self.multiplicity = multiplicity
self.update()
return self
def toAdjacencyList(self, label=''):
"""
Convert the molecular structure to a string adjacency list.
"""
from .adjlist import toAdjacencyList
return toAdjacencyList(self.vertices, multiplicity=self.multiplicity, label=label, group=True)
def updateFingerprint(self):
"""
Update the molecular fingerprint used to accelerate the subgraph
isomorphism checks.
"""
cython.declare(atom=GroupAtom, atomType=AtomType)
cython.declare(carbon=AtomType, nitrogen=AtomType, oxygen=AtomType, sulfur=AtomType)
cython.declare(isCarbon=cython.bint, isNitrogen=cython.bint, isOxygen=cython.bint, isSulfur=cython.bint, radical=cython.int)
carbon = atomTypes['C']
nitrogen = atomTypes['N']
oxygen = atomTypes['O']
sulfur = atomTypes['S']
self.carbonCount = 0
self.nitrogenCount = 0
self.oxygenCount = 0
self.sulfurCount = 0
self.radicalCount = 0
for atom in self.vertices:
if len(atom.atomType) == 1:
atomType = atom.atomType[0]
isCarbon = atomType.equivalent(carbon)
isNitrogen = atomType.equivalent(nitrogen)
isOxygen = atomType.equivalent(oxygen)
isSulfur = atomType.equivalent(sulfur)
if isCarbon and not isNitrogen and not isOxygen and not isSulfur:
self.carbonCount += 1
elif isNitrogen and not isCarbon and not isOxygen and not isSulfur:
self.nitrogenCount += 1
elif isOxygen and not isCarbon and not isNitrogen and not isSulfur:
self.oxygenCount += 1
elif isSulfur and not isCarbon and not isNitrogen and not isOxygen:
self.sulfurCount += 1
if len(atom.radicalElectrons) == 1:
radical = atom.radicalElectrons[0]
self.radicalCount += radical
def isIsomorphic(self, other, initialMap=None):
"""
Returns ``True`` if two graphs are isomorphic and ``False``
otherwise. The `initialMap` attribute can be used to specify a required
mapping from `self` to `other` (i.e. the atoms of `self` are the keys,
while the atoms of `other` are the values). The `other` parameter must
be a :class:`Group` object, or a :class:`TypeError` is raised.
"""
# It only makes sense to compare a Group to a Group for full
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Group):
raise TypeError('Got a {0} object for parameter "other", when a Group object is required.'.format(other.__class__))
# Do the isomorphism comparison
return Graph.isIsomorphic(self, other, initialMap)
def findIsomorphism(self, other, initialMap=None):
"""
Returns ``True`` if `other` is isomorphic and ``False``
otherwise, and the matching mapping. The `initialMap` attribute can be
used to specify a required mapping from `self` to `other` (i.e. the
atoms of `self` are the keys, while the atoms of `other` are the
values). The returned mapping also uses the atoms of `self` for the keys
and the atoms of `other` for the values. The `other` parameter must
be a :class:`Group` object, or a :class:`TypeError` is raised.
"""
# It only makes sense to compare a Group to a Group for full
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Group):
raise TypeError('Got a {0} object for parameter "other", when a Group object is required.'.format(other.__class__))
# Do the isomorphism comparison
return Graph.findIsomorphism(self, other, initialMap)
def isSubgraphIsomorphic(self, other, initialMap=None):
"""
Returns ``True`` if `other` is subgraph isomorphic and ``False``
otherwise. In other words, return ``True`` if self is more specific than other.
The `initialMap` attribute can be used to specify a required
mapping from `self` to `other` (i.e. the atoms of `self` are the keys,
while the atoms of `other` are the values). The `other` parameter must
be a :class:`Group` object, or a :class:`TypeError` is raised.
"""
cython.declare(group=Group)
cython.declare(mult1=cython.short, mult2=cython.short)
# It only makes sense to compare a Group to a Group for subgraph
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Group):
raise TypeError('Got a {0} object for parameter "other", when a Group object is required.'.format(other.__class__))
group = other
if self.multiplicity:
for mult1 in self.multiplicity:
if group.multiplicity:
for mult2 in group.multiplicity:
if mult1 == mult2: break
else:
return False
else:
if group.multiplicity: return False
# Do the isomorphism comparison
return Graph.isSubgraphIsomorphic(self, other, initialMap)
def findSubgraphIsomorphisms(self, other, initialMap=None):
"""
Returns ``True`` if `other` is subgraph isomorphic and ``False``
otherwise. In other words, return ``True`` if self is more specific than other.
Also returns a list of all valid mappings. The
`initialMap` attribute can be used to specify a required mapping from
`self` to `other` (i.e. the atoms of `self` are the keys, while the
atoms of `other` are the values). The returned mappings also use the
atoms of `self` for the keys and the atoms of `other` for the values.
The `other` parameter must be a :class:`Group` object, or a
:class:`TypeError` is raised.
"""
cython.declare(group=Group)
cython.declare(mult1=cython.short, mult2=cython.short)
# It only makes sense to compare a Group to a Group for subgraph
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Group):
raise TypeError('Got a {0} object for parameter "other", when a Group object is required.'.format(other.__class__))
group = other
if self.multiplicity:
for mult1 in self.multiplicity:
if group.multiplicity:
for mult2 in group.multiplicity:
if mult1 == mult2: break
else:
return []
else:
if group.multiplicity: return []
# Do the isomorphism comparison
return Graph.findSubgraphIsomorphisms(self, other, initialMap)
def isIdentical(self, other):
"""
Returns ``True`` if `other` is identical and ``False`` otherwise.
The function `isIsomorphic` respects wildcards, while this function
does not, making it more useful for checking groups against groups (as
opposed to molecules against groups).
"""
# It only makes sense to compare a Group to a Group for full
# isomorphism, so raise an exception if this is not what was requested
if not isinstance(other, Group):
raise TypeError('Got a {0} object for parameter "other", when a Group object is required.'.format(other.__class__))
# An identical group is always a child of itself and
# is the only case where that is true. Therefore
# if we do both directions of isSubgraphIsomorphic, we need
# to get True twice for it to be identical
if not self.isSubgraphIsomorphic(other):
return False
elif not other.isSubgraphIsomorphic(self):
return False
else:
return True
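# --- Editor's note (hedged, standalone analogy) --------------------------------
# isIdentical above requires subgraph isomorphism in *both* directions. A crude
# stand-in using Python sets (purely illustrative, not the RMG-Py data model):
# treat "is a specific case of" as subset containment between allowed values.
_wildcard = {'Cs', 'Cd'}   # hypothetical group allowing two atom types
_specific = {'Cs'}         # hypothetical group allowing only one
assert _specific <= _wildcard                 # a specific case of the wildcard group
assert not (_wildcard <= _specific)           # but not the other way round
assert not (_specific <= _wildcard and _wildcard <= _specific)  # hence not "identical"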
|
esrille/ibus-replace-with-kanji
|
refs/heads/master
|
dic_tools/okuri.py
|
1
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Esrille Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import re
import sys
from signal import signal, SIGPIPE, SIG_DFL
import dic
if __name__ == '__main__':
signal(SIGPIPE, SIG_DFL)
path = 'restrained.dic'
if 2 <= len(sys.argv):
path = sys.argv[1]
dict = dic.load(path)
okuri = dic.okuri(dict)
dic.output(okuri)
|
chenc10/Spark-PAF
|
refs/heads/master
|
python/pyspark/statcounter.py
|
130
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is ported from spark/util/StatCounter.scala
import copy
import math
try:
from numpy import maximum, minimum, sqrt
except ImportError:
maximum = max
minimum = min
sqrt = math.sqrt
class StatCounter(object):
def __init__(self, values=None):
if values is None:
values = list()
self.n = 0 # Running count of our values
self.mu = 0.0 # Running mean of our values
self.m2 = 0.0 # Running variance numerator (sum of (x - mean)^2)
self.maxValue = float("-inf")
self.minValue = float("inf")
for v in values:
self.merge(v)
# Add a value into this StatCounter, updating the internal statistics.
def merge(self, value):
delta = value - self.mu
self.n += 1
self.mu += delta / self.n
self.m2 += delta * (value - self.mu)
self.maxValue = maximum(self.maxValue, value)
self.minValue = minimum(self.minValue, value)
return self
# Merge another StatCounter into this one, adding up the internal statistics.
def mergeStats(self, other):
if not isinstance(other, StatCounter):
raise Exception("Can only merge Statcounters!")
if other is self: # reference equality holds
self.merge(copy.deepcopy(other)) # Avoid overwriting fields in a weird order
else:
if self.n == 0:
self.mu = other.mu
self.m2 = other.m2
self.n = other.n
self.maxValue = other.maxValue
self.minValue = other.minValue
elif other.n != 0:
delta = other.mu - self.mu
if other.n * 10 < self.n:
self.mu = self.mu + (delta * other.n) / (self.n + other.n)
elif self.n * 10 < other.n:
self.mu = other.mu - (delta * self.n) / (self.n + other.n)
else:
self.mu = (self.mu * self.n + other.mu * other.n) / (self.n + other.n)
self.maxValue = maximum(self.maxValue, other.maxValue)
self.minValue = minimum(self.minValue, other.minValue)
self.m2 += other.m2 + (delta * delta * self.n * other.n) / (self.n + other.n)
self.n += other.n
return self
# Clone this StatCounter
def copy(self):
return copy.deepcopy(self)
def count(self):
return int(self.n)
def mean(self):
return self.mu
def sum(self):
return self.n * self.mu
def min(self):
return self.minValue
def max(self):
return self.maxValue
# Return the variance of the values.
def variance(self):
if self.n == 0:
return float('nan')
else:
return self.m2 / self.n
#
# Return the sample variance, which corrects for bias in estimating the variance by dividing
# by N-1 instead of N.
#
def sampleVariance(self):
if self.n <= 1:
return float('nan')
else:
return self.m2 / (self.n - 1)
# Return the standard deviation of the values.
def stdev(self):
return sqrt(self.variance())
#
# Return the sample standard deviation of the values, which corrects for bias in estimating the
# variance by dividing by N-1 instead of N.
#
def sampleStdev(self):
return sqrt(self.sampleVariance())
def asDict(self, sample=False):
"""Returns the :class:`StatCounter` members as a ``dict``.
>>> sc.parallelize([1., 2., 3., 4.]).stats().asDict()
{'count': 4L,
'max': 4.0,
'mean': 2.5,
'min': 1.0,
'stdev': 1.2909944487358056,
'sum': 10.0,
'variance': 1.6666666666666667}
"""
return {
'count': self.count(),
'mean': self.mean(),
'sum': self.sum(),
'min': self.min(),
'max': self.max(),
'stdev': self.stdev() if sample else self.sampleStdev(),
'variance': self.variance() if sample else self.sampleVariance()
}
def __repr__(self):
return ("(count: %s, mean: %s, stdev: %s, max: %s, min: %s)" %
(self.count(), self.mean(), self.stdev(), self.max(), self.min()))
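# --- Editor's note (hedged, standalone check) ----------------------------------
# mergeStats above combines two running (count, mean, m2) triples using the
# parallel update of Chan et al. The pure-Python check below (independent of
# Spark and numpy) verifies that merging two partitions gives the same
# statistics as computing them on the concatenated values.
def _stats(vals):
    n = len(vals)
    mu = sum(vals) / float(n)
    m2 = sum((v - mu) ** 2 for v in vals)
    return n, mu, m2
_xs, _ys = [1.0, 2.0, 3.0], [10.0, 20.0]
_n1, _mu1, _m21 = _stats(_xs)
_n2, _mu2, _m22 = _stats(_ys)
_delta = _mu2 - _mu1
_n = _n1 + _n2
_mu = (_mu1 * _n1 + _mu2 * _n2) / _n
_m2 = _m21 + _m22 + _delta * _delta * _n1 * _n2 / float(_n)
_n_all, _mu_all, _m2_all = _stats(_xs + _ys)
assert _n == _n_all and abs(_mu - _mu_all) < 1e-9 and abs(_m2 - _m2_all) < 1e-9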
|
thast/EOSC513
|
refs/heads/master
|
DC/SparseGN/DCT_withW_rad/DCT_withW.py
|
1
|
from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
import pandas as pd
from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
from scipy.interpolate import LinearNDInterpolator, interp1d
from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
import SimPEG
import scipy.sparse as sp
#2D model
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 123,41
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordinates
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
print "Mesh Size: ", mesh.nC
#Model Creation
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
def getCylinderPoints(xc,zc,r):
xLocOrig1 = np.arange(-r,r+r/10.,r/10.)
xLocOrig2 = np.arange(r,-r-r/10.,-r/10.)
# Top half of cylinder
zLoc1 = np.sqrt(-xLocOrig1**2.+r**2.)+zc
# Bottom half of cylinder
zLoc2 = -np.sqrt(-xLocOrig2**2.+r**2.)+zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)
topHalf = np.vstack([xLoc1,zLoc1]).T
topHalf = topHalf[0:-1,:]
bottomHalf = np.vstack([xLoc2,zLoc2]).T
bottomHalf = bottomHalf[0:-1,:]
cylinderPoints = np.vstack([topHalf,bottomHalf])
cylinderPoints = np.vstack([cylinderPoints,topHalf[0,:]])
return cylinderPoints
cylinderPoints0 = getCylinderPoints(x0,z1,r0)
cylinderPoints1 = getCylinderPoints(x1,z1,r1)
#Gradient array 1 2D
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
for i in range(nSrc):
if i<=11:
locA = np.r_[-14.+1., z]
locB = np.r_[-8.+2.*i-1., z]
#M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
#N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
#print -locA,-locB,"\n",[-M,-N],"\n"
else:
locA = np.r_[-14.+2*(i-11)+1., z]
locB = np.r_[14.-1.,z]
#M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
#N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print "line2",locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
survey.dobs = survey.dpred(mtrue)
survey.std = 0.05*np.ones_like(survey.dobs)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
dmisAll = DataMisfit.l2_DataMisfit(survey)
print '# of data: ', survey.dobs.shape
class SimultaneousSrc(DC.Src.BaseSrc):
"""
Dipole source
"""
QW = None
Q = None
W = None
def __init__(self, rxList,Q,W, **kwargs):
SimPEG.Survey.BaseSrc.__init__(self, rxList, **kwargs)
def eval(self, prob):
return self.QW
class SimultaneousRx(DC.Rx.BaseRx):
"""
SimultaneousRx receiver
"""
def __init__(self, locs, rxType='phi', **kwargs):
# We may not need this ...
SimPEG.Survey.BaseRx.__init__(self, locs, rxType)
@property
def nD(self):
"""Number of data in the receiver."""
return self.locs.shape[0]
# Not sure why ...
# return int(self.locs[0].size / 2)
def getP(self, mesh, Gloc):
return self.locs
P = []
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
P = rx.getP(mesh,'CC')
from SimPEG.Maps import IdentityMap
from scipy.fftpack import dct,idct
class DCTMap(IdentityMap):
def __init__(self, mesh=None, nP=None, **kwargs):
super(DCTMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m):
return Utils.mkvc(dct(dct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
def deriv(self, m, v=None):
if v is not None:
return dct(dct(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho')
else:
print "not implemented"
def inverse(self, m):
return Utils.mkvc(idct(idct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
class iDCTMap(IdentityMap):
def __init__(self, mesh, nP=None, **kwargs):
super(iDCTMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m):
return Utils.mkvc(idct(idct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
def deriv(self, m, v=None):
if v is not None:
return idct(idct(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho')
else:
print "not implemented"
def inverse(self, m):
return Utils.mkvc(dct(dct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
idctmap = iDCTMap(mesh)
dctmap = DCTMap(mesh)
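# --- Editor's note (hedged, standalone check) ----------------------------------
# DCTMap/iDCTMap above rely on the orthonormal 2-D DCT being exactly invertible.
# The quick check below (independent of the mesh and of SimPEG) confirms that
# dct followed by idct with norm='ortho' along both axes reproduces the input.
_a = np.random.randn(6, 4)
_c = dct(dct(_a, axis=0, norm='ortho'), axis=1, norm='ortho')
_b = idct(idct(_c, axis=0, norm='ortho'), axis=1, norm='ortho')
assert np.allclose(_a, _b)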
import spgl1
#Parameter for SPGL1 iterations
nits = 10
mdct = (-5.)*np.ones_like(mtrue)
it = 0
#phi_d_normal = np.load('../phid_normal.npy')
#ratio = np.r_[6.5,phi_d_normal[0:-1]/phi_d_normal[1:]]
ratio = 10.*np.ones(nits)
min_progress = 1.2
xlist = []
#Parameters for W
nsubSrc = 5
InnerIt = 1
dmisfitsub = []
dmisfitall = []
dmisfitall.append(dmisAll.eval(mdct)/survey.nD)
#Initialize Random Source
W = np.random.randint(0, high=2, size=[survey.nSrc,nsubSrc])*2-1
#problem.unpair()
#problem.pair(survey)
Q = problem.getRHS()
sub = problem.getRHS().dot(W)
rx_r = SimultaneousRx(locs=P)
srcList_r = []
for isrc in range(sub.shape[1]):
src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
srcList_r.append(src_r)
survey_r = DC.Survey(srcList_r)
problem.unpair()
problem.pair(survey_r)
d = survey_r.dpred(mtrue)
survey_r.dobs = d
survey_r.std = np.ones_like(d)*0.05
survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
dmis = DataMisfit.l2_DataMisfit(survey_r)
dmisfitsub.append(dmis.eval(mdct)/survey_r.nD)
problem.unpair()
problem.pair(survey)
print "end iteration: ",it, '; Overall Normalized Misfit: ', dmisAll.eval(mdct)/survey.nD
while (dmisAll.eval(mdct)/survey.nD)>0.5 and it<nits:
problem.unpair()
problem.pair(survey_r)
def JS(x,mode):
if mode == 1:
return problem.Jvec(mdct,idctmap*x)
else:
return dctmap*problem.Jtvec(mdct,x)
b = survey_r.dpred(mdct)-survey_r.dpred(mtrue)
print "# of data: ", b.shape
opts = spgl1.spgSetParms({'iterations':100, 'verbosity':2})
sigtol = np.linalg.norm(b)/np.maximum(ratio[it],min_progress)
#tautol = 20000.
x,resid,grad,info = spgl1.spg_bpdn(JS, b, sigma = sigtol,options=opts)
#x,resid,grad,info = spgl1.spg_lasso(JS,b,tautol,opts)
assert dmis.eval(mdct) > dmis.eval(mdct - idctmap*x)
mdct = mdct - idctmap*x
xlist.append(x)
it +=1
print "end iteration: ",it, '; Subsample Normalized Misfit: ', dmis.eval(mdct)/survey_r.nD
dmisfitsub.append(dmis.eval(mdct)/survey_r.nD)
problem.unpair()
problem.pair(survey)
dmisfitall.append(dmisAll.eval(mdct)/survey.nD)
print "Dmisfit compared to full dataset: ",dmisAll.eval(mdct)/survey.nD
if np.mod(it,InnerIt) ==0:
W = np.random.randint(0, high=2, size=[survey.nSrc,nsubSrc])*2-1
print 'update W'
#problem.unpair()
#problem.pair(survey)
Q = problem.getRHS()
sub = problem.getRHS().dot(W)
rx_r = SimultaneousRx(locs=P)
srcList_r = []
for isrc in range(sub.shape[1]):
src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
srcList_r.append(src_r)
survey_r = DC.Survey(srcList_r)
problem.unpair()
problem.pair(survey_r)
dmis = DataMisfit.l2_DataMisfit(survey_r)
d = survey_r.dpred(mtrue)
survey_r.dobs = d
survey_r.std = np.ones_like(d)*0.05
survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
print "end Update W; iteration: ",it, '; New Subsample Normalized Misfit: ', dmis.eval(mdct)/survey_r.nD
problem.unpair()
problem.pair(survey)
np.save('./dmisfitsub.npy',dmisfitsub)
np.save('./dmisfitall.npy',dmisfitall)
np.save('./mfinal.npy',mdct)
np.save('./xlist.npy',x)
mm = mesh.plotImage(mdct)
plt.colorbar(mm[0])
plt.gca().set_xlim([-10.,10.])
plt.gca().set_ylim([-10.,0.])
plt.plot(cylinderPoints0[:,0],cylinderPoints0[:,1], linestyle = 'dashed', color='k')
plt.plot(cylinderPoints1[:,0],cylinderPoints1[:,1], linestyle = 'dashed', color='k')
plt.show()
|
ilyes14/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
286
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
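# --- Editor's note (hedged, standalone illustration) ---------------------------
# A single draw illustrating the point made in the module docstring: on two
# independent random labelings the adjusted Rand index stays near 0, while the
# (non-adjusted) V-measure is clearly positive. Uses only numpy and
# sklearn.metrics, both imported above; the sizes below are arbitrary.
rng_demo = np.random.RandomState(0)
labels_x = rng_demo.randint(0, 10, size=200)
labels_y = rng_demo.randint(0, 10, size=200)
print("ARI on random labelings: %.3f"
      % metrics.adjusted_rand_score(labels_x, labels_y))
print("V-measure on random labelings: %.3f"
      % metrics.v_measure_score(labels_x, labels_y))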
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
|
adamncasey/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pytest/testing/test_pytester.py
|
203
|
import pytest
import os
from _pytest.pytester import HookRecorder
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED
def test_make_hook_recorder(testdir):
item = testdir.getitem("def test_func(): pass")
recorder = testdir.make_hook_recorder(item.config.pluginmanager)
assert not recorder.getfailures()
pytest.xfail("internal reportrecorder tests need refactoring")
class rep:
excinfo = None
passed = False
failed = True
skipped = False
when = "call"
recorder.hook.pytest_runtest_logreport(report=rep)
failures = recorder.getfailures()
assert failures == [rep]
failures = recorder.getfailures()
assert failures == [rep]
class rep:
excinfo = None
passed = False
failed = False
skipped = True
when = "call"
rep.passed = False
rep.skipped = True
recorder.hook.pytest_runtest_logreport(report=rep)
modcol = testdir.getmodulecol("")
rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
rep.passed = False
rep.failed = True
rep.skipped = False
recorder.hook.pytest_collectreport(report=rep)
passed, skipped, failed = recorder.listoutcomes()
assert not passed and skipped and failed
numpassed, numskipped, numfailed = recorder.countoutcomes()
assert numpassed == 0
assert numskipped == 1
assert numfailed == 1
assert len(recorder.getfailedcollections()) == 1
recorder.unregister()
recorder.clear()
recorder.hook.pytest_runtest_logreport(report=rep)
pytest.raises(ValueError, "recorder.getfailures()")
def test_parseconfig(testdir):
config1 = testdir.parseconfig()
config2 = testdir.parseconfig()
assert config2 != config1
assert config1 != pytest.config
def test_testdir_runs_with_plugin(testdir):
testdir.makepyfile("""
pytest_plugins = "pytester"
def test_hello(testdir):
assert 1
""")
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def make_holder():
class apiclass:
def pytest_xyz(self, arg):
"x"
def pytest_xyz_noarg(self):
"x"
apimod = type(os)('api')
def pytest_xyz(arg):
"x"
def pytest_xyz_noarg():
"x"
apimod.pytest_xyz = pytest_xyz
apimod.pytest_xyz_noarg = pytest_xyz_noarg
return apiclass, apimod
@pytest.mark.parametrize("holder", make_holder())
def test_hookrecorder_basic(holder):
pm = PytestPluginManager()
pm.addhooks(holder)
rec = HookRecorder(pm)
pm.hook.pytest_xyz(arg=123)
call = rec.popcall("pytest_xyz")
assert call.arg == 123
assert call._name == "pytest_xyz"
pytest.raises(pytest.fail.Exception, "rec.popcall('abc')")
pm.hook.pytest_xyz_noarg()
call = rec.popcall("pytest_xyz_noarg")
assert call._name == "pytest_xyz_noarg"
def test_makepyfile_unicode(testdir):
global unichr
try:
unichr(65)
except NameError:
unichr = chr
testdir.makepyfile(unichr(0xfffd))
def test_inline_run_clean_modules(testdir):
test_mod = testdir.makepyfile("def test_foo(): assert True")
result = testdir.inline_run(str(test_mod))
assert result.ret == EXIT_OK
# rewrite module, now test should fail if module was re-imported
test_mod.write("def test_foo(): assert False")
result2 = testdir.inline_run(str(test_mod))
assert result2.ret == EXIT_TESTSFAILED
|
sklprogs/mclient
|
refs/heads/master
|
src/plugins/multitrancom/utils/utils.py
|
1
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import re
import os
import html
import operator
import urllib.request
from skl_shared.localize import _
import skl_shared.shared as sh
''' It seems to be highly difficult to extract short-full title pairs
since, unlike multitran.ru, there are no '<a title' tags; such
cases are coded as ordinary URLs. Use 'Commands.run_missing_titles' to
fill in new titles manually.
'''
class ExtractGroups:
def __init__(self):
''' Log in at multitran.com, select an article, click 'Add' and
copy the value of 'var MajorToMinor='.
'''
self.lst = ["Computing", "Information technology", "Computing", "SAP", "Computing", "Computer networks", "Computing", "Programming", "Computing", "Operation systems", "Computing", "Data processing", "Computing", "Neural networks", "Computing", "Internet", "Computing", "File extension", "Computing", "SAP tech.", "Computing", "SAP finance", "Computing", "Computer graphics", "Computing", "Computer games", "Computing", "Microsoft", "Computing", "Hacking", "Computing", "Chat and Internet slang", "Computing", "Software", "Computing", "Databases", "Computing", "Office equipment", "Computing", "Information security", "Computing", "Social media", "Computing", "Artificial intelligence", "Computing", "Computing slang", "Computing", "E-mail", "Computing", "Computer security", "Computing", "ASCII", "Geology", "Mineralogy", "Geology", "Geophysics", "Geology", "Seismology", "Geology", "Tectonics", "Geology", "Petrography", "Geology", "Geomorphology", "Geology", "Geochemistry", "Geology", "Hydrogeology", "Geology", "Crystallography", "Geology", "Lithology", "Geology", "Stratigraphy", "Geology", "Volcanology", "Geology", "Engineering geology", "Geology", "Spectroscopy", "Geology", "Geochronology", "Geology", "Mineral classification", "Geology", "Geomechanics", "Geology", "Mineral products", "Geology", "Soil science", "Geology", "Glaciology", "Geology", "Reservoir simulation", "Geology", "Speleology", "Geology", "Gemmology", "Geology", "Sedimentology", "Biology", "Zoology", "Biology", "Botany", "Biology", "Anatomy", "Biology", "Entomology", "Biology", "Embryology", "Biology", "Biophysics", "Biology", "Genetics", "Biology", "Microbiology", "Biology", "Cytology", "Biology", "Helminthology", "Biology", "Biochemistry", "Biology", "Paleontology", "Biology", "Ichthyology", "Biology", "Biotechnology", "Biology", "Genetic engineering", "Biology", "Molecular genetics", "Biology", "Cytogenetics", "Biology", "Biotaxy", "Biology", "Paleobotany", "Biology", "Herpetology (incl. 
serpentology)", "Biology", "Ethology", "Biology", "Palynology", "Biology", "Mycology", "Biology", "Ornithology", "Biology", "Biogeography", "Biology", "Protozoology", "Biology", "Malacology", "Biology", "Bioacoustics", "Biology", "Evolution", "Biology", "Carcinology", "Biology", "Geobotanics", "Biology", "Molecular biology", "Biology", "Chalcidology", "Biology", "Amphibians and reptiles", "Biology", "Mammals", "Biology", "Paleozoology", "Biology", "Acridology", "Biology", "Ampelography", "Biology", "Hydrobiology", "Aviation", "Aeronautics", "Aviation", "Navigation", "Aviation", "Aerial photography and topography", "Aviation", "Aviation medicine", "Aviation", "Military aviation", "Aviation", "Airports and air traffic control", "Aviation", "Helicopters", "Aviation", "Hydroplanes", "Aviation", "Airships", "Aviation", "ICAO", "Nautical", "Foil ships", "Nautical", "Navigation", "Nautical", "Navy", "Nautical", "Yachting", "Nautical", "Fishery (fishing industry)", "Nautical", "Shipbuilding", "Nautical", "Hovercraft", "Nautical", "Submarines", "Nautical", "Sailing ships", "Nautical", "Merchant navy", "Nautical", "Maritime law & Law of the Sea", "Nautical", "Ship handling", "Medical", "Psychiatry", "Medical", "Veterinary medicine", "Medical", "Surgery", "Medical", "Obstetrics", "Medical", "Anesthesiology", "Medical", "Ophthalmology", "Medical", "Gastroenterology", "Medical", "Dentistry", "Medical", "Laboratory equipment", "Medical", "Gynecology", "Medical", "Oncology", "Medical", "Urology", "Medical", "Neurosurgery", "Medical", "Orthopedics", "Medical", "Pediatrics", "Medical", "Sanitation", "Medical", "Traumatology", "Medical", "Diseases", "Medical", "Chromatography", "Medical", "Transplantology", "Medical", "Aviation medicine", "Medical", "Drug name", "Medical", "Drugs and addiction medicine", "Medical", "Electromedicine", "Medical", "Psychotherapy", "Medical", "Laser medicine", "Medical", "Pharmacy", "Medical", "Health care", "Medical", "Clinical trial", "Medical", "AIDS", "Medical", "Nursing", "Medical", "Optometry", "Medical", "Deafblindness", "Medical", "Logopedics", "Medical", "Physiotherapy", "Medical", "Speech disorders", "Medical", "Emergency medical care", "Dialectal", "Southern Chinese", "Dialectal", "Cantonese", "Dialectal", "Ritual", "Dialectal", "Shanghainese", "Dialectal", "Northeastern Mandarin", "Dialectal", "Beijing dialect", "Dialectal", "Vienna dialect", "Dialectal", "Eastern Chinese", "Dialectal", "Jamaican English", "Dialectal", "Northern Chinese", "Dialectal", "Salvadoran Spanish", "Dialectal", "Uruguayan Spanish", "Dialectal", "Torgut language", "Dialectal", "Derbet language", "Dialectal", "Sicilian", "Dialectal", "Middle Chinese", "Sports", "Chess", "Sports", "Polo", "Sports", "Doping", "Sports", "Kick volleyball", "Sports", "Croquet", "Sports", "Baseball", "Sports", "Football", "Sports", "Petanque", "Sports", "Fencing", "Sports", "Hockey", "Sports", "Figure skating", "Sports", "Billiards", "Sports", "Yachting", "Sports", "Bodybuilding", "Sports", "Basketball", "Sports", "American football", "Sports", "Horse racing", "Sports", "Sailing", "Sports", "Gymnastics", "Sports", "Shooting sport", "Sports", "Cycle sport", "Sports", "Skiing", "Sports", "Platform diving", "Sports", "Throw", "Sports", "Equestrianism", "Sports", "Ski jumping", "Sports", "Athletics", "Sports", "Luge", "Sports", "Rowing", "Sports", "Swimming", "Sports", "Handball", "Sports", "Volleyball", "Sports", "Tennis", "Sports", "Long jump", "Sports", "High jump", "Sports", "Speed skating", "Sports", "Table 
tennis", "Sports", "Trampolining", "Sports", "Archery", "Sports", "Water polo", "Sports", "Rugby football", "Sports", "Pole vaults", "Sports", "Weightlifting", "Sports", "Acrobatics", "Sports", "Biathlon", "Sports", "Curling", "Sports", "Sporting goods", "Sports", "Racing and motorsport", "Sports", "Badminton", "Military", "Artillery", "Military", "Military lingo", "Military", "Radiolocation", "Military", "Missiles", "Military", "Aerial photography and topography", "Military", "Weapons of mass destruction", "Military", "Ground forces (Army)", "Military", "Navy", "Military", "Armored vehicles", "Military", "NATO", "Military", "Air defense", "Military", "Anti-air artillery", "Military", "Explosives", "Military", "Military aviation", "Military", "Intelligence and security services", "Military", "Torpedoes", "Military", "Fortification", "Military", "Ammunition", "Philosophy", "Logic", "Technology", "Laboratory equipment", "Technology", "Fiber optic", "Technology", "Cybernetics", "Technology", "Metrology", "Technology", "Refrigeration", "Technology", "Welding", "Technology", "Household appliances", "Technology", "Automated equipment", "Technology", "Extrusion", "Technology", "Robotics", "Technology", "Tools", "Technology", "Microscopy", "Technology", "Automatic control", "Technology", "Measuring instruments", "Technology", "Isolation", "Technology", "Accumulators", "Technology", "Drives", "Technology", "Switches", "Technology", "Lighting (other than cinema)", "Technology", "Winding", "Technology", "SAP tech.", "Technology", "Sensitometry", "Technology", "Disaster recovery", "Technology", "Infrared technology", "Technology", "Level measurement", "Technology", "Nanotechnology", "Technology", "Air conditioners", "Technology", "Lasers", "Technology", "Vibration monitoring", "Technology", "Gyroscopes", "Technology", "Additive manufacturing & 3D printing", "Technology", "Photometry", "Agriculture", "Gardening", "Agriculture", "Fish farming (pisciculture)", "Agriculture", "Beekeeping", "Agriculture", "Melioration", "Agriculture", "Soil science", "Agriculture", "Poultry farming", "Agriculture", "Meat processing", "Agriculture", "Horse breeding", "Agriculture", "Floriculture", "Agriculture", "Milk production", "Agriculture", "Wine growing", "Agriculture", "Selective breeding", "Agriculture", "Phytophathology", "Agriculture", "Fodder", "Agriculture", "Zootechnics", "Agriculture", "Agrochemistry", "Agriculture", "Horticulture", "Agriculture", "Greenhouse technology", "Agriculture", "Animal husbandry", "Agriculture", "Pest control", "Agriculture", "Agronomy", "Agriculture", "Fertilizers", "Historical", "Archaeology", "Historical", "Heraldry", "Historical", "Classical antiquity (excl. 
mythology)", "Historical", "Anthropology", "Historical", "Egyptology", "Historical", "Socialism", "Historical", "East Germany (history)", "Historical", "Genealogy", "Historical", "Soviet", "Historical", "Cultural studies", "Historical", "Historical figure", "Historical", "Palaeography", "Chemistry", "Biochemistry", "Chemistry", "Laboratory equipment", "Chemistry", "Geochemistry", "Chemistry", "Forest chemistry", "Chemistry", "Chromatography", "Chemistry", "Spectroscopy", "Chemistry", "Physical chemistry", "Chemistry", "Electrochemistry", "Chemistry", "Agrochemistry", "Chemistry", "Alkaloids", "Chemistry", "Chemical nomenclature", "Chemistry", "Nuclear chemistry", "Chemistry", "Organic chemistry", "Chemistry", "Chemical compounds", "Chemistry", "Distillation", "Chemistry", "Colloid chemistry", "Chemistry", "Inorganic chemistry", "Chemistry", "Dyalysis", "Chemistry", "Electrolysis", "Construction", "Architecture", "Construction", "Road works", "Construction", "Heavy equipment vehicles", "Construction", "Welding", "Construction", "Plumbing", "Construction", "Paint work", "Construction", "Tools", "Construction", "Reinforced concrete", "Construction", "Wiring", "Construction", "Foundation engineering", "Construction", "Stonemasonry", "Construction", "Tunneling", "Construction", "Soil mechanics", "Construction", "Building structures", "Construction", "Valves", "Construction", "Road construction", "Construction", "Landscaping", "Construction", "Windows", "Construction", "Bridge construction", "Construction", "Road surface", "Construction", "Pipelines", "Construction", "Municipal planning", "Construction", "Dams", "Mathematics", "Statistics", "Mathematics", "Geometry", "Mathematics", "Algebra", "Mathematics", "Applied mathematics", "Mathematics", "Topology", "Mathematics", "Mathematical analysis", "Mathematics", "Gravimetry", "Mathematics", "Econometrics", "Religion", "Clerical", "Religion", "Bible", "Religion", "Quran", "Religion", "Eastern Orthodoxy", "Religion", "Catholic", "Religion", "Christianity", "Religion", "Judaism", "Religion", "Buddhism", "Religion", "Shinto", "Religion", "Cults and miscellaneous spiritual practices", "Religion", "Taoism", "Religion", "Islam", "Religion", "Confucianism", "Religion", "Hinduism", "Law", "Alternative dispute resolution", "Law", "Contracts", "Law", "Common law (Anglo-Saxon legal system)", "Law", "Patents", "Law", "Criminal law", "Law", "Bills", "Law", "Copyright", "Law", "Inheritance law", "Law", "Notarial practice", "Law", "Labor law", "Law", "Private international law", "Law", "Procedural law", "Law", "Public law", "Law", "Legal theory", "Law", "Administrative law", "Law", "International law", "Law", "Economic law", "Law", "Civil law", "Law", "Civil procedure", "Law", "Maritime law & Law of the Sea", "Law", "Antitrust law", "Law", "Court (law)", "Economy", "Commerce", "Economy", "Insurance", "Economy", "Trade classification", "Economy", "Investment", "Economy", "Foreign trade", "Economy", "World trade organization", "Economy", "Marketing", "Economy", "Political economy", "Economy", "International trade", "Economy", "Employment", "Linguistics", "Grammar", "Linguistics", "Stylistics", "Linguistics", "Phonetics", "Linguistics", "Psycholinguistics", "Linguistics", "Phonology", "Linguistics", "Pragmatics", "Linguistics", "Syntax", "Linguistics", "Morphology", "Linguistics", "Semantics", "Linguistics", "Typology", "Linguistics", "Semiotics", "Linguistics", "Sociolinguistics", "Linguistics", "Tagmemics", "Linguistics", "Neurolinguistics", "Finances", 
"Accounting", "Finances", "Insurance", "Finances", "Stock Exchange", "Finances", "Banking", "Finances", "Audit", "Finances", "Foreign exchange market", "Finances", "Investment", "Finances", "European Bank for Reconstruction and Development", "Finances", "Currencies and monetary policy", "Finances", "SAP finance", "Finances", "Securities", "Finances", "NASDAQ", "Finances", "New York Stock Exchange", "Finances", "American stock exchange", "Finances", "International Monetary Fund", "Finances", "Charities", "Finances", "Digital and cryptocurrencies", "Finances", "Offshore companies", "Geography", "Meteorology", "Geography", "Hydrography", "Geography", "Topography", "Geography", "Ethnography", "Geography", "Cartography", "Geography", "Demography", "Geography", "Seismology", "Geography", "Geomorphology", "Geography", "Oceanography (oceanology)", "Geography", "Aerial photography and topography", "Geography", "Soil science", "Geography", "Ice formation", "Geography", "Antarctic", "Geography", "Hydrology", "Geography", "Ethnology", "Geography", "Earth sciences", "Geography", "Climatology", "Geography", "Limnology", "Geography", "Remote sensing", "Geography", "Hydrometry", "Geography", "Administrative geography", "Mining", "Ore formation", "Mining", "Drilling", "Mining", "Mineral processing", "Mining", "Mine surveying", "Mining", "Gold mining", "Mining", "Coal", "Mining", "Quarrying", "Cinematography", "Cinema equipment", "Cinematography", "Narrow film", "Cinematography", "Film lighting equipment", "Cinematography", "Photographical sound recording", "Cinematography", "Projectors", "Cinematography", "Sound recording", "Cinematography", "Film processing", "Cinematography", "Filming equipment", "Cinematography", "Magnetic image recording", "Cinematography", "Animation and animated films", "Cinematography", "Sound engineering", "Cooking", "Confectionery", "Cooking", "Spices", "Cooking", "Beverages", "Martial arts and combat sports", "Karate", "Martial arts and combat sports", "Boxing", "Martial arts and combat sports", "Aikido", "Martial arts and combat sports", "Wrestling", "Martial arts and combat sports", "Judo", "Martial arts and combat sports", "Sumo", "Martial arts and combat sports", "Wushu", "Martial arts and combat sports", "Taekwondo", "Metallurgy", "Roll stock", "Metallurgy", "Blast-furnace practice", "Metallurgy", "Open-hearth process", "Metallurgy", "Continuous casting", "Metallurgy", "Nonferrous industry", "Metallurgy", "Powder metallurgy", "Metallurgy", "Forging", "Metallurgy", "Metal science", "Metallurgy", "Electrothermy", "Metallurgy", "Electrometallurgy", "Metallurgy", "Foundry", "Metallurgy", "Aluminium industry", "Metallurgy", "Alloy addition", "Metallurgy", "Steel production", "Metallurgy", "Cast iron", "Mythology", "Greek and Roman mythology", "Mythology", "Norse mythology", "Mythology", "Hinduism", "Politics", "Foreign policy", "Politics", "Disaster recovery", "Politics", "International relations", "Politics", "Elections", "Politics", "Public relations", "Psychology", "Psychotherapy", "Psychology", "Psychophysiology", "Psychology", "Mental health", "Psychology", "Psycholinguistics", "Psychology", "Ethnopsychology", "Psychology", "Neuropsychology", "Psychology", "Psychopathology", "Physics", "Optics (branch of physics)", "Physics", "Biophysics", "Physics", "Mechanics", "Physics", "Magnetics", "Physics", "Nuclear physics", "Physics", "Acoustics", "Physics", "Hydraulics", "Physics", "Quantum mechanics", "Physics", "Nonlinear optics", "Physics", "Metal physics", "Physics", 
"Spectroscopy", "Physics", "Solid-state physics", "Physics", "Quantum electronics", "Physics", "Tribology", "Physics", "Aerodynamics", "Physics", "Thermodynamics", "Physics", "Aerohydrodynamics", "Physics", "Astrophysics", "Physics", "Ballistics", "Physics", "Heat transfer", "Physics", "Hydroacoustics", "Physics", "Electricity", "Physics", "Piezoelectric crystals", "Photography", "Film processing", "Electronics", "Integrated circuits", "Electronics", "Cathode-ray tubes", "Electronics", "Microelectronics", "Electronics", "High frequency electronics", "Electronics", "Printed circuit boards", "Electronics", "Vacuum tubes", "Electronics", "Power electronics", "Literature", "Poetry (terminology)", "Literature", "Fantasy and science fiction", "Literature", "Librarianship", "Literature", "Quotes and aphorisms", "Literature", "Fairy tales", "Literature", "Titles of works of art", "Literature", "Screenwriting", "Folklore", "Proverb", "Folklore", "Saying", "Folklore", "Puzzle", "Sociology", "Survey", "Communications", "Radio", "Communications", "Telecommunications", "Communications", "Postal service", "Communications", "Telephony", "Communications", "Telegraphy", "Communications", "Internet", "Communications", "Telemechanics", "Communications", "Short message service", "Communications", "Satellite communications", "Communications", "Antennas and waveguides", "Communications", "Mobile and cellular communications", "Communications", "E-mail", "Transport", "Railway term", "Transport", "Automobiles", "Transport", "Road works", "Transport", "Road traffic", "Transport", "Cycling (other than sport)", "Transport", "Motorcycles", "Transport", "Trucks/Lorries", "Transport", "Road sign", "Transport", "International transportation", "Transport", "Oils and lubricants", "Transport", "Metro and rapid transit", "Transport", "Helicopters", "Transport", "Traffic control", "Transport", "Public transportation", "Food industry", "Confectionery", "Food industry", "Canning", "Food industry", "Sugar production", "Food industry", "Brewery", "Food industry", "Meat processing", "Food industry", "Alcohol distilling", "Food industry", "Milk production", "Food industry", "Flour production", "Food industry", "Bakery", "Food industry", "Cheesemaking (caseiculture)", "Food industry", "Starch industry", "Food industry", "Fat-and-oil industry", "Food industry", "Glass container manufacture", "Food industry", "Winemaking", "Food industry", "Fermentation", "Food industry", "Champagne and sparkling wines", "Food industry", "Coffee", "Food industry", "Beverages", "Food industry", "Groceries", "Food industry", "Wine tasting", "Energy industry", "Nuclear and fusion power", "Energy industry", "Hydroelectric power stations", "Energy industry", "Solar power", "Energy industry", "Transformers", "Energy industry", "Power lines", "Energy industry", "Energy system", "Energy industry", "Energy distribution", "Energy industry", "Power system protection", "Energy industry", "Wind Energy", "Energy industry", "Thermal Energy", "Energy industry", "Bioenergy", "Energy industry", "Electricity generation", "Mass media", "Radio", "Mass media", "Television", "Mass media", "Advertising", "Mass media", "Internet", "Mass media", "Journalism (terminology)", "Business", "Alternative dispute resolution", "Business", "Commerce", "Business", "Contracts", "Business", "Business style", "Business", "Trademark", "Business", "Advertising", "Business", "Trade classification", "Business", "Investment", "Business", "Companies & Partnerships", "Business", "Exhibitions", 
"Business", "Human resources", "Business", "Corporate governance", "Business", "Public relations", "Business", "Consulting", "Business", "Electronic commerce", "Business", "Legal entity types (business legal structures)", "Business", "Offshore companies", "Publishing", "Polygraphy", "Publishing", "Copyright", "Publishing", "Typography", "Publishing", "Book binding", "Engineering", "Architecture", "Engineering", "Surveying", "Engineering", "Strength of materials", "Engineering", "Thermal engineering", "Engineering", "Refrigeration", "Engineering", "Hydraulics", "Engineering", "Heating", "Engineering", "Drawing", "Engineering", "Radiogeodesy", "Engineering", "Bionics", "Engineering", "Tunneling", "Engineering", "Soil mechanics", "Engineering", "Building structures", "Engineering", "Water supply", "Engineering", "Ventilation", "Engineering", "Valves", "Engineering", "Air conditioners", "Engineering", "Heat exchangers", "Engineering", "Landscaping", "Engineering", "Hydromechanics", "Engineering", "Sewage and wastewater treatment", "Engineering", "Pipelines", "Engineering", "Hydraulic engineering", "Engineering", "Municipal planning", "Engineering", "Seismic resistance", "Engineering", "Design", "Production", "Labor organization", "Production", "Packaging", "Production", "Planning", "Production", "Converter industry", "Production", "Jewelry", "Production", "Industrial hygiene", "Production", "Facilities", "Production", "Glass production", "Production", "Ceramics", "Production", "Trade unions", "Production", "Permit to work system", "Production", "Personal protective equipment", "Production", "Tinware", "Management", "Labor organization", "Management", "Human resources", "Management", "Project management", "Management", "Corporate governance", "Management", "Risk Management", "Education", "University", "Education", "School and university subjects", "Education", "Social science", "Education", "Physical sciences", "Education", "Pedagogics", "Industry", "Mechanic engineering", "Industry", "Silicate industry", "Industry", "Press equipment", "Industry", "Stamping", "Industry", "Wire drawing", "Industry", "Industrial hygiene", "Industry", "Waste management", "Industry", "Tobacco industry", "Industry", "Machine tools", "Industry", "Materials science", "Industry", "Metalworking", "Occupational health & safety", "Permit to work system", "Occupational health & safety", "Personal protective equipment", "Philology", "Rhetoric", "Philology", "Palaeography", "Travel", "Hotel industry", "Quality control and standards", "Reliability", "Quality control and standards", "Non-destructive testing", "Quality control and standards", "GOST", "Medical appliances", "Electrophoresis", "Medical appliances", "Microscopy", "Medical appliances", "Dental implantology", "Medical appliances", "Radiography", "Medical appliances", "Hearing aid", "Medical appliances", "Magnetic tomography", "Medical appliances", "Computer tomography", "Medical appliances", "Ultrasound", "Machinery and mechanisms", "Engines", "Machinery and mechanisms", "Automated equipment", "Machinery and mechanisms", "Watchmaking", "Machinery and mechanisms", "Electric machinery", "Machinery and mechanisms", "Electric traction", "Machinery and mechanisms", "Elevators", "Machinery and mechanisms", "Machine components", "Machinery and mechanisms", "Combustion gas turbines", "Machinery and mechanisms", "Pumps", "Machinery and mechanisms", "Gear train", "Machinery and mechanisms", "Jet engines", "Machinery and mechanisms", "Turbines", "Machinery and mechanisms", 
"Pneumatics", "Machinery and mechanisms", "Electric motors", "Machinery and mechanisms", "Ball bearings", "Security systems", "Cryptography", "Security systems", "Signalling", "Security systems", "Biometry", "Security systems", "Infrared technology", "Security systems", "Information security", "Security systems", "Computer security", "Security systems", "Identification systems", "Security systems", "Video recording", "Oil and gas", "Oil / petroleum", "Oil and gas", "Drilling", "Oil and gas", "Sakhalin", "Oil and gas", "Oil and gas technology", "Oil and gas", "Oilfields", "Oil and gas", "Sakhalin R", "Oil and gas", "Sakhalin S", "Oil and gas", "Sakhalin A", "Oil and gas", "Well control", "Oil and gas", "Molikpaq", "Oil and gas", "Gas processing plants", "Oil and gas", "Oil processing plants", "Oil and gas", "Tengiz", "Oil and gas", "Karachaganak", "Oil and gas", "Caspian", "Oil and gas", "Flow measurement", "Oil and gas", "Oils and lubricants", "Regional usage (other than language varieties)", "American (usage, not AmE)", "Regional usage (other than language varieties)", "Eskimo (usage)", "Regional usage (other than language varieties)", "Australian", "Regional usage (other than language varieties)", "South African", "Regional usage (other than language varieties)", "Ukrainian (usage)", "Regional usage (other than language varieties)", "New Zealand", "Regional usage (other than language varieties)", "Netherlands (usage)", "Regional usage (other than language varieties)", "British (usage, not BrE)", "Regional usage (other than language varieties)", "Irish (usage, not language)", "Regional usage (other than language varieties)", "Canadian", "Regional usage (other than language varieties)", "Scottish (usage)", "Regional usage (other than language varieties)", "Austrian (usage)", "Regional usage (other than language varieties)", "Rhine", "Regional usage (other than language varieties)", "Swiss term", "Regional usage (other than language varieties)", "South German", "Regional usage (other than language varieties)", "Northern German", "Regional usage (other than language varieties)", "Berlin expression", "Regional usage (other than language varieties)", "East-Middle-German", "Regional usage (other than language varieties)", "South-West-German", "Regional usage (other than language varieties)", "Middle German", "Regional usage (other than language varieties)", "Lower German", "Regional usage (other than language varieties)", "West-German", "Regional usage (other than language varieties)", "Local name", "Regional usage (other than language varieties)", "Mexican", "Regional usage (other than language varieties)", "North American (USA and Canada)", "Regional usage (other than language varieties)", "Slavonic", "Regional usage (other than language varieties)", "Oriental", "Regional usage (other than language varieties)", "Puerto Rican Spanish", "Regional usage (other than language varieties)", "Salvadoran Spanish", "Regional usage (other than language varieties)", "Uruguayan Spanish", "Regional usage (other than language varieties)", "Ecuador", "Regional usage (other than language varieties)", "Belgian (usage)", "Regional usage (other than language varieties)", "Indonesian", "Regional usage (other than language varieties)", "Southern Dutch", "Regional usage (other than language varieties)", "Polynesian", "Regional usage (other than language varieties)", "Tuscan", "Regional usage (other than language varieties)", "Neapolitan", "Regional usage (other than language varieties)", "Latin American", "Regional 
usage (other than language varieties)", "African", "Logistics", "Warehouse", "Logistics", "Procurement", "Logistics", "Loading equipment", "Logistics", "International transportation", "Foreign affairs", "Diplomacy", "Foreign affairs", "Foreign policy", "Foreign affairs", "International relations", "Foreign affairs", "Immigration and citizenship", "Building materials", "Concrete", "Building materials", "Bricks", "Building materials", "Refractory materials", "Building materials", "Paint, varnish and lacquer", "Building materials", "Cement", "Building materials", "Ceramic tiles", "Building materials", "Astringents", "Building materials", "Drywall", "Space", "Astronomy", "Space", "Astronautics", "Space", "Missiles", "Space", "Astrometry", "Space", "Astrophysics", "Space", "Radioastronomy", "Space", "Celestial mechanics", "Space", "Astrospectroscopy", "Space", "NASA", "Space", "Apollo-Soyuz", "Space", "Remote sensing", "Electrical engineering", "Transformers", "Electrical engineering", "Semiconductors", "Electrical engineering", "Tools", "Electrical engineering", "Cables and cable production", "Electrical engineering", "Electric machinery", "Electrical engineering", "Measuring instruments", "Electrical engineering", "Isolation", "Electrical engineering", "Power system protection", "Electrical engineering", "Electricity", "Electrical engineering", "Electric motors", "Electrical engineering", "Power electronics", "Electrical engineering", "Superconductivity", "United Nations", "World trade organization", "United Nations", "International Monetary Fund", "United Nations", "ICAO", "Life sciences", "Psychiatry", "Life sciences", "Pharmacology", "Life sciences", "Bacteriology", "Life sciences", "Physiology", "Life sciences", "Allergology", "Life sciences", "Cardiology", "Life sciences", "Anesthesiology", "Life sciences", "Ophthalmology", "Life sciences", "Radiology", "Life sciences", "Immunology", "Life sciences", "Neurology", "Life sciences", "Gastroenterology", "Life sciences", "Hematology", "Life sciences", "Dermatology", "Life sciences", "Pathology", "Life sciences", "Teratology", "Life sciences", "Parasitology", "Life sciences", "Histology", "Life sciences", "Virology", "Life sciences", "Toxicology", "Life sciences", "Oncology", "Life sciences", "Venereology", "Life sciences", "Urology", "Life sciences", "Nephrology", "Life sciences", "Sexology", "Life sciences", "Orthopedics", "Life sciences", "Pulmonology", "Life sciences", "Traumatology", "Life sciences", "Epidemiology", "Life sciences", "Endocrinology", "Life sciences", "Transplantology", "Life sciences", "Mammalogy", "Life sciences", "Radiobiology", "Life sciences", "Logopedics", "Life sciences", "Dietology", "Natural resourses and wildlife conservation", "Forestry", "Natural resourses and wildlife conservation", "Ecology", "Natural resourses and wildlife conservation", "Environment", "Natural resourses and wildlife conservation", "Taxation of forests", "Natural resourses and wildlife conservation", "Waste management", "Natural resourses and wildlife conservation", "Antarctic", "Natural resourses and wildlife conservation", "Lean production", "Natural resourses and wildlife conservation", "Water resources", "Hobbies and pastimes", "Handicraft", "Hobbies and pastimes", "Angling (hobby)", "Hobbies and pastimes", "Model sports", "Government, administration and public services", "Police", "Government, administration and public services", "Taxes", "Government, administration and public services", "Customs", "Government, administration and public 
services", "Welfare & Social Security", "Government, administration and public services", "Health care", "Government, administration and public services", "Public utilities", "Government, administration and public services", "Penitentiary system", "Human rights activism", "LGBT", "Medicine - Alternative medicine", "Acupuncture", "Medicine - Alternative medicine", "Homeopathy", "Medicine - Alternative medicine", "Somatics", "Medicine - Alternative medicine", "Traditional medicine", "Medicine - Alternative medicine", "Manual therapy and osteopathy", "Proper name", "Trademark", "Proper name", "Company name", "Proper name", "Drug name", "Proper name", "Names and surnames", "Proper name", "Toponym", "Proper name", "Surname", "Proper name", "Given name", "Proper name", "Name of organization", "Proper name", "Titles of works of art", "Chemical industry", "Resins", "Chemical industry", "Silicate industry", "Chemical industry", "Polymers", "Chemical industry", "Galvanoplasty", "Chemical industry", "Stratified plastics", "Chemical industry", "Plastics", "Chemical industry", "Forest chemistry", "Chemical industry", "Agrochemistry", "Chemical industry", "Galvanizing", "Chemical industry", "Chemical fibers", "Chemical industry", "Dyes", "Chemical industry", "Astringents", "Chemical industry", "Material safety data sheet", "Chemical industry", "Enameling", "Wellness", "Hygiene", "Wellness", "Perfume", "Wellness", "Cosmetics and cosmetology", "Wellness", "Hairdressing", "Wellness", "Dietology", "Records management", "Bibliography", "Records management", "Office equipment", "Records management", "Archiving", "Records management", "Work flow", "Records management", "Typewriters and typewriting", "Records management", "Stationery", "Multimedia", "Television", "Multimedia", "Projectors", "Multimedia", "Sound recording", "Multimedia", "Stereo", "Multimedia", "LP players", "Multimedia", "Hi-Fi", "Multimedia", "Digital sound processing", "Multimedia", "Video recording", "Multimedia", "Audio electronics", "Games (other than sports)", "Card games", "Games (other than sports)", "Chess", "Games (other than sports)", "Bridge (card game)", "Games (other than sports)", "Dice", "Games (other than sports)", "Tabletop games", "Games (other than sports)", "Checkers", "Games (other than sports)", "Billiards", "Games (other than sports)", "Gambling", "Games (other than sports)", "Bowling", "Games (other than sports)", "Mahjong", "Games (other than sports)", "Computer games", "Games (other than sports)", "Golf", "Games (other than sports)", "Darts", "Games (other than sports)", "Cricket", "Outdoor activities and extreme sports", "Aeronautics", "Outdoor activities and extreme sports", "Cycling (other than sport)", "Outdoor activities and extreme sports", "Alpine skiing", "Outdoor activities and extreme sports", "Scuba diving", "Outdoor activities and extreme sports", "Skateboarding", "Outdoor activities and extreme sports", "Skydiving", "Outdoor activities and extreme sports", "Mountaineering", "Outdoor activities and extreme sports", "Sailing", "Outdoor activities and extreme sports", "Speed skating", "Outdoor activities and extreme sports", "Waterskiing", "Outdoor activities and extreme sports", "Paragliding", "Outdoor activities and extreme sports", "Windsurfing", "Outdoor activities and extreme sports", "Snowboard", "Law enforcement", "Police", "Law enforcement", "Criminology", "Law enforcement", "Forensic medicine", "Law enforcement", "Procedural law", "Law enforcement", "Combating corruption", "Law enforcement", 
"Explosives", "Law enforcement", "Intelligence and security services", "Law enforcement", "Federal Bureau of Investigation", "Law enforcement", "Dactyloscopy", "Law enforcement", "Organized crime", "Law enforcement", "Penitentiary system", "Law enforcement", "Forensics", "Collecting", "Philately / stamp collecting", "Collecting", "Numismatics", "Collecting", "Phaleristics", "Service industry", "Hotel industry", "Service industry", "Hairdressing", "Service industry", "Food service and catering", "Languages", "Ancient Greek", "Languages", "French", "Languages", "Latin", "Languages", "Arabic language", "Languages", "Hungarian Language", "Languages", "Dutch", "Languages", "Greek", "Languages", "Hindi", "Languages", "Irish", "Languages", "Spanish", "Languages", "Italian", "Languages", "German", "Languages", "Russian language", "Languages", "Sanskrit", "Languages", "Turkish language", "Languages", "Persian", "Languages", "Chinese", "Languages", "Swedish", "Languages", "Czech", "Languages", "Turk", "Languages", "Scandinavian", "Languages", "Romanian", "Languages", "Portuguese", "Languages", "Polish", "Languages", "Moldavian", "Languages", "Malay", "Languages", "Korean", "Languages", "Iceland", "Languages", "Ancient Hebrew", "Languages", "Hebrew", "Languages", "Danish", "Languages", "Hawaii", "Languages", "Mongolian", "Languages", "Manchu language", "Languages", "Japanese language", "Languages", "Ancient French", "Languages", "Norway", "Languages", "Gaelic", "Languages", "Thai", "Languages", "Tatar", "Languages", "Yiddish", "Languages", "Maori", "Languages", "Esperanto", "Languages", "Vietnamese", "Languages", "Albanian language", "Languages", "Bulgarian language", "Languages", "Estonian language", "Languages", "Tibetan", "Languages", "English", "Languages", "Belarusian language", "Languages", "Ukrainian language", "Languages", "Finnish language", "Jargon and slang", "Jargon", "Jargon and slang", "Spanish-American", "Jargon and slang", "Professional jargon", "Jargon and slang", "Military lingo", "Jargon and slang", "School", "Jargon and slang", "Slang", "Jargon and slang", "College vernacular", "Jargon and slang", "Black slang", "Jargon and slang", "Criminal jargon", "Jargon and slang", "Cockney rhyming slang", "Jargon and slang", "Latin American slang", "Jargon and slang", "Drug-related slang", "Jargon and slang", "Verlan", "Jargon and slang", "Prison slang", "Jargon and slang", "Chat and Internet slang", "Jargon and slang", "Youth slang", "Jargon and slang", "Computing slang", "Jargon and slang", "Police jargon", "Stylistic values", "Obsolete / dated", "Stylistic values", "Poetic", "Stylistic values", "Bookish / literary", "Stylistic values", "Rude", "Stylistic values", "Childish", "Stylistic values", "Euphemistic", "Stylistic values", "Slang", "Stylistic values", "Business style", "Stylistic values", "Nonstandard", "Stylistic values", "Formal", "Stylistic values", "Officialese", "Stylistic values", "Sublime", "Stylistic values", "Low register", "Stylistic values", "Invective", "Stylistic values", "Scientific", "Stylistic values", "News style", "Stylistic values", "Old-fashioned", "Stylistic values", "Cliche", "Stylistic values", "Archaic", "Stylistic values", "Taboo expressions and obscenities", "Stylistic values", "Vernacular language", "Stylistic values", "Modern use", "Stylistic values", "Soviet", "Stylistic values", "Neologism", "Stylistic values", "Epistolary", "Stylistic values", "Barbarism", "Stylistic values", "Spoken", "Stylistic values", "Vulgar", "Stylistic values", "Written", 
"Countries and regions", "Scotland", "Countries and regions", "Turkey", "Countries and regions", "Canada", "Countries and regions", "Spain", "Countries and regions", "Australia", "Countries and regions", "Israel", "Countries and regions", "France", "Countries and regions", "Japan", "Countries and regions", "Belarus", "Countries and regions", "United Kingdom", "Countries and regions", "Germany", "Countries and regions", "Wales", "Countries and regions", "Northern Ireland", "Countries and regions", "West Indies", "Countries and regions", "Andalusia", "Countries and regions", "Antilles", "Countries and regions", "Aragon", "Countries and regions", "Argentina", "Countries and regions", "Asturias", "Countries and regions", "Bolivia", "Countries and regions", "Brazil", "Countries and regions", "Venezuela", "Countries and regions", "Galicia", "Countries and regions", "Guatemala", "Countries and regions", "Columbia", "Countries and regions", "Costa Rica", "Countries and regions", "Cuba", "Countries and regions", "Netherlands", "Countries and regions", "Morocco", "Countries and regions", "Panama", "Countries and regions", "Peru", "Countries and regions", "Philippines", "Countries and regions", "Central America", "Countries and regions", "Chile", "Countries and regions", "South Asia", "Countries and regions", "South America", "Countries and regions", "United States", "Countries and regions", "Africa", "Countries and regions", "European Union", "Countries and regions", "Dominican Republic", "Countries and regions", "Algeria", "Countries and regions", "Afghanistan", "Countries and regions", "Taiwan", "Countries and regions", "Ukraine", "Countries and regions", "Austria", "Countries and regions", "Kazakhstan", "Countries and regions", "Cyprus", "Countries and regions", "Russia", "Countries and regions", "India", "Countries and regions", "Kyrgyzstan", "Countries and regions", "China", "Countries and regions", "Iran", "Grammatical labels", "Collective", "Grammatical labels", "Abbreviation", "Grammatical labels", "Diminutive", "Grammatical labels", "Iimitative (onomatopoeic)", "Grammatical labels", "Exclamation", "Grammatical labels", "Augmentative", "Grammatical labels", "Affectionate", "Auxilliary categories (editor use only)", "British English", "Auxilliary categories (editor use only)", "American English", "Auxilliary categories (editor use only)", "Old orthography", "Auxilliary categories (editor use only)", "Misused", "Auxilliary categories (editor use only)", "Loan translation", "Auxilliary categories (editor use only)", "Meaning 1", "Auxilliary categories (editor use only)", "Meaning 2", "Auxilliary categories (editor use only)", "Meaning 3", "Auxilliary categories (editor use only)", "Translator's false friend", "Parasciences", "Astrology", "Parasciences", "Parapsychology", "Parasciences", "Esoterics", "Parasciences", "Ufology", "Art and culture (n.e.s.)", "Painting", "Art and culture (n.e.s.)", "Art", "Art and culture (n.e.s.)", "Music", "Art and culture (n.e.s.)", "Rhetoric", "Art and culture (n.e.s.)", "Theatre", "Art and culture (n.e.s.)", "Circus", "Art and culture (n.e.s.)", "Tauromachy", "Art and culture (n.e.s.)", "Choreography", "Art and culture (n.e.s.)", "Librarianship", "Art and culture (n.e.s.)", "Museums", "Art and culture (n.e.s.)", "Comics", "Art and culture (n.e.s.)", "Fashion", "Art and culture (n.e.s.)", "Cultural studies", "Art and culture (n.e.s.)", "Sculpture", "Art and culture (n.e.s.)", "Manga", "Art and culture (n.e.s.)", "Titles of works of art", "Art and culture 
(n.e.s.)", "Design", "Art and culture (n.e.s.)", "Musical instruments", "Art and culture (n.e.s.)", "Dancing", "Art and culture (n.e.s.)", "Calligraphy", "Art and culture (n.e.s.)", "Ballet", "Emotional values", "Ironical", "Emotional values", "Humorous / Jocular", "Emotional values", "Rude", "Emotional values", "Gloomy", "Emotional values", "Contemptuous", "Emotional values", "Disapproving", "Emotional values", "Emotive", "Emotional values", "Avuncular", "Emotional values", "Pompous", "Emotional values", "Derogatory", "Emotional values", "Affectionate", "Emotional values", "Respectful", "Emotional values", "Pejorative", "Emotional values", "Polite", "Emotional values", "Sarcastical", "Emotional values", "Laudatory", "Light industries", "Textile industry", "Light industries", "Leather", "Light industries", "Knitted goods", "Light industries", "Sewing and clothing industry", "Light industries", "Clothing", "Light industries", "Footwear", "Light industries", "Haberdashery", "Wood, pulp and paper industries", "Pulp and paper industry", "Wood, pulp and paper industries", "Wood processing", "Wood, pulp and paper industries", "Matches", "Wood, pulp and paper industries", "Timber floating", "Wood, pulp and paper industries", "Logging", "Crafts", "Cooperage", "Crafts", "Spinning", "Crafts", "Weaving", "Companion animals", "Dog breeding", "Companion animals", "Pets", "Companion animals", "Felinology", "Subjects for Chinese dictionaries (container)", "Dragon boat", "Subjects for Chinese dictionaries (container)", "Dragon dance", "Subjects for Chinese dictionaries (container)", "Northeastern Mandarin", "Subjects for Chinese dictionaries (container)", "Verbatim", "Subjects for Chinese dictionaries (container)", "Eastern Chinese", "Subjects for Chinese dictionaries (container)", "Kabaddi", "Subjects for Chinese dictionaries (container)", "Mahjong", "Subjects for Chinese dictionaries (container)", "Conventional notation", "Subjects for Chinese dictionaries (container)", "Middle Chinese", "Subjects for Chinese dictionaries (container)", "Pigeon racing", "Subjects for Chinese dictionaries (container)", "Instead of"]
self.Success = True
self.subjs = {}
def run(self):
self.check()
self.parse()
self.get_dict()
self.get_list()
def get_list(self):
f = '[MClient] plugins.multitrancom.utils.utils.ExtractGroups.get_list'
if self.Success:
lst = []
majors = sorted(self.subjs.keys())
for major in majors:
minors = sorted(self.subjs[major])
minors.insert(0,major)
lst.append(minors)
sh.com.run_fast_debug(f,lst)
else:
sh.com.cancel(f)
def get_dict(self):
f = '[MClient] plugins.multitrancom.utils.utils.ExtractGroups.get_dict'
if self.Success:
mes = []
majors = sorted(self.subjs.keys())
for major in majors:
minors = sorted(self.subjs[major])
sub = ''''{}': {},\n'''.format(major,minors)
mes.append(sub)
mes = ''.join(mes)
sh.com.run_fast_debug(f,mes)
else:
sh.com.cancel(f)
def check(self):
f = '[MClient] plugins.multitrancom.utils.utils.ExtractGroups.check'
if self.lst:
if len(self.lst) % 2 != 0:
self.Success = False
sub = '{} % 2 == 0'.format(len(self.lst))
mes = _('The condition "{}" is not observed!')
mes = mes.format(sub)
sh.objs.get_mes(f,mes,True).show_warning()
else:
self.Success = False
sh.com.rep_empty(f)
def parse(self):
f = '[MClient] plugins.multitrancom.utils.utils.ExtractGroups.parse'
if self.Success:
i = 0
while i < len(self.lst):
                if self.lst[i] not in self.subjs:
self.subjs[self.lst[i]] = []
self.subjs[self.lst[i]].append(self.lst[i+1])
i += 2
else:
sh.com.cancel(f)
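# A minimal usage sketch for 'ExtractGroups' (illustrative only; it assumes
# 'skl_shared' is installed and its message/debug objects are configured):
#     groups = ExtractGroups()
#     groups.run()  # checks that the flat list has an even length, builds the
#                   # {major: [minor, ...]} mapping and shows it via run_fast_debug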
class Pairs:
# Determine language pairs supported by MT
def __init__(self):
self.set_values()
def get_blacklist(self):
        ''' Read a list of URLs that caused network errors and return
            a list of language-code pairs that cannot be used.
        '''
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.get_blacklist'
file = '/tmp/urls'
        pattern = r'https://www\.multitran\.com/m\.exe\?l1=(\d+)&l2=(\d+)&SHL=2&s='
text = sh.ReadTextFile(file).get()
if text:
lst = text.splitlines()
lst = [item.strip() for item in lst if item.strip()]
if lst:
codes = []
for url in lst:
match = re.match(pattern,url)
if match:
code1 = int(match.group(1))
code2 = int(match.group(2))
codes.append((code1,code2))
return codes
else:
sh.com.rep_empty(f)
else:
sh.com.rep_empty(f)
def get_bad_gateway(self):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.get_bad_gateway'
file = '/tmp/urls'
text = sh.ReadTextFile(file).get()
if text:
lst = text.splitlines()
lst = [item.strip() for item in lst if item.strip()]
if lst:
errors = []
for i in range(len(lst)):
mes = '{}/{}'.format(i+1,len(lst))
sh.objs.get_mes(f,mes,True).show_info()
try:
req = urllib.request.Request (url = lst[i]
,data = None
,headers = {'User-Agent': \
'Mozilla'
}
)
urllib.request.urlopen(req,timeout=12).read()
if self.Verbose:
mes = _('[OK]: "{}"').format(lst[i])
sh.objs.get_mes(f,mes,True).show_info()
except Exception as e:
if 'gateway' in str(e).lower():
errors.append(lst[i])
if errors:
mes = '\n'.join(errors)
sh.objs.get_mes(f,mes,True).show_info()
else:
mes = _('No matches!')
sh.objs.get_mes(f,mes,True).show_info()
else:
sh.com.rep_empty(f)
else:
sh.com.rep_empty(f)
def get_lang(self,code):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.get_lang'
if isinstance(code,int):
for lang in self.dic.keys():
if self.dic[lang]['code'] == code:
return lang
else:
mes = _('Wrong input data: "{}"!').format(code)
sh.objs.get_mes(f,mes).show_error()
def rep_remaining(self):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.rep_remaining'
file = '/tmp/urls'
        pattern = r'https://www\.multitran\.com/m\.exe\?l1=(\d+)&l2=(\d+)&SHL=2&s='
text = sh.ReadTextFile(file).get()
if text:
lst = text.splitlines()
lst = [item.strip() for item in lst if item.strip()]
if lst:
pairs = []
for url in lst:
match = re.match(pattern,url)
if match:
code1 = int(match.group(1))
code2 = int(match.group(2))
if self.is_pair(code1,code2):
lang1 = self.get_lang(code1)
lang2 = self.get_lang(code2)
if lang1 and lang2:
pairs.append(lang1 + ' <=> ' + lang2)
else:
sh.com.rep_empty(f)
if pairs:
mes = '\n'.join(pairs)
sh.objs.get_mes(f,mes).show_info()
else:
mes = _('No matches!')
sh.objs.get_mes(f,mes,True).show_info()
else:
sh.com.rep_empty(f)
else:
sh.com.rep_empty(f)
def get_dead(self):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.get_dead'
dead = []
for i in range(len(self.langs)):
            if self.is_dead(i+1):
dead.append(self.langs[i])
        self.alive = [lang for lang in self.langs if lang not in dead]
message = _('Dead languages: {}').format(', '.join(dead))
message += '\n'
message += _('Languages: total: {}; alive: {}; dead: {}')
message = message.format (len(self.langs)
,len(self.alive)
,len(dead)
)
message += '\n'
sh.objs.get_mes(f,message,True).show_info()
message = _('Alive languages:') + '\n' + ', '.join(self.alive)
message += '\n\n'
message += _('The entire dictionary:') + '\n' + str(self.dic)
sh.objs.get_mes(f,message).show_info()
def is_dead(self,code1):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.is_dead'
url = self.deadr.format(code1)
# We use '<=' since a language code starts with 1
if 0 < code1 <= len(self.langs):
code = ''
while not code:
code = sh.Get (url = url
,timeout = 20
).run()
if self.zero in code.replace('\n','').replace('\r',''):
return True
else:
            sub = '0 < {} <= {}'.format(code1,len(self.langs))
mes = _('The condition "{}" is not observed!').format(sub)
sh.objs.get_mes(f,mes).show_error()
def fill(self):
for i in range(len(self.langs)):
self.dic[self.langs[i]] = {'code':i+1
,'pair':()
}
def get_pairs(self,lang1):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.get_pairs'
if lang1:
if lang1 in self.alive:
lst = []
for lang2 in self.alive:
if self.is_pair (self.dic[lang1]['code']
,self.dic[lang2]['code']
):
lst.append(lang2)
if lst:
lst.sort()
self.dic[lang1]['pair'] = tuple(lst)
else:
''' This error can be caused by network issues, so
we make it silent.
'''
mes = _('Language "{}" is alive but has no pairs!')
mes = mes.format(lang1)
sh.objs.get_mes(f,mes,True).show_warning()
else:
# We should pass only alive languages to this procedure
mes = _('Language "{}" is dead!').format(lang1)
sh.objs.get_mes(f,mes).show_warning()
else:
sh.com.rep_empty(f)
def loop(self):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.loop'
        #NOTE: To resume an interrupted run, set 'i' to the index of the last processed language
i = 0
while i < len(self.alive):
lang = self.alive[i]
sh.objs.get_mes(f,lang,True).show_info()
self.get_pairs(lang)
self.write(lang)
i += 1
def write(self,lang='Last'):
struct = sorted(self.dic.items(),key=operator.itemgetter(0))
message = _('Last processed language:') + ' ' + lang + '\n\n' \
+ str(struct)
if self.errors:
message += '\n\n' + _('URLs that caused errors:') + '\n'
message += '\n'.join(self.errors)
sh.WriteTextFile (file = self.filew
,Rewrite = True
).write(message)
def run(self):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.run'
timer = sh.Timer(f)
timer.start()
self.fill()
self.loop()
timer.end()
self.write()
sh.Launch(self.filew).launch_default()
def is_pair(self,code1,code2):
f = '[MClient] plugins.multitrancom.utils.utils.Pairs.is_pair'
# We use '<=' since a language code starts with 1
if 0 < code1 <= len(self.langs) \
and 0 < code2 <= len(self.langs):
if code1 == code2:
sh.com.rep_lazy(f)
else:
url = self.root.format(code1,code2)
'''
code = ''
while not code:
code = sh.Get(url=url).run()
'''
code = sh.Get (url = url
,timeout = 20
).run()
if 'Тематика' in code:
return True
elif not code:
                    ''' Sometimes a 'Bad Gateway' error is returned;
                        it can be reproduced in a browser as well.
                    '''
self.errors.append(url)
else:
sub = '0 < {} <= {}, 0 < {} <= {}'.format (code1
,len(self.langs)
,code2
,len(self.langs)
)
mes = _('The condition "{}" is not observed!').format(sub)
sh.objs.get_mes(f,mes).show_error()
def set_values(self):
        self.Success = True
        self.Verbose = False
self.root = 'https://www.multitran.com/m.exe?l1={}&l2={}&SHL=2&s='
self.deadr = 'https://www.multitran.com/m.exe?l1={}&SHL=2&s='
self.zero = 'Количество терминов</a></td></tr><tr bgcolor=#DBDBDB><td>Всего</td><td></td><td align="right">0</td>'
''' A list of languages that have terms (and therefore pairs).
This list is based on the output of 'self.get_dead'.
Recreate it when necessary.
'''
self.alive = (_('Abkhazian'),_('Afrikaans'),_('Albanian'),_('Amharic'),_('Arabic'),_('Armenian'),_('Assamese'),_('Azerbaijani'),_('Bashkir'),_('Basque'),_('Belarusian'),_('Bengali'),_('Bosnian'),_('Bosnian cyrillic'),_('Breton'),_('Bulgarian'),_('Burmese'),_('Catalan'),_('Chechen'),_('Chinese'),_('Chinese Taiwan'),_('Chinese simplified'),_('Chuvash'),_('Cornish'),_('Croatian'),_('Czech'),_('Danish'),_('Dutch'),_('English'),_('Esperanto'),_('Estonian'),_('Faroese'),_('Filipino'),_('Finnish'),_('French'),_('Frisian'),_('Friulian'),_('Galician'),_('Gallegan'),_('Georgian'),_('German'),_('Gothic'),_('Greek'),_('Gujarati'),_('Hausa'),_('Hebrew'),_('Hindi'),_('Hungarian'),_('Icelandic'),_('Igbo'),_('Indonesian'),_('Ingush'),_('Inuktitut'),_('Irish'),_('IsiXhosa'),_('Italian'),_('Japanese'),_('Kalmyk'),_('Kannada'),_('Kazakh'),_('Khmer'),_('Kinyarwanda'),_('Kirghiz'),_('Konkani'),_('Korean'),_('Ladin'),_('Lao'),_('Latin'),_('Latvian'),_('Lithuanian'),_('Lower Sorbian'),_('Luxembourgish'),_('Macedonian'),_('Malay'),_('Malayalam'),_('Maltese'),_('Manh'),_('Maori'),_('Marathi'),_('Mongolian'),_('Montenegrin'),_('Nepali'),_('Norwegian Bokmal'),_('Norwegian Nynorsk'),_('Occitan'),_('Odia'),_('Pashto'),_('Persian'),_('Polish'),_('Portuguese'),_('Punjabi'),_('Quechua'),_('Romanian'),_('Romansh'),_('Romany'),_('Russian'),_('Sami'),_('Sardinian'),_('Scottish Gaelic'),_('Serbian'),_('Serbian latin'),_('Sesotho'),_('Sesotho sa leboa'),_('Sinhala'),_('Slovak'),_('Slovenian'),_('South Ndebele'),_('Spanish'),_('Swahili'),_('Swati'),_('Swedish'),_('Tajik'),_('Tamil'),_('Tatar'),_('Telugu'),_('Thai'),_('Tsonga'),_('Tswana'),_('Turkish'),_('Turkmen'),_('Ukrainian'),_('Upper Sorbian'),_('Urdu'),_('Uzbek'),_('Venda'),_('Vietnamese'),_('Wayana'),_('Welsh'),_('Wolof'),_('Yakut'),_('Yoruba'),_('Zulu'))
''' A total list of languages supported by Multitran.
#NOTE: Must be sorted by a language code in an ascending
order.
'''
self.langs = (_('English'),_('Russian'),_('German'),_('French'),_('Spanish'),_('Hebrew'),_('Serbian'),_('Croatian'),_('Tatar'),_('Arabic'),_('Portuguese'),_('Lithuanian'),_('Romanian'),_('Polish'),_('Bulgarian'),_('Czech'),_('Chinese'),_('Hindi'),_('Bengali'),_('Punjabi'),_('Vietnamese'),_('Danish'),_('Italian'),_('Dutch'),_('Azerbaijani'),_('Estonian'),_('Latvian'),_('Japanese'),_('Swedish'),_('Norwegian Bokmal'),_('Afrikaans'),_('Turkish'),_('Ukrainian'),_('Esperanto'),_('Kalmyk'),_('Finnish'),_('Latin'),_('Greek'),_('Korean'),_('Georgian'),_('Armenian'),_('Hungarian'),_('Kazakh'),_('Kirghiz'),_('Uzbek'),_('Romany'),_('Albanian'),_('Welsh'),_('Irish'),_('Icelandic'),_('Kurdish'),_('Persian'),_('Catalan'),_('Corsican'),_('Galician'),_('Mirandese'),_('Romansh'),_('Belarusian'),_('Ruthene'),_('Slovak'),_('Upper Sorbian'),_('Lower Sorbian'),_('Bosnian'),_('Montenegrin'),_('Macedonian'),_('Church Slavonic'),_('Slovenian'),_('Basque'),_('Svan'),_('Mingrelian'),_('Abkhazian'),_('Adyghe'),_('Chechen'),_('Avar'),_('Ingush'),_('Crimean Tatar'),_('Chuvash'),_('Maltese'),_('Khmer'),_('Nepali'),_('Amharic'),_('Assamese'),_('Lao'),_('Asturian'),_('Odia'),_('Indonesian'),_('Pashto'),_('Quechua'),_('Maori'),_('Marathi'),_('Tamil'),_('Telugu'),_('Thai'),_('Turkmen'),_('Yoruba'),_('Bosnian cyrillic'),_('Chinese simplified'),_('Chinese Taiwan'),_('Filipino'),_('Gujarati'),_('Hausa'),_('Igbo'),_('Inuktitut'),_('IsiXhosa'),_('Zulu'),_('Kannada'),_('Kinyarwanda'),_('Swahili'),_('Konkani'),_('Luxembourgish'),_('Malayalam'),_('Wolof'),_('Wayuu'),_('Serbian latin'),_('Tswana'),_('Sinhala'),_('Urdu'),_('Sesotho sa leboa'),_('Norwegian Nynorsk'),_('Malay'),_('Mongolian'),_('Frisian'),_('Faroese'),_('Friulian'),_('Ladin'),_('Sardinian'),_('Occitan'),_('Gaulish'),_('Gallegan'),_('Sami'),_('Breton'),_('Cornish'),_('Manh'),_('Scottish Gaelic'),_('Yiddish'),_('Tajik'),_('Tagalog'),_('Soninke'),_('Baoulé'),_('Javanese'),_('Wayana'),_('French Guiana Creole'),_('Mauritian Creole'),_('Seychellois Creole'),_('Guadeloupe Creole'),_('Rodriguan Creole'),_('Haitian Creole'),_('Mandinka'),_('Surigaonon'),_('Adangme'),_('Tok Pisin'),_('Cameroonian Creole'),_('Suriname Creole'),_('Belizean Creole'),_('Virgin Islands Creole'),_('Fon'),_('Kim'),_('Ivatan'),_('Gen'),_('Marshallese'),_('Wallisian'),_('Old Prussian'),_('Yom'),_('Tokelauan'),_('Zande'),_('Yao'),_('Waray'),_('Walmajarri'),_('Visayan'),_('Vili'),_('Venda'),_('Achinese'),_('Adjukru'),_('Agutaynen'),_('Afar'),_('Acoli'),_('Afrihili'),_('Ainu'),_('Akan'),_('Akkadian'),_('Aleut'),_('Southern Altai'),_('Old English'),_('Angika'),_('Official Aramaic'),_('Aragonese'),_('Mapudungun'),_('Arapaho'),_('Arawak'),_('Avestan'),_('Awadhi'),_('Aymara'),_('Bashkir'),_('Baluchi'),_('Bambara'),_('Balinese'),_('Basaa'),_('Beja'),_('Bemba'),_('Bhojpuri'),_('Bikol'),_('Bini'),_('Bislama'),_('Siksika'),_('Tibetan'),_('Braj'),_('Buriat'),_('Buginese'),_('Burmese'),_('Bilin'),_('Caddo'),_('Galibi Carib'),_('Cebuano'),_('Chamorro'),_('Chibcha'),_('Chagatai'),_('Chuukese'),_('Mari'),_('Chinook jargon'),_('Choctaw'),_('Chipewyan'),_('Cherokee'),_('Cheyenne'),_('Coptic'),_('Cree'),_('Kashubian'),_('Dakota'),_('Dargwa'),_('Delaware'),_('Slave'),_('Dogrib'),_('Dinka'),_('Dhivehi'),_('Dogri'),_('Duala'),_('Middle Dutch'),_('Dyula'),_('Dzongkha'),_('Efik'),_('Egyptian'),_('Ekajuk'),_('Elamite'),_('Middle English'),_('Ewe'),_('Ewondo'),_('Fang'),_('Fanti'),_('Fijian'),_('Middle French'),_('Old French'),_('Eastern Frisian'),_('Fulah'),_('Ga'),_('Gayo'),_('Gbaya'),_('Ge\'ez'),_('Gilbertese'),_('Middle High 
German'),_('Old High German'),_('Gondi'),_('Gorontalo'),_('Gothic'),_('Grebo'),_('Ancient Greek'),_('Guarani'),_('Swiss German'),_('Gwichʼin'),_('Haida'),_('Kikuyu'),_('Hawaiian'),_('Herero'),_('Hiligaynon'),_('Hittite'),_('Hmong'),_('Hiri Motu'),_('Hupa'),_('Iban'),_('Ido'),_('Sichuan Yi'),_('Interlingue'),_('Ilocano'),_('Interlingua'),_('Inupiaq'),_('Lojban'),_('Judeo-Persian'),_('Judeo-Arabic'),_('Kara-Kalpak'),_('Kabyle'),_('Kachin'),_('Kalaallisut'),_('Kamba'),_('Kashmiri'),_('Kanuri'),_('Kawi'),_('Kabardian'),_('Khasi'),_('Khotanese'),_('Kimbundu'),_('Komi'),_('Kongo'),_('Kosraean'),_('Kpelle'),_('Karachay-Balkar'),_('Karelian'),_('Kurukh'),_('Kuanyama'),_('Kumyk'),_('Kutenai'),_('Lahnda'),_('Lamba'),_('Lezghian'),_('Limburgan'),_('Lingala'),_('Mongo'),_('Lozi'),_('Luba-Lulua'),_('Luba-Katanga'),_('Ganda'),_('Luiseno'),_('Lunda'),_('Luo'),_('Lushai'),_('Madurese'),_('Magahi'),_('Maithili'),_('Makasar'),_('Masai'),_('Moksha'),_('Mandar'),_('Mende'),_('Middle Irish'),_('Mi\'kmaq'),_('Minangkabau'),_('Malagasy'),_('Manchu'),_('Manipuri'),_('Mohawk'),_('Mossi'),_('Creek'),_('Marwari'),_('Erzya'),_('Neapolitan'),_('Nauru'),_('Navajo'),_('South Ndebele'),_('North Ndebele'),_('Ndonga'),_('Low German'),_('Nepal Bhasa'),_('Nias'),_('Niuean'),_('Nogai'),_('Old Norse'),_('Sandawe'),_('N\'Ko'),_('Classical Newari'),_('Nyanja'),_('Nyamwezi'),_('Nyankole'),_('Nyoro'),_('Nzima'),_('Ojibwa'),_('Oromo'),_('Osage'),_('Ossetian'),_('Ottoman Turkish'),_('Pangasinan'),_('Pahlavi'),_('Pampanga'),_('Papiamento'),_('Palauan'),_('Old Persian'),_('Phoenician'),_('Pali'),_('Pohnpeian'),_('Old Occitan'),_('Rajasthani'),_('Rapanui'),_('Rarotongan'),_('Reunionese'),_('Rundi'),_('Macedo-Romanian'),_('Sango'),_('Yakut'),_('Samaritan Aramaic'),_('Sanskrit'),_('Sasak'),_('Sicilian'),_('Scots'),_('Selkup'),_('Old Irish'),_('Shan'),_('Sidamo'),_('Southern Sami'),_('Northern Sami'),_('Lule Sami'),_('Inari Sami'),_('Samoan'),_('Skolt Sami'),_('Shona'),_('Sindhi'),_('Sogdian'),_('Somali'),_('Sesotho'),_('Sranan Tongo'),_('Serer'),_('Swati'),_('Sukuma'),_('Sundanese'),_('Susu'),_('Sumerian'),_('Santali'),_('Syriac'),_('Tahitian'),_('Timne'),_('Tonga'),_('Tetum'),_('Tigre'),_('Tigrinya'),_('Tiv'),_('Shilluk'),_('Klingon'),_('Tlingit'),_('Tamashek'),_('Carolinian'),_('Portuguese creole'),_('Tuamotuan'),_('Numèè'),_('Gela'),_('Comorian'),_('Rennellese'),_('Emilian-Romagnol'),_('Mayan'),_('Caribbean Hindustani'),_('Khakas'),_('Kinga'),_('Kurmanji'),_('Kwangali'),_('Lango'),_('Ligurian'),_('Lombard'),_('Luguru'),_('Mamasa'),_('Mashi'),_('Meru'),_('Rotokas'),_('Moldovan'),_('Mongolian script'),_('Nasioi'),_('Nyakyusa'),_('Piedmontese'),_('Pinyin'),_('Sangu'),_('Shambala'),_('Shor'),_('Central Atlas Tamazight'),_('Thai Transliteration'),_('Tsonga'),_('Tuvan'),_('Valencian'),_('Venetian'),_('Walloon'),_('Wanji'),_('Zigula'),_('Korean Transliteration'),_('Mongolian Transliteration'),_('Assyrian'),_('Kaguru'),_('Kimakonde'),_('Kirufiji'),_('Mbwera'),_('Gronings'),_('Hadza'),_('Iraqw'),_('Kami'),_('Krio'),_('Tweants'),_('Abaza'))
self.filew = '/home/pete/tmp/ars/pairs'
self.dic = {}
self.errors = []
class Commands:
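    # Reformat a tuple of plain strings copied to the clipboard into a tuple
    # of gettext _('...') calls and put the result back on the clipboard.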
def format_gettext(self):
f = '[MClient] plugins.multitrancom.utils.utils.Commands.format_gettext'
text = sh.Clipboard().paste()
if text:
text = text.replace("('",'')
text = text.replace("')",'')
text = text.replace("', '",',')
lst = text.split(',')
lst = ["_('" + item.strip() + "')" for item in lst \
if item.strip()
]
text = '(' + ','.join(lst) + ')'
sh.Clipboard().copy(text)
input(_('Press any key to continue.'))
else:
sh.com.rep_empty(f)
# Transform new-line-delimited text into a list of languages
def format_pairs(self):
f = '[MClient] plugins.multitrancom.utils.utils.Commands.format_pairs'
text = sh.Clipboard().paste()
if text:
            text = text.replace(r"'", r"\'")
lst = text.splitlines()
lst = ["_('" + item.strip() + "')" for item in lst \
if item.strip()
]
text = '(' + ','.join(lst) + ')'
sh.Clipboard().copy(text)
input(_('Press any key to continue.'))
else:
sh.com.rep_empty(f)
com = Commands()
|
noba3/KoTos
|
refs/heads/master
|
addons/script.module.youtube.dl/lib/youtube_dl/extractor/ultimedia.py
|
106
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
ExtractorError,
qualities,
unified_strdate,
clean_html,
)
class UltimediaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ultimedia\.com/default/index/video[^/]+/id/(?P<id>[\d+a-z]+)'
_TESTS = [{
# news
'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r',
'md5': '276a0e49de58c7e85d32b057837952a2',
'info_dict': {
'id': 's8uk0r',
'ext': 'mp4',
'title': 'Loi sur la fin de vie: le texte prévoit un renforcement des directives anticipées',
'description': 'md5:3e5c8fd65791487333dda5db8aed32af',
'thumbnail': 're:^https?://.*\.jpg',
'upload_date': '20150317',
},
}, {
# music
'url': 'https://www.ultimedia.com/default/index/videomusic/id/xvpfp8',
'md5': '2ea3513813cf230605c7e2ffe7eca61c',
'info_dict': {
'id': 'xvpfp8',
'ext': 'mp4',
'title': "Two - C'est la vie (Clip)",
'description': 'Two',
'thumbnail': 're:^https?://.*\.jpg',
'upload_date': '20150224',
},
}]
def _real_extract(self, url):
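        # The watch page only embeds the player; the jwplayer setup (and thus
        # the list of formats) lives in the /deliver/ iframe fetched below.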
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
deliver_url = self._proto_relative_url(self._search_regex(
r'<iframe[^>]+src="((?:https?:)?//(?:www\.)?ultimedia\.com/deliver/[^"]+)"',
webpage, 'deliver URL'), compat_urllib_parse_urlparse(url).scheme + ':')
deliver_page = self._download_webpage(
deliver_url, video_id, 'Downloading iframe page')
if '>This video is currently not available' in deliver_page:
raise ExtractorError(
'Video %s is currently not available' % video_id, expected=True)
player = self._parse_json(
self._search_regex(
r"jwplayer\('player(?:_temp)?'\)\.setup\(({.+?})\)\.on",
deliver_page, 'player'),
video_id)
quality = qualities(['flash', 'html5'])
formats = []
for mode in player['modes']:
video_url = mode.get('config', {}).get('file')
if not video_url:
continue
if re.match(r'https?://www\.youtube\.com/.+?', video_url):
return self.url_result(video_url, 'Youtube')
formats.append({
'url': video_url,
'format_id': mode.get('type'),
'quality': quality(mode.get('type')),
})
self._sort_formats(formats)
thumbnail = player.get('image')
title = clean_html((
self._html_search_regex(
r'(?s)<div\s+id="catArticle">.+?</div>(.+?)</h1>',
webpage, 'title', default=None) or
self._search_regex(
r"var\s+nameVideo\s*=\s*'([^']+)'",
deliver_page, 'title')))
description = clean_html(self._html_search_regex(
r'(?s)<span>Description</span>(.+?)</p>', webpage,
'description', fatal=False))
upload_date = unified_strdate(self._search_regex(
r'Ajouté le\s*<span>([^<]+)', webpage,
'upload date', fatal=False))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'formats': formats,
}
|
AlexaProjects/Alexa2
|
refs/heads/master
|
ALEXA-IDE/core/ninja_ide/core/filesystem_notifications/windows.py
|
2
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. #
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from threading import Thread
import win32con
import win32file
import win32event
import pywintypes
import os
from ninja_ide.core import file_manager
from ninja_ide.tools.logger import NinjaLogger
logger = NinjaLogger('ninja_ide.core.filesystem_notifications.windows')
DEBUG = logger.debug
from ninja_ide.core.filesystem_notifications import base_watcher
ADDED = base_watcher.ADDED
DELETED = base_watcher.DELETED
REMOVE = base_watcher.REMOVE
RENAME = base_watcher.RENAME
MODIFIED = base_watcher.MODIFIED
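# Map the action codes reported by ReadDirectoryChangesW (1-5) onto the
# watcher's own event constants; both rename codes collapse into RENAME.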
ACTIONS = {
1: ADDED,
2: DELETED,
3: MODIFIED,
4: RENAME,
5: RENAME
}
# Thanks to Claudio Grondi for the correct set of numbers
FILE_LIST_DIRECTORY = 0x0001
watchmask = (win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME)
def listdir(path):
fdict = file_manager.open_project(path)
for each_folder in fdict:
files, folders = fdict[each_folder]
yield each_folder
for each_file in files:
yield os.path.join(each_folder, each_file)
#Credit on this workaround for the shortsightedness of windows developers goes
#to Malthe Borch http://pypi.python.org/pypi/MacFSEvents
class FileEventCallback(object):
def __init__(self, callback, paths):
self.snapshots = {}
for path in paths:
self.snapshot(path)
self.callback = callback
self._path_last_time = {}
self.cookie = 0
def pulentastack(self, path):
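        # Thin wrapper around os.stat(); snapshots store these results so that
        # later listings can be compared against them.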
return os.stat(path)
def __call__(self, paths):
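        # Diff the stored snapshot of each watched path against a fresh
        # listing: emit MODIFIED/DELETED/ADDED events, and report an added
        # name whose inode matches a just-deleted entry as REMOVE (a move).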
events = []
deleted = {}
for path in sorted(paths):
path = path.rstrip('/')
snapshot = self.snapshots[path]
current = {}
try:
for name in listdir(path):
try:
current[name] = self.pulentastack(os.path.join(path,
name))
except OSError:
pass
except OSError:
# recursive delete causes problems with path being non-existent
pass
observed = set(current)
for name, snap_stat in list(snapshot.items()):
filename = os.path.join(path, name)
if name in observed:
stat = current[name]
if stat.st_mtime > snap_stat.st_mtime:
events.append((MODIFIED, filename))
observed.discard(name)
else:
event = (DELETED, filename)
deleted[snap_stat.st_ino] = event
events.append(event)
for name in observed:
if name != path:
stat = current[name]
filename = os.path.join(path, name)
event = deleted.get(stat.st_ino)
if event is not None:
event = (REMOVE, filename)
else:
event = (ADDED, filename)
if os.path.isdir(filename):
self.snapshot(filename)
events.append(event)
snapshot.clear()
snapshot.update(current)
for event in events:
self.callback(*event)
def snapshot(self, path):
path = os.path.realpath(path)
refs = self.snapshots
refs[path] = {}
for root, dirs, files in os.walk(path):
entry = refs[root]
for filename in files:
try:
entry[filename] = self.pulentastack(os.path.join(root,
filename))
except OSError:
continue
for directory in dirs:
refs[os.path.join(root, directory)] = {}
if os.path.isdir(path):
refs[os.path.join(root, path)] = {}
for name in listdir(os.path.join(root, path)):
try:
refs[path][name] = self.pulentastack(os.path.join(path,
name))
except OSError:
pass
class ThreadedFSWatcher(Thread):
def __init__(self, path, callback):
self._watch_path = path
self._callback = callback # FileEventCallback(callback, (path, ))
self._windows_sucks_flag = True
self._wait_stop = win32event.CreateEvent(None, 0, 0, None)
self._overlapped = pywintypes.OVERLAPPED()
self._overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
super(ThreadedFSWatcher, self).__init__()
def stop(self):
self._windows_sucks_flag = False
win32event.SetEvent(self._wait_stop)
def run(self):
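        # Open the directory with FILE_FLAG_OVERLAPPED and loop on
        # ReadDirectoryChangesW, coalescing duplicate events per file before
        # invoking the callback; the loop ends once stop() signals _wait_stop.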
hDir = win32file.CreateFileW(self._watch_path,
FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ |
win32con.FILE_SHARE_WRITE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED,
None
)
while self._windows_sucks_flag:
buf = win32file.AllocateReadBuffer(1024)
win32file.ReadDirectoryChangesW(
hDir,
buf,
True,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE,
self._overlapped
)
result_stack = {}
rc = win32event.WaitForMultipleObjects((self._wait_stop,
self._overlapped.hEvent),
0, win32event.INFINITE)
if rc == win32event.WAIT_OBJECT_0:
# Stop event
break
data = win32file.GetOverlappedResult(hDir, self._overlapped, True)
# lets read the data and store it in the results
results = win32file.FILE_NOTIFY_INFORMATION(buf, data)
for action, afile in results:
if action in ACTIONS:
full_filename = os.path.join(self._watch_path, afile)
result_stack.setdefault(full_filename,
[]).append(ACTIONS.get(action))
keys = list(result_stack.keys())
while len(keys):
key = keys.pop(0)
event = result_stack.pop(key)
if (ADDED in event) and (DELETED in event):
event = [e for e in event if e not in (ADDED, DELETED)]
noticed = []
for each_event in event:
if each_event not in noticed:
                        self._callback(each_event, key)
noticed.append(each_event)
class NinjaFileSystemWatcher(base_watcher.BaseWatcher):
def __init__(self):
super(NinjaFileSystemWatcher, self).__init__()
# do stuff
self.watching_paths = {}
def add_watch(self, path):
if path not in self.watching_paths:
watch = ThreadedFSWatcher(path, self._emit_signal_on_change)
watch.start()
self.watching_paths[path] = watch
# Add real watcher using platform specific things
def remove_watch(self, path):
if path in self.watching_paths:
self.watching_paths[path].stop()
self.watching_paths[path].join()
del(self.watching_paths[path])
# Remove real watcher using platform specific things
def shutdown_notification(self):
base_watcher.BaseWatcher.shutdown_notification(self)
for each_path in self.watching_paths:
each_path = self.watching_paths[each_path]
each_path.stop()
each_path.join()
|
devendermishrajio/nova_test_latest
|
refs/heads/master
|
nova/api/openstack/compute/plugins/v3/networks.py
|
21
|
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import networks as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import network
from nova.objects import base as base_obj
from nova.objects import fields as obj_fields
ALIAS = 'os-networks'
authorize = extensions.os_compute_authorizer(ALIAS)
def network_dict(context, network):
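    # Serialize a network (plain dict or NovaObject) into the API response
    # format; the admin-only fields are added only for admin contexts.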
fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
'injected', 'bridge', 'vlan', 'vpn_public_address',
'vpn_public_port', 'vpn_private_address', 'dhcp_start',
'project_id', 'host', 'bridge_interface', 'multi_host',
'priority', 'rxtx_base', 'mtu', 'dhcp_server',
'enable_dhcp', 'share_address')
if network:
# NOTE(mnaser): We display a limited set of fields so users can know
# what networks are available, extra system-only fields
# are only visible if they are an admin.
if context.is_admin:
fields += admin_fields
# TODO(mriedem): Remove the NovaObject type check once the
# network.create API is returning objects.
is_obj = isinstance(network, base_obj.NovaObject)
result = {}
for field in fields:
# NOTE(mriedem): If network is an object, IPAddress fields need to
# be cast to a string so they look the same in the response as
# before the objects conversion.
if is_obj and isinstance(network.fields[field].AUTO_TYPE,
obj_fields.IPAddress):
# NOTE(danms): Here, network should be an object, which could
# have come from neutron and thus be missing most of the
# attributes. Providing a default to get() avoids trying to
# lazy-load missing attributes.
val = network.get(field, None)
if val is not None:
result[field] = str(val)
else:
result[field] = val
else:
# It's either not an object or it's not an IPAddress field.
result[field] = network.get(field, None)
uuid = network.get('uuid')
if uuid:
result['id'] = uuid
return result
else:
return {}
class NetworkController(wsgi.Controller):
def __init__(self, network_api=None):
self.network_api = network_api or network.API(skip_policy_check=True)
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context, action='view')
networks = self.network_api.get_all(context)
result = [network_dict(context, net_ref) for net_ref in networks]
return {'networks': result}
@wsgi.response(202)
@extensions.expected_errors((404, 501))
@wsgi.action("disassociate")
def _disassociate_host_and_project(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
try:
self.network_api.associate(context, id, host=None, project=None)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
except NotImplementedError:
common.raise_feature_not_supported()
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context, action='view')
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(context, network)}
@wsgi.response(202)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
self.network_api.delete(context, id)
except exception.NetworkInUse as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
@extensions.expected_errors((400, 409, 501))
@validation.schema(schema.create)
def create(self, req, body):
context = req.environ['nova.context']
authorize(context)
params = body["network"]
cidr = params.get("cidr") or params.get("cidr_v6")
params["num_networks"] = 1
params["network_size"] = netaddr.IPNetwork(cidr).size
try:
network = self.network_api.create(context, **params)[0]
except (exception.InvalidCidr,
exception.InvalidIntValue,
exception.InvalidAddress,
exception.NetworkNotCreated) as ex:
            raise exc.HTTPBadRequest(explanation=ex.format_message())
except exception.CidrConflict as ex:
raise exc.HTTPConflict(explanation=ex.format_message())
return {"network": network_dict(context, network)}
@wsgi.response(202)
@extensions.expected_errors((400, 501))
@validation.schema(schema.add_network_to_project)
def add(self, req, body):
context = req.environ['nova.context']
authorize(context)
network_id = body['id']
project_id = context.project_id
try:
self.network_api.add_network_to_project(
context, project_id, network_id)
except NotImplementedError:
common.raise_feature_not_supported()
except (exception.NoMoreNetworks,
exception.NetworkNotFoundForUUID) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
class Networks(extensions.V3APIExtensionBase):
"""Admin-only Network Management Extension."""
name = "Networks"
alias = ALIAS
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'add': 'POST'}
res = extensions.ResourceExtension(
ALIAS, NetworkController(),
member_actions=member_actions,
collection_actions=collection_actions)
return [res]
def get_controller_extensions(self):
return []
|
Arable/old-www-do-not-use
|
refs/heads/master
|
lib/python2.7/site-packages/flask/blueprints.py
|
773
|
# -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, `None`
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprint.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None):
_PackageBoundObject.__init__(self, import_name, template_folder)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
self.view_functions = {}
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def app_template_test(self, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.template_test` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_test(f, name=name)
return f
return decorator
def add_app_template_test(self, f, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.add_template_test` but for a blueprint. Works exactly
like the :meth:`app_template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.tests[name or f.__name__] = f
self.record_once(register_template)
def app_template_global(self, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.template_global` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_global(f, name=name)
return f
return decorator
def add_app_template_global(self, f, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.add_template_global` but for a blueprint. Works exactly
like the :meth:`app_template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.globals[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
function is executed each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
|
Julian/Verge
|
refs/heads/master
|
setup.py
|
1
|
import os
from setuptools import find_packages, setup
from verge import __version__
BIN_DIR = os.path.join(os.path.dirname(__file__), "bin")
with open(os.path.join(os.path.dirname(__file__), "README.rst")) as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy"
]
setup(
name="verge",
version=__version__,
packages=find_packages(),
author="Julian Berman",
author_email="Julian@GrayVines.com",
classifiers=classifiers,
description="Parallel execution inspired by GNU Parallel",
license="MIT",
long_description=long_description,
scripts=[os.path.join(BIN_DIR, bin) for bin in os.listdir(BIN_DIR)],
url="https://github.com/Julian/Verge",
)
|
greenoaktree/MissionPlanner
|
refs/heads/master
|
Lib/dis.py
|
65
|
"""Disassembler of Python byte code into mnemonics."""
import sys
import types
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["dis", "disassemble", "distb", "disco",
"findlinestarts", "findlabels"] + _opcodes_all
del _opcodes_all
_have_code = (types.MethodType, types.FunctionType, types.CodeType,
types.ClassType, type)
def dis(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if isinstance(x, types.InstanceType):
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if isinstance(x1, _have_code):
print "Disassembly of %s:" % name
try:
dis(x1)
except TypeError, msg:
print "Sorry:", msg
print
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError, \
"don't know how to disassemble %s objects" % \
type(x).__name__
def distb(tb=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError, "no last traceback to disassemble"
while tb.tb_next: tb = tb.tb_next
disassemble(tb.tb_frame.f_code, tb.tb_lasti)
def disassemble(co, lasti=-1):
"""Disassemble a code object."""
code = co.co_code
labels = findlabels(code)
linestarts = dict(findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
c = code[i]
op = ord(c)
if i in linestarts:
if i > 0:
print
print "%3d" % linestarts[i],
else:
print ' ',
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print repr(i).rjust(4),
print opname[op].ljust(20),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = oparg*65536L
print repr(oparg).rjust(5),
if op in hasconst:
print '(' + repr(co.co_consts[oparg]) + ')',
elif op in hasname:
print '(' + co.co_names[oparg] + ')',
elif op in hasjrel:
print '(to ' + repr(i + oparg) + ')',
elif op in haslocal:
print '(' + co.co_varnames[oparg] + ')',
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
elif op in hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print '(' + free[oparg] + ')',
print
def disassemble_string(code, lasti=-1, varnames=None, names=None,
constants=None):
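    # Variant of disassemble() for a raw bytecode string; the optional
    # varnames/names/constants sequences are only used to pretty-print operands.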
labels = findlabels(code)
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print repr(i).rjust(4),
print opname[op].ljust(15),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
print repr(oparg).rjust(5),
if op in hasconst:
if constants:
print '(' + repr(constants[oparg]) + ')',
else:
print '(%d)'%oparg,
elif op in hasname:
if names is not None:
print '(' + names[oparg] + ')',
else:
print '(%d)'%oparg,
elif op in hasjrel:
print '(to ' + repr(i + oparg) + ')',
elif op in haslocal:
if varnames:
print '(' + varnames[oparg] + ')',
else:
print '(%d)' % oparg,
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
print
disco = disassemble # XXX For backwards compatibility
def findlabels(code):
"""Detect all offsets in a byte code which are jump targets.
Return the list of offsets.
"""
labels = []
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
label = -1
if op in hasjrel:
label = i+oparg
elif op in hasjabs:
label = oparg
if label >= 0:
if label not in labels:
labels.append(label)
return labels
def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno)
def _test():
"""Simple test program to disassemble a file."""
if sys.argv[1:]:
if sys.argv[2:]:
sys.stderr.write("usage: python dis.py [-|file]\n")
sys.exit(2)
fn = sys.argv[1]
if not fn or fn == "-":
fn = None
else:
fn = None
if fn is None:
f = sys.stdin
else:
f = open(fn)
source = f.read()
if fn is not None:
f.close()
else:
fn = "<stdin>"
code = compile(source, fn, "exec")
dis(code)
if __name__ == "__main__":
_test()
|
tboyce021/home-assistant
|
refs/heads/dev
|
homeassistant/components/nissan_leaf/binary_sensor.py
|
21
|
"""Plugged In Status Support for the Nissan Leaf."""
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import DATA_CHARGING, DATA_LEAF, DATA_PLUGGED_IN, LeafEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up of a Nissan Leaf binary sensor."""
if discovery_info is None:
return
devices = []
for vin, datastore in hass.data[DATA_LEAF].items():
_LOGGER.debug("Adding binary_sensors for vin=%s", vin)
devices.append(LeafPluggedInSensor(datastore))
devices.append(LeafChargingSensor(datastore))
add_entities(devices, True)
class LeafPluggedInSensor(LeafEntity, BinarySensorEntity):
"""Plugged In Sensor class."""
@property
def name(self):
"""Sensor name."""
return f"{self.car.leaf.nickname} Plug Status"
@property
def is_on(self):
"""Return true if plugged in."""
return self.car.data[DATA_PLUGGED_IN]
@property
def icon(self):
"""Icon handling."""
if self.car.data[DATA_PLUGGED_IN]:
return "mdi:power-plug"
return "mdi:power-plug-off"
class LeafChargingSensor(LeafEntity, BinarySensorEntity):
"""Charging Sensor class."""
@property
def name(self):
"""Sensor name."""
return f"{self.car.leaf.nickname} Charging Status"
@property
def is_on(self):
"""Return true if charging."""
return self.car.data[DATA_CHARGING]
@property
def icon(self):
"""Icon handling."""
if self.car.data[DATA_CHARGING]:
return "mdi:flash"
return "mdi:flash-off"
|
mccarrion/python-practice
|
refs/heads/master
|
crash_course/chapter10/alice.py
|
1
|
filename = 'alice.txt'
try:
with open(filename) as f_obj:
contents = f_obj.read()
except FileNotFoundError:
msg = "Sorry, the file " + filename + " does not exist."
print(msg)
else:
# Count the approximate number of words in the file.
words = contents.split()
num_words = len(words)
print("The file " + filename + " has about " + str(num_words) + " words.")
|
Hiwr/hiwr_screen
|
refs/heads/master
|
src/screen.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
#############################################################################
# #
# #
# Copyright 2014 Worldline #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the LICENSE #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
#############################################################################
import os
import re
import signal
from random import randint, random
from math import cos, sin, ceil
from collections import deque
from Queue import Queue
import sys
import numpy as np
import roslib
import rospy
from hiwr_msg.msg import EyesLook
from hiwr_msg.msg import Animation
from hiwr_msg.msg import TouchEvent
import std_msgs
from efl.evas import SmartObject, EVAS_HINT_EXPAND, EVAS_HINT_FILL
from efl.evas import Rect, Rectangle, Line, Text, Polygon, ClippedSmartObject, Box, Map, \
EVAS_TEXT_STYLE_PLAIN, EVAS_TEXT_STYLE_OUTLINE, EVAS_TEXT_STYLE_SOFT_OUTLINE, EVAS_TEXT_STYLE_GLOW, \
EVAS_TEXT_STYLE_OUTLINE_SHADOW, EVAS_TEXT_STYLE_FAR_SHADOW, EVAS_TEXT_STYLE_OUTLINE_SOFT_SHADOW, \
EVAS_TEXT_STYLE_SOFT_SHADOW, EVAS_TEXT_STYLE_FAR_SOFT_SHADOW
from efl.evas import Image as EvasImage
from efl import emotion
from efl.emotion import Emotion
from efl import elementary
from efl.elementary.window import StandardWindow
from efl.elementary.image import Image
from efl.ecore import Animator, Timer
from config import path, script_path, img_path, font_path
from color import rotate_hue, lighten, darken, from_to
from animation import animation_queue, animation_arrays
from timing import linear_number, sinusoidal_number, linear_tuple_number
from bubbles import Bubble
from transformable import Transformable
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
screen_size = (screenX, screenY) = (800, 480)
global_offset = 0, -60
eyeballs_size = (eyeballs_size_x, eyeballs_size_y) = (420, 112)
eyeballs_pos = ((screenX - eyeballs_size_x) / 2 +
global_offset[0], (screenY - eyeballs_size_y) / 2 + global_offset[1])
#((screenX - eyeballs_size_x)/2, (screenY - eyeballs_size_y)/2)
#((screenX - eyeballs_size_x)/2 + global_offset[0], (screenY-eyeballs_size_y)/2 + global_offset[1])
#--tests eyeball move animability
eyeballs_latest_pos_index = 0
#--
#-- ros
follow = ''
animate = ''
#--
#-- colors
WHITE = (255, 255, 255, 255)
BLUE = (0, 0, 100, 255)
YELLOW = (210, 210, 0, 255)
GREEN = (0, 230, 0, 255)
RED = (230, 0, 0, 255)
#--
TOP_LEFT = (0, 0)
# e=0
# f=0
todo_queue = Queue(1)
#-- on Move Eye callback
def draw(win, face):
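    # Called on every animator tick: exit cleanly if ROS is shutting down,
    # otherwise pop and run at most one task queued by the ROS callbacks.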
global todo_queue
if rospy.is_shutdown():
elementary.exit()
#sys.exit(0)
if(not todo_queue.empty()):
task = todo_queue.get_nowait()
task()
todo_queue.task_done()
# print "draw"
#--tests eyeball move animability
#global eyeballs_latest_pos_index, e, f
#face.eyes.eyeballs.move(eyeballs_pos[0]+30*cos(eyeballs_latest_pos_index), eyeballs_pos[1]+30*sin(eyeballs_latest_pos_index))
#face.eyes.eyeballs.move(
# 30 * cos(eyeballs_latest_pos_index), 30 * sin(eyeballs_latest_pos_index))
#eyeballs_latest_pos_index += 0.1
#--
#if(eyeballs_latest_pos_index * 10 % ((2 * 256) - 1) < 256):
# apply(face.color_set, rotate_hue(face.color_get(), 0.005))
#else:
#apply(face.color_set, rotate_hue(face.color_get(), 0.005))
# face.color_set(*lighten(face.color_get(0)))
#if randint(0, 300) < 2:
#face.anim_blink()
# face.anim_tired(face.anim_tired)
# face.anim_tired(lambda:face.anim_sleep(lambda:face.anim_sleep(lambda:face.anim_sleep(face.standard))))
# print "eyes"
# blink.hide()
# eyes.show()
# e=(e+1)%11
# face.change_eyes(e)
# print "change eyes!", e
# f=(f+1)%3
# face.change_mustache(e)
# print "change mustache!", f
# else :
# print "blink"
# eyes.hide()
# blink.show()
class EyeBalls(SmartObject): #Transformable
def __init__(self, win, eyes):
SmartObject.__init__(self, win.evas)
self.name = "eyeballs"
self.eyeballs = Image(
win, pos=eyeballs_pos, size=eyeballs_size, file=os.path.join(img_path, "retine_sim.png"))
self.member_add(self.eyeballs)
#Transformable.__init__(self, self.eyeballs)
self.show()
def show(self):
self.eyeballs.show()
def hide(self):
self.eyeballs.hide()
class Eyes(Transformable):
def __init__(self, win):
global screen_size, global_offset
SmartObject.__init__(self, win.evas)
self.name = "eyes"
eye_size = screen_size
self.origin = eye_pos = global_offset
# white underlay behind eyes
self.underlay = Rectangle(win.evas, pos=TOP_LEFT, size=eye_size, color=WHITE)
self.underlay.layer = 0
self.member_add(self.underlay)
# eyeballs
self.eyeballs = EyeBalls(win, self)
#self.eyeballs.layer = 2
self.member_add(self.eyeballs)
#self.eyeballs.stack_above(self.underlay)
# first load images to retrieve eye size
self.images = [Image(win, pos=TOP_LEFT, size=eye_size, file=os.path.join(img_path, 'oeil-' + str(i) + '.png'))
for i in range(1, 12)]
# print "eyes image list", self.images
self.eyes = self.images[0]
for i in range(0, 11):
self.member_add(self.images[i])
self.images[i].layer = 3
self.images[i].stack_above(self.underlay)
self.images[i].stack_above(self.eyeballs)
Transformable.__init__(self, self.eyes)
self.clip_set(Rectangle(win.evas, pos=TOP_LEFT, size=eye_size, color=BLUE))
self.show()
# print "eyes underlay", self.underlay
# print "eyes current image", self.eyes
# print "eyes clip", self.clip
def show(self):
self.underlay.show()
self.eyes.show()
self.clipper.show()
self.eyeballs.show()
def hide(self):
self.clipper.hide()
self.eyeballs.hide()
def clip_set(self, clipper):
self.clipper = clipper
# self.underlay.clip=clipper
for i in range(0, 11):
self.images[i].clip = clipper
# self.eyes.clip=clipper
def move_origin(self):
self.move(*self.origin)
def color_set(self, r, g, b, a):
color = (r, g, b, a)
self.clipper.color = color
def color_get(self):
return self.clipper.color
def change(self, id):
# print "changing eyes to", id
self.eyes.hide()
# print "old eyes hidden", self.eyes
self.eyes = self.images[id]
self.transformable_set(self.eyes)
# print "new eyes still hidden", self.eyes
self.eyes.show()
# print "new eyes still displayed", self.eyes
class Mustache(Transformable):
def __init__(self, win, pos=(0, 0)):
SmartObject.__init__(self, win.evas)
self.name = "mustache"
self.mustache_size = (400, 134)
# size=mustache_size,
self.images = [Image(win, pos=TOP_LEFT, size=self.mustache_size, file=os.path.join(img_path, 'moustache-' + str(i) + '.png'))
for i in range(1, 3)]
#print "mustache image list", self.images
self.mustache = self.images[0]
#Rectangle(win.evas, pos=TOP_LEFT, size=mustache_size, color=RED)#self.images[0]
#self.member_add(self.mustache)
for i in range(0, 2):
self.member_add(self.images[i])
self.images[i].layer = 4
self.move(*pos)
Transformable.__init__(self, self.mustache)
# does not support yet rotate + scale with diffrent centers
#self.rotate(15.0)
#self.scale(0.5)
self.show()
def move(self, x,y):
self.pos=(x,y)
SmartObject.move(self, x, y)
def change(self, id):
self.mustache.hide()
self.mustache = self.images[id]
self.transformable_set(self.mustache)
self.mustache.show()
def show(self):
self.mustache.show()
def hide(self):
self.mustache.hide()
class Face(SmartObject):
def __init__(self, win):
global screen_size, global_offset
self.win = win
SmartObject.__init__(self, win.evas)
self.bg = Rectangle(
win.evas, pos=TOP_LEFT, size=screen_size, color=BLUE)
self.bg.layer = 0
self.member_add(self.bg)
self.eyes = Eyes(win)
self.eyes.layer = 1
self.eyes.move(global_offset[0], global_offset[1])
self.eyes.stack_above(self.bg)
self.member_add(self.eyes)
self.mustache = Mustache(win, ( global_offset[0] + 200, global_offset[1] + 380 ) )
#print "mustache.move to", (global_offset[0] + 200, global_offset[1] + 380)
#self.mustache.move( global_offset[0] + 200, global_offset[1] + 380 )
self.mustache.layer = 5
self.mustache.stack_above(self.eyes)
self.member_add(self.mustache)
self.color_set(*BLUE)
self.standard()
self.eyes.eyeballs.move(0,0)
self.show()
self.anim_standard()
self.bubbles={}
#self.anim_sleep()
#print "bg", self.bg
#print "eyes", self.eyes
def show(self):
self.bg.show()
self.eyes.show()
self.mustache.show()
def hide(self):
self.bg.hide()
self.eyes.hide()
self.mustache.hide()
def color_set(self, r, g, b, a):
color = (r, g, b, a)
self.bg.color = color
self.eyes.color_set(r, g, b, a)
def color_get(self):
return self.bg.color
def change_eyes(self, id):
self.eyes.change(id % 11)
def change_mustache(self, id):
self.mustache.change(id % 2)
def noop(self):
return True
def add_bubble(self, name, content=None, ratio=1.0, pos=(20,40)):
bubble=Bubble(self.win)
bubble.name=name
bubble.pos=pos
if content:
bubble.add(content)
bubble.show()
self.bubbles[name]=bubble
return bubble
def show_qr_bubble(self):
if not self.bubbles.get("qr"):
bubble_text = Text(self.win.evas, text='?', color=WHITE)
bubble_text.font_source = font_path
bubble_text.font = "FontAwesome", 130
bubble_text.style = EVAS_TEXT_STYLE_SOFT_SHADOW
bubble_text.shadow_color = (64,64,64,127)
self.add_bubble("qr", bubble_text)
self.bubbles["qr"].content.move_relative(0,-20)
else:
self.bubbles.get("qr").show()
def show_tv_bubble(self):
if not self.bubbles.get("tv"):
bubble_image = Image(self.win, pos=(30,20), size=(160,80), file=os.path.join(img_path, "tv.png"))
self.add_bubble("tv", bubble_image)
else:
self.bubbles.get("tv").show()
def show_bubble(self, key):
if self.bubbles.get(key):
self.bubbles[key].show()
def hide_bubble(self, key):
if self.bubbles.get(key):
self.bubbles[key].hide()
def hide_all_bubbles(self):
for key in self.bubbles:
print "hiding bubble", key
self.bubbles[key].hide()
#del self.bubbles[key]
# Face States
def standard(self, awaken=True):
self.awaken = awaken
self.eyes.change(0)
self.eyes.move_origin()
self.color_set(*BLUE)
return True
def dead(self, awaken=False):
self.awaken = awaken
self.eyes.change(11)
self.eyes.move_origin()
self.color_set(*RED)
return True
def tired(self, awaken=True):
self.awaken = awaken
self.eyes.change(6)
#self.color_set(*dark_blue)
self.eyes.move_origin()
return True
def sleep(self, awaken=False):
self.awaken = awaken
self.eyes.change(10)
self.eyes.move_origin()
return True
def intrigued(self, awaken=True):
self.awaken = awaken
self.eyes.change(7)
self.eyes.move_origin()
return True
def astonished(self, awaken=True):
self.awaken = awaken
self.eyes.change(8)
self.eyes.move_origin()
return True
def grumpy(self, awaken=True):
self.awaken = awaken
self.eyes.change(8)
self.eyes.move_origin()
return True
# Face Animations (default for animating at ~ 30fps)
def anim_standard(self, blink_max_delay=250.0/30.0, blink_min_delay=50.0/30.0, cb=lambda: None):
self.animation="standard"
self.eyes.change(0)
def set_color(rgba):
(r, g, b, a) = rgba
self.color_set(r,g,b,a)
return True
animation_arrays( from_to(self.color, BLUE, 10, set_color))
def on_timer():
if self.animation == "standard":
self.anim_blink()
#self.anim_flash()
interval = blink_min_delay + (random()* (blink_max_delay-blink_min_delay))
Timer(interval, on_timer)
return False
on_timer()
def anim_tired(self, cb=lambda: None):
self.animation="tired"
def cycle(cb):
def check():
if self.animation=="tired":
self.anim_tired()
return True
else:
cb()
return False
return check
def on_timer_set_awaken():
if self.animation == "tired":
self.anim_blink()
self.anim_flash(lighten(BLUE, 0.1), 20, lambda: self.anim_colorize(darken(BLUE, 0.02), 20 ) )
#self.anim_flash()
interval = 1 + (random()* 2)
Timer(interval, on_timer_set_tired)
return False
def on_timer_set_tired():
if self.animation == "tired":
self.tired()
self.anim_colorize(darken(BLUE, 0.05), 180)
#self.anim_flash()
interval = 1 + (random()* 4)
Timer(interval, on_timer_set_asleep)
return False
def on_timer_set_asleep():
if self.animation == "tired":
self.sleep()
self.anim_colorize(darken(BLUE, 0.1), 180)
interval = 1 + (random()* 2)
Timer(interval, on_timer_set_awaken)
return False
on_timer_set_tired()
#animation_queue(self.standard, self.noop, self.noop
# , self.tired, self.noop, self.noop, self.noop, self.noop
# , self.standard, self.noop, self.noop, self.tired # , self.noop, self.noop, self.noop, self.noop
# , cycle(cb))
def anim_blink(self, cb=lambda: None):
#self.animation="standard"
animation_queue(
self.tired, self.noop, self.noop, self.noop, self.noop,
self.sleep, self.noop, self.noop, self.noop, self.noop, self.standard, cb)
def anim_sleep(self, cb=lambda: None):
self.animation="sleep"
def step(x,y,color):
def move():
if self.animation=="sleep":
self.eyes.move_relative(x,y)
self.color_set(*color)
return True
else:
self.eyes.move_origin()
return move
def cycle(cb):
def check():
if self.animation=="sleep":
self.anim_sleep()
return True
else:
self.eyes.move_origin()
cb()
return False
return check
animation_queue(
self.sleep
, step(0, -2, darken(BLUE,0.002*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, -5, darken(BLUE,0.007*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, -10, darken(BLUE,0.017*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, -10, darken(BLUE,0.027*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, -5, darken(BLUE,0.032*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, -2, darken(BLUE,0.034*5)), self.noop, self.noop, self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, 2, darken(BLUE,0.032*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, 5, darken(BLUE,0.027*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, 10, darken(BLUE,0.017*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, 10, darken(BLUE,0.007*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, 5, darken(BLUE,0.002*5)), self.noop, self.noop, self.noop, self.noop, self.noop
, step(0, 2, BLUE), self.noop, self.noop, self.noop, self.noop, self.noop, self.noop, self.noop
#, self.anim_flash
, cycle(cb))
def anim_colorize(self, color=YELLOW, quantity_of_ticks=10, cb=lambda: None):
print "colorize original color", self.color, "destination color", color
def set_color(rgba):
(r, g, b, a) = rgba
self.color_set(r,g,b,a)
return True
animation_arrays( from_to(self.color, color, quantity_of_ticks, set_color)
, [cb])
def anim_flash(self, color=YELLOW, quantity_of_ticks=10, cb=lambda: None):
print "flash original color", self.color, "destination color", color
def set_color(rgba):
(r, g, b, a) = rgba
self.color_set(r,g,b,a)
return True
animation_arrays( from_to(self.color, color, quantity_of_ticks, set_color)
, from_to(color, self.color, quantity_of_ticks, set_color)
, [cb])
def anim_eyeballs(self, coords, quantity_of_ticks=3, cb=lambda: None):
print "original eyeball pos", self.eyes.eyes.pos
dest_pos=(coords.x, coords.y)
print "destination eyeball pos", dest_pos
        animation_arrays( linear_tuple_number( self.eyes.eyeballs.eyeballs.pos, dest_pos , quantity_of_ticks), [cb])
def anim_eyes_zoom(self, animation="eyeballs_zoom", quantity_of_ticks=20, cb=lambda: None):
max_zoom=1.5
min_zoom=1.0
delta = 4.0/(quantity_of_ticks)
#print 'delta', delta
def scaler(start, stop, ratio):
#print "scaler", start, stop, ratio
def scale():
if self.animation == animation:
self.eyes.smooth = False
self.eyes.map.smooth = False
self.eyes.scale( sinusoidal_number (start, stop, ratio) )
return True
else:
self.eyes.scale( 1.0 )
self.eyes.smooth = True
self.eyes.map.smooth = True
cb()
return False
return scale
def cycle(cb):
def check():
#print "should display again if current animation is alarma:", self.animation
if self.animation == animation:
self.anim_eyes_zoom(animation, quantity_of_ticks, cb)
return True
else:
cb()
return False
return check
animation_arrays(
[ scaler(1.0, max_zoom, ratio) for ratio in np.arange( 0.0, 1.0 +delta/2, delta )]
, [scaler(max_zoom, min_zoom, ratio) for ratio in np.arange( 0.0, 1.0 +delta/2, delta/2 )]
, [scaler(min_zoom, 1.0, ratio) for ratio in np.arange( 0.0, 1.0 +delta/2, delta )]
, [cycle(cb)] )
def anim_mustache_dance(self, animation="mustache", quantity_of_ticks=30, cb=lambda: None):
dest_angle=22.0
delta = 4.0/(quantity_of_ticks)
#print 'delta', delta
def rotator(start, stop, ratio):
#print "rotator", start, stop, ratio
def rotate():
if self.animation == animation:
self.mustache.smooth = False
self.mustache.rotate( sinusoidal_number (start, stop, ratio) )
return True
else:
self.mustache.rotate( 0 )
self.mustache.smooth = True
cb()
return False
return rotate
def cycle(cb):
def check():
#print "should display again if current animation is alarma:", self.animation
if self.animation == animation:
self.anim_mustache_dance(animation, quantity_of_ticks, cb)
return True
else:
self.mustache.rotate( 0 )
self.mustache.smooth = True
cb()
return False
return check
animation_arrays(
[ rotator(0, dest_angle, ratio) for ratio in np.arange( 0.0, 1.0 +delta/2, delta )]
, [rotator(-dest_angle, dest_angle, ratio) for ratio in np.arange( 1.0, -1.0 +delta/2, -delta/2 )]
, [rotator(0, dest_angle, ratio) for ratio in np.arange( -1.0, 0.0 +delta/2, delta )]
, [cycle(cb)] )
def anim_alarm(self, cb=lambda: None):
#print "alarm !"
self.animation="alarm"
ref_color = lighten(BLUE, 0.1)
def cycle(cb):
def check():
#print "should display again if current animation is alarma:", self.animation
if self.animation == "alarm":
self.anim_alarm()
return True
else:
cb()
return False
return check
def color_rotate(variation):
mixed=rotate_hue(ref_color, variation)
def colorize():
if self.animation == "alarm":
self.color_set(*mixed)
return True
elif self.animation == "mustache":
self.standard()
return True
else:
self.standard()
cb()
return False
return colorize
hues = [ color_rotate(variation) for variation in np.arange(0, 1.001, 0.05) ]
animation_arrays( hues, [cycle(cb)])
# ROS event handlers
def on_move_eye(pos, face):
#position = pos
#print "ROS Eye move:", pos
#face.eyes.eyeballs.move(pos.x, pos.y)
face.eyes.eyeballs.move(pos.x, pos.y)
def on_scenario_state(state, face):
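    # Parse the numeric state out of the ROS String message and queue the
    # matching face/animation switch so the UI thread applies it on draw().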
global todo_queue
#=> Independently of scenario, QR code should flash in yellow, and node animation should be launched too
print "--> New scenario State received", state
m = re.search(".*?([0-9]+).*?", str(state))
state = m.group(1)
print "--> State :", state
if(state not in ["1","2","3","4","5","6","7"]): # default + "0"
# Hyve is tracking face. (EyesLook message should manage that by itself)
# When a visitor badges its qrcode pass
#=> standard face is required
#=> if state 0, timer to aske for qr in 15s
print "Switching to standard face & standard animation with QR bubble displayed"
def state0():
print "doing todo for state 0"
face.standard()
face.anim_standard()
face.hide_all_bubbles()
face.show_qr_bubble()
#face.anim_mustache_dance() #disable mustache if rotate
return todo_queue.put(state0, 1)
elif (state == "1"):
print "Standard face, noding, flashing and saying thank you"
def state1():
print "doing todo for state 1"
face.standard()
face.anim_standard()
face.hide_all_bubbles()
face.anim_flash(GREEN)
return todo_queue.put(state1, 1)
elif (state == "2"):
# Hyve is watching the TV
#=> display a thinking bubble where hiwr shows a TV
print "Switching watching TV animation"
def state2():
print "doing todo for state 2"
face.standard()
face.anim_standard()
#create bubble
face.hide_all_bubbles()
face.show_tv_bubble()
return todo_queue.put(state2, 1)
elif (state == "3"):
# Hyve is tired
#=> tired animation every few seconds
#=> bubble to ask for night cap in order to sleep
print "Switching random tired animation"
def state3():
print "doing todo for state 3"
face.tired()
face.anim_tired()
face.hide_all_bubbles()
return todo_queue.put(state3, 1)
elif (state == "4"):
# Hyve is sleeping
#=> sleep animation
print "Switching to sleep animation"
def state4():
print "doing todo for state 4"
face.sleep()
face.anim_sleep()
face.hide_all_bubbles()
return todo_queue.put(state4, 1)
elif (state == "5"):
# Hyve wakes up
#=> alarm animation, waiting for the visitor to remove the night cap
print "Switching to Alarm animation"
def state5():
print "doing todo for state 5"
#face.anim_standard()
face.anim_alarm()
face.intrigued()
face.hide_all_bubbles()
#face.eyes.eyeballs.scale(1.0)
#face.eyes.eyeballs.scale(0.9)
#face.eyes.eyeballs.scale(1.0)
face.anim_eyes_zoom("alarm")
face.anim_mustache_dance("alarm")
return todo_queue.put(state5, 1)
elif (state == "6"):
        # Night cap removed
        #=> back to the standard face and standard animation
print "Switching to standard face & standard animation"
def state6():
print "doing todo for state 0"
face.standard()
face.anim_standard()
face.hide_all_bubbles()
return todo_queue.put(state6, 1)
elif(state== "7"):
#Grumpy mode
def state7():
face.animation = "grumpy"
#face.standard()
#face.anim_alarm()
face.grumpy()
face.hide_all_bubbles()
face.anim_mustache_dance("grumpy")
return todo_queue.put(state7,1)
def listener(face):
print "Setting up ROS listening"
rospy.init_node('listener', anonymous=True)
print "ROS listens for EyesLook messages on /EyesLook"
follow = rospy.Subscriber(
"EyesLook", EyesLook, lambda pos: on_move_eye(pos, face), None, 1)
print "ROS listens for String messages on /Scenario/state"
follow = rospy.Subscriber(
"Scenario/state", std_msgs.msg.String, lambda state: on_scenario_state(state, face), None, 1)
#animate = rospy.Subscriber("animate", Animation, canimate, None, 1)
#touch = rospy.Subscriber("touch", TouchEvent, touchback, None, 1)
#rospy.spin()
def start():
win = StandardWindow("Robot Eyes", "Eyes of the robot", autodel=True)
win.callback_delete_request_add(lambda o: elementary.exit())
face = Face(win)
draw(win, face)
def on_tick(*args, **kargs):
draw(win, face)
return True
Animator(on_tick)
win.resize(screenX, screenY)
win.show()
listener(face)
if __name__ == "__main__":
elementary.init()
start()
    if os.geteuid() == 0:
print "current user is root, renicing the process"
os.nice(-12)
elementary.run()
#elementary.shutdown()
|
ryfx/gyp
|
refs/heads/master
|
test/ninja/empty-and-non-empty-duplicate-name/gyptest-empty-and-non-empty-duplicate-name.py
|
100
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a phony target isn't emitted when a real target with the same
name has already been output.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['ninja'])
# Reset xcode_ninja_target_pattern to its default for this test.
test.run_gyp('test.gyp', '-G', 'xcode_ninja_target_pattern=^$')
# Check for both \r and \n to cover both windows and linux.
test.must_not_contain('out/Default/build.ninja', 'build empty_target: phony\r')
test.must_not_contain('out/Default/build.ninja', 'build empty_target: phony\n')
test.pass_test()
|
xNovax/SickRage
|
refs/heads/master
|
lib/github/IssueEvent.py
|
74
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.Issue
import github.NamedUser
class IssueEvent(github.GithubObject.CompletableGithubObject):
"""
This class represents IssueEvents as returned for example by http://developer.github.com/v3/todo
"""
@property
def actor(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._actor)
return self._actor.value
@property
def commit_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._commit_id)
return self._commit_id.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def event(self):
"""
:type: string
"""
self._completeIfNotSet(self._event)
return self._event.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue(self):
"""
:type: :class:`github.Issue.Issue`
"""
self._completeIfNotSet(self._issue)
return self._issue.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def _initAttributes(self):
self._actor = github.GithubObject.NotSet
self._commit_id = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._event = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._issue = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "actor" in attributes: # pragma no branch
self._actor = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["actor"])
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "event" in attributes: # pragma no branch
self._event = self._makeStringAttribute(attributes["event"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue" in attributes: # pragma no branch
self._issue = self._makeClassAttribute(github.Issue.Issue, attributes["issue"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
|
anisyonk/pilot
|
refs/heads/master
|
saga/adaptors/redis/redis_1.py
|
10
|
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2013, The SAGA Project"
__license__ = "MIT"
import redis
r = redis.Redis (host='localhost', password='securedis')
print "------------------ set / get ------------------"
print r.set ("name", "DeGizmo")
print r.get ("name")
print "------------------ set / inc / decr / get -----"
print r.set ("hit_counter", 1)
print r.incr ("hit_counter")
print r.get ("hit_counter")
print r.decr ("hit_counter")
print r.get ("hit_counter")
print "------------------ rpush / lrange / ... -------"
print r.rpush ("members", "Adam")
print r.rpush ("members", "Bob")
print r.rpush ("members", "Carol")
print r.lrange ("members", 0, -1)
print r.llen ("members")
print r.lindex ("members", 1)
print "------------------ dict set -------------------"
print r.hmset ("key1", {'11' : 'ONE', '1' : 'one'})
print r.hmset ("key2", {'22' : 'TWO', '2' : 'two'})
print r.hmset ("key3", {'33' : 'TRE', '3' : 'tre'})
print "------------------ pipeline dict get ----------"
pipe = r.pipeline()
for key in ['key1', 'key2', 'key3'] :
pipe.hgetall (key)
for val in pipe.execute ():
print val
print "------------------ list keys ------------------"
print r.keys ("*")
print "------------------ lua script -----------------"
lua = """
local value = redis.call ('GET', KEYS[1])
value = tonumber (value)
return value * ARGV[1]"""
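# register_script returns a callable Script object; redis-py normally runs it
# via EVALSHA and transparently loads the script first if the server does not
# have it cached yet.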
multiply = r.register_script (lua)
print r.set ('foo', 2)
print multiply (keys=['foo'], args=[5])
print "-----------------------------------------------"
|
suzp1984/shadowsocks
|
refs/heads/master
|
shadowsocks/tcprelay.py
|
922
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
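# Linux-specific sendto() flag that carries data in the SYN of a
# TCP Fast Open connection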
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
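# Editorial sketch (not part of the upstream comments): as sslocal the stages
# above map roughly onto the SOCKS5 handshake bytes --
#   stage 0: client greeting b'\x05\x01\x00' is answered with b'\x05\x00' (no auth)
#   stage 1: client request VER CMD RSV ATYP DST.ADDR DST.PORT; VER/CMD/RSV are
#            trimmed and the rest is decoded by parse_header()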
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
self._server)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _update_activity(self, data_len=0):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
            self._write_to_sock(b'\x05\x00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
        # introduced, most likely memory leaks, etc.
if self._stage == STAGE_DESTROYED:
            # this shouldn't happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
# we trim the timeouts once a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
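                # 23 is TCP_FASTOPEN on Linux (the constant is not exposed by
                # older Python socket modules); 5 is the length of the pending
                # fast-open queue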
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
            raise Exception('already added to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler, data_len):
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
# tornado's timeout memory management is more flexible than we need
# we just need a sorted last_activity queue and it's faster than heapq
# in fact we can do O(1) insertion/remove so we invent our own
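        # scheme: update_activity() appends the handler and records its index,
        # turning the old slot into None (lazy deletion); this sweep walks from
        # _timeout_offset, destroys expired handlers, and compacts the list
        # once the dead prefix exceeds TIMEOUTS_CLEAN_SIZE and half the queue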
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # compact the timeout queue once the swept (None) prefix
                # grows past TIMEOUTS_CLEAN_SIZE and half of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
def handle_event(self, sock, fd, event):
# handle events and dispatch to handlers
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
|
tbeadle/django
|
refs/heads/master
|
tests/aggregation/tests.py
|
31
|
from __future__ import unicode_literals
import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg, Count, DecimalField, DurationField, F, FloatField, Func, IntegerField,
Max, Min, Sum, Value,
)
from django.test import TestCase
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
'Using an aggregate in order_by() without also including it in '
'annotate() is not allowed: Avg(F(book__rating)'
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values('age').order_by(Avg('book__rating'))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg('authors__age'))
.values('pk', 'isbn', 'mean_age')
)
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = (
Book.objects
.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]['sql']
self.assertIn('SELECT COUNT(*) ', sql)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 2},
]
)
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 1},
{'rating': 4.0, 'count': 2},
]
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration')),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
{'duration__sum': datetime.timedelta(days=3)}
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[5, 6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum('age'))
self.assertEqual(age_sum['age__sum'], 103)
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = (
Author.objects
.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
).exclude(earliest_book=None).order_by("earliest_book").values(
'earliest_book',
'num_awards',
'id',
'name',
)
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Check that doing exclude() on a foreign model after annotate()
doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Check that aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
or select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(f[1][0] for f in forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
book = Book.objects.aggregate(price_sum=Sum('price'))
self.assertEqual(book['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertEqual(len(qs), 9)
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
self.assertIsInstance(v, float)
self.assertEqual(v, Approximate(47.39, places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
with self.assertRaisesMessage(FieldError, 'Expression contains mixed types. You must set output_field'):
Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk)
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField())).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
name="Adrian Holovaty").order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{
"name": 'Adrian Holovaty',
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"combined_age": 63
}
]
)
def test_annotate_values_aggregate(self):
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(name="Adrian Holovaty")
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(name="Adrian Holovaty")
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
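        # MyMax below keeps only the first source expression when compiling, so it can
        # accept extra arguments while still rendering as MAX(<first expression>).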
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super(MyMax, self).as_sql(compiler, connection)
with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price'))
def test_multi_arg_aggregate(self):
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super(MyMax, self).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Book.objects.aggregate(MyMax('pages', 'price'))
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Book.objects.annotate(MyMax('pages', 'price'))
Book.objects.aggregate(max_field=MyMax('pages', 'price'))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = dict(function=self.function.lower(), expressions=sql)
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra['function'] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = dict(function='MAX', expressions='2')
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, 'as_' + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('MAX('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
# Create a plain expression
class Greatest(Func):
function = 'GREATEST'
def as_sqlite(self, compiler, connection):
return super(Greatest, self).as_sql(compiler, connection, function='MAX')
qs = Publisher.objects.annotate(
price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = Publisher.objects.annotate(
rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
output_field=FloatField())
).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs2, [1, 3], lambda v: v.num_awards)
|
Hamza-Megahed/des-calculator
|
refs/heads/master
|
des-decryption.py
|
1
|
#!/usr/bin/python3
# ============================================================================
# Name : des-decryption.py
# Author : Hamza Megahed
# Version : 1.0
# Copyright : Copyright 2014 Hamza Megahed
# Description : DES Decryption Algorithm
# ============================================================================
#
#
# ============================================================================
# This file is part of DES Calculator.
#
# DES Calculator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DES Calculator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DES Calculator. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
import itertools
from keygen import *
# Convert the ciphertext input from hex to binary
pt_hexinput = input("Enter The CipherText in Hex(16 digits):\n")
try:
    int(pt_hexinput, 16)
except ValueError:
    print("That is an invalid hex value")
    raise SystemExit(1)
if len(pt_hexinput) != 16:
    raise ValueError('The ciphertext must be exactly 16 hex digits')
pt_bininput=bin(int(pt_hexinput, 16))[2:].zfill(64)
pt= []
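# Pad index 0 so the permutation tables below can use 1-based bit positions,
# matching the DES specification tables.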
pt.append(0)
for digit in str(pt_bininput):
pt.append(int(digit))
# Initial permutation
IP= [pt[58], pt[50], pt[42], pt[34], pt[26], pt[18], pt[10], pt[2],
pt[60], pt[52], pt[44], pt[36], pt[28], pt[20], pt[12], pt[4],
pt[62], pt[54], pt[46], pt[38], pt[30], pt[22], pt[14], pt[6],
pt[64], pt[56], pt[48], pt[40], pt[32], pt[24], pt[16], pt[8],
pt[57], pt[49], pt[41], pt[33], pt[25], pt[17], pt[9], pt[1],
pt[59], pt[51], pt[43], pt[35], pt[27], pt[19], pt[11], pt[3],
pt[61], pt[53], pt[45], pt[37], pt[29], pt[21], pt[13], pt[5],
pt[63], pt[55], pt[47], pt[39], pt[31], pt[23], pt[15], pt[7]]
#Permutation Function
def permu(perm):
p= [perm[15], perm[6], perm[19], perm[20],
perm[28], perm[11], perm[27], perm[16],
perm[0], perm[14], perm[22], perm[25],
perm[4], perm[17], perm[30], perm[9],
perm[1], perm[7], perm[23], perm[13],
perm[31], perm[26], perm[2], perm[8],
perm[18], perm[12], perm[29], perm[5],
perm[21], perm[10], perm[3], perm[24]]
return (p)
#Left side
L_IP = [pt[58], pt[50], pt[42], pt[34], pt[26], pt[18], pt[10], pt[2],
pt[60], pt[52], pt[44], pt[36], pt[28], pt[20], pt[12], pt[4],
pt[62], pt[54], pt[46], pt[38], pt[30], pt[22], pt[14], pt[6],
pt[64], pt[56], pt[48], pt[40], pt[32], pt[24], pt[16], pt[8]]
#Right side
R_IP = [pt[57], pt[49], pt[41], pt[33], pt[25], pt[17], pt[9], pt[1],
pt[59], pt[51], pt[43], pt[35], pt[27], pt[19], pt[11], pt[3],
pt[61], pt[53], pt[45], pt[37], pt[29], pt[21], pt[13], pt[5],
pt[63], pt[55], pt[47], pt[39], pt[31], pt[23], pt[15], pt[7]]
#Expand right side from 32 bits to 48 bits
def extend(ex):
EX = [ex[31], ex[0], ex[1], ex[2], ex[3], ex[4],
ex[3], ex[4], ex[5], ex[6], ex[7], ex[8],
ex[7], ex[8], ex[9], ex[10], ex[11], ex[12],
ex[11], ex[12], ex[13], ex[14], ex[15], ex[16],
ex[15], ex[16], ex[17], ex[18], ex[19], ex[20],
ex[19], ex[20], ex[21], ex[22], ex[23], ex[24],
ex[23], ex[24], ex[25], ex[26], ex[27], ex[28],
ex[27], ex[28], ex[29], ex[30], ex[31], ex[0]]
return (EX)
#S-Boxes
def S_Boxes():
S1 = [[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]]
S2 = [[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]]
S3 = [[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]]
S4 = [[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]]
S5 = [[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]]
S6 = [[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]]
S7 = [[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]]
S8 = [[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]
return [S1, S2, S3, S4, S5, S6, S7, S8]
#////////////////////////////////////////////////////////////////////////////#
EX_R=extend(R_IP)
print("Initial Permutation =",format(IP))
print("Left ",format(L_IP))
print("Right ",format(R_IP))
r=1
for x in range(16):
print("================================================================================")
print ("==========")
print ("Round ",format(r))
print ("==========\n")
r+=1
print("Expanded Right ",format(EX_R))
new=[]
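    # Round keys come from keygen.gend(); for decryption they are assumed to be
    # returned in decryption order (K16 first). The key schedule itself lives in
    # keygen.py, which is not shown here.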
sub_key=gend(x)
print("Round Key ",format(sub_key))
for i in range(48):
new.append(EX_R[i] ^ sub_key[i])
print("XOR result ",format(new))
new= list(map(str, new))
temp=0
temp1=[]
s_box = S_Boxes()
y=0
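    # S-box substitution: each 6-bit group selects a row from its two outer bits
    # (bits 1 and 6) and a column from the inner four bits, yielding a 4-bit output.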
for x in range (0,48,6):
temp = s_box[y][int(''.join(new[x]+new[x+5]),2)][int(''.join(new[x+1:x+5]),2)]
if y < 8:
y+=1
temp=(bin(int(temp))[2:].zfill(4))
temp1.append([int(i) for i in str(temp)])
temp1 = list(itertools.chain(*temp1))
print("F Function output ",format(temp1))
temp1=permu(temp1)
print("Output of permutation function ",format(temp1))
temp2=[]
for i in range(32):
temp2.append(temp1[i] ^ L_IP[i])
L_IP=R_IP
R_IP=temp2
if r==17:
break
print("New Right ",format(R_IP))
print("New Left ",format(L_IP))
EX_R=extend(R_IP)
R_IP, L_IP = L_IP, R_IP
res=L_IP+R_IP
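# Final permutation (the inverse of IP). res is 0-indexed here, so res[39]
# corresponds to bit 40 of the preoutput block in the standard IP^-1 table.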
invIP = [res[39], res[7], res[47], res[15], res[55], res[23], res[63], res[31],
res[38], res[6], res[46], res[14], res[54], res[22], res[62], res[30],
res[37], res[5], res[45], res[13], res[53], res[21], res[61], res[29],
res[36], res[4], res[44], res[12], res[52], res[20], res[60], res[28],
res[35], res[3], res[43], res[11], res[51], res[19], res[59], res[27],
res[34], res[2], res[42], res[10], res[50], res[18], res[58], res[26],
res[33], res[1], res[41], res[9], res[49], res[17], res[57], res[25],
res[32], res[0], res[40], res[8], res[48], res[16], res[56], res[24]]
print("================================================================================\n")
print("CipherText in Binary = ",format(invIP))
invIP= list(map(str, invIP))
invIP=''.join(invIP)
invIP=hex(int(invIP, 2))[2:].zfill(16)
print("CipherText in Hex = ",format(invIP))
|
mycodeday/crm-platform
|
refs/heads/master
|
l10n_si/__init__.py
|
439
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright: (C) 2012 - Mentis d.o.o., Dravograd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
|
ya-mouse/python-opcua
|
refs/heads/master
|
opcua/binary_server.py
|
2
|
"""
Socket server forwarding request to internal server
"""
import logging
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from threading import Thread
from threading import Condition
from opcua import ua
from opcua.uaprocessor import UAProcessor
logger = logging.getLogger(__name__)
class BinaryServer(Thread):
"""
Socket server forwarding request to internal server
"""
def __init__(self, internal_server, hostname, port):
Thread.__init__(self)
self.socket_server = None
self.hostname = hostname
self.port = port
self.iserver = internal_server
self._cond = Condition()
def start(self):
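        # Block until run() has created and bound the socket server and notified us,
        # so callers can rely on the server listening once start() returns.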
with self._cond:
Thread.start(self)
self._cond.wait()
def run(self):
logger.warning("Listening on %s:%s", self.hostname, self.port)
        socketserver.TCPServer.allow_reuse_address = True  # get rid of the "address already in use" warning
self.socket_server = ThreadingTCPServer((self.hostname, self.port), UAHandler)
# self.socket_server.daemon_threads = True # this will force a shutdown of all threads, maybe too hard
        self.socket_server.internal_server = self.iserver  # allow the handler to access server properties
with self._cond:
self._cond.notify_all()
self.socket_server.serve_forever()
def stop(self):
logger.warning("server shutdown request")
self.socket_server.shutdown()
class UAHandler(socketserver.BaseRequestHandler):
"""
The RequestHandler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
sock = ua.utils.SocketWrapper(self.request)
processor = UAProcessor(self.server.internal_server, sock, self.client_address)
try:
while True:
hdr = ua.Header.from_string(sock)
body = sock.read(hdr.body_size)
ret = processor.process(hdr, ua.utils.Buffer(body))
if not ret:
break
except ua.utils.SocketClosedException:
logger.warning("Server has closed connection")
except Exception:
logger.exception("Exception raised while parsing message from client, closing")
class ThreadingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
|
aperigault/ansible
|
refs/heads/devel
|
test/units/modules/network/netscaler/test_netscaler_cs_policy.py
|
68
|
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat.mock import patch, Mock, MagicMock, call
import sys
if sys.version_info[:2] != (2, 6):
import requests
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
class TestNetscalerCSPolicyModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.cs': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.cs.cspolicy': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
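    # Helper that populates only the common connection arguments; individual
    # tests patch in whatever module-specific state they need.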
def set_module_state(self, state):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state=state,
))
def setUp(self):
super(TestNetscalerCSPolicyModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
def tearDown(self):
super(TestNetscalerCSPolicyModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
self.set_module_state('present')
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_cs_policy
self.module = netscaler_cs_policy
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_cs_policy.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_cs_policy.nitro_exception', MockException):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
get_nitro_client=m,
nitro_exception=self.MockException,
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_create_non_existing_cs_policy(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[False, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_has_calls([call.add()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_update_cs_policy_when_cs_policy_differs(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[True, True])
policy_identical_mock = Mock(side_effect=[False, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
policy_identical=policy_identical_mock,
ensure_feature_is_enabled=Mock(),
nitro_exception=self.MockException,
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_has_calls([call.update()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_no_change_to_module_when_all_identical(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[True, True])
policy_identical_mock = Mock(side_effect=[True, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
policy_identical=policy_identical_mock,
ensure_feature_is_enabled=Mock(),
nitro_exception=self.MockException,
):
self.module = netscaler_cs_policy
result = self.exited()
self.assertFalse(result['changed'], msg='Erroneous changed status update')
def test_absent_operation(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[True, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_has_calls([call.delete()])
self.assertTrue(result['changed'], msg='Changed status not set correctly')
def test_absent_operation_no_change(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[False, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_not_called()
self.assertFalse(result['changed'], msg='Changed status not set correctly')
def test_graceful_nitro_exception_operation_present(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
policy_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation present'
)
def test_graceful_nitro_exception_operation_absent(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_cs_policy
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
policy_exists=m,
nitro_exception=MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
def test_ensure_feature_is_enabled_called(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
client_mock = Mock()
ensure_feature_is_enabled_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
get_nitro_client=Mock(return_value=client_mock),
policy_exists=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
):
self.module = netscaler_cs_policy
result = self.exited()
ensure_feature_is_enabled_mock.assert_has_calls([call(client_mock, 'CS')])
|
dieface/erpnext
|
refs/heads/develop
|
erpnext/selling/page/sales_browser/sales_browser.py
|
52
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_children():
ctype = frappe.local.form_dict.get('ctype')
frappe.local.form_dict['parent_field'] = 'parent_' + ctype.lower().replace(' ', '_')
if not frappe.form_dict.get('parent'):
frappe.local.form_dict['parent'] = ''
return frappe.db.sql("""select name as value,
if(is_group='Yes', 1, 0) as expandable
from `tab%(ctype)s`
where docstatus < 2
and ifnull(%(parent_field)s,'') = "%(parent)s"
order by name""" % frappe.local.form_dict, as_dict=1)
@frappe.whitelist()
def add_node():
ctype = frappe.form_dict.get('ctype')
parent_field = 'parent_' + ctype.lower().replace(' ', '_')
name_field = ctype.lower().replace(' ', '_') + '_name'
doc = frappe.new_doc(ctype)
doc.update({
name_field: frappe.form_dict['name_field'],
parent_field: frappe.form_dict['parent'],
"is_group": frappe.form_dict['is_group']
})
if ctype == "Sales Person":
doc.employee = frappe.form_dict.get('employee')
doc.save()
|
adfernandes/mbed
|
refs/heads/master
|
tools/test/toolchains/api_test.py
|
13
|
"""
Copyright (c) 2017-2019 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
from string import printable
from copy import deepcopy
from mock import MagicMock, patch
from hypothesis import given, settings, HealthCheck
from hypothesis.strategies import text, lists, fixed_dictionaries, booleans
"""Tests for the toolchain sub-system"""
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..",
".."))
sys.path.insert(0, ROOT)
from tools.toolchains import (
TOOLCHAIN_CLASSES,
TOOLCHAIN_PATHS,
mbedToolchain,
)
from tools.resources import LEGACY_TOOLCHAIN_NAMES, Resources, FileType
from tools.targets import TARGET_MAP, set_targets_json_location
from tools.notifier.mock import MockNotifier
ALPHABET = [char for char in printable if char not in [u'.', u'/', u'\\']]
#Create a global test target
test_target_map = TARGET_MAP["K64F"]
#We have to add ARMC5 and uARM here to supported_toolchains, otherwise the creation of the ARM class would fail as it won't find an ARMC5 entry in supported_toolchains.
#We also have to add uARM because the ARM_MICRO class checks for both uARM and ARMC5 in supported_toolchains (ARM_MICRO represents ARMC5 + Micro).
#And do this globally here so all tests can use this
test_target_map.supported_toolchains.append("ARMC5")
test_target_map.supported_toolchains.append("uARM")
@patch('tools.toolchains.arm.run_cmd')
def test_armc5_version_check(_run_cmd):
set_targets_json_location()
_run_cmd.return_value = ("""
Product: ARM Compiler 5.06
Component: ARM Compiler 5.06 update 5 (build 528)
Tool: armcc [4d3621]
""", "", 0)
notifier = MockNotifier()
target_map = TARGET_MAP["K64F"]
    #We have to add ARMC5 here to supported_toolchains, otherwise the creation of the ARM class would fail as it won't find an ARMC5 entry in supported_toolchains
target_map.supported_toolchains.append("ARMC5")
toolchain = TOOLCHAIN_CLASSES["ARM"](target_map, notify=notifier)
toolchain.version_check()
assert notifier.messages == []
_run_cmd.return_value = ("""
Product: MDK Professional 5.22
Component: ARM Compiler 5.06 update 5 (build 528)
Tool: armcc [4d3621]
""", "", 0)
toolchain.version_check()
assert notifier.messages == []
_run_cmd.return_value = ("""
Product: ARM Compiler
Component: ARM Compiler
Tool: armcc [4d3621]
""", "", 0)
toolchain.version_check()
assert len(notifier.messages) == 1
@patch('tools.toolchains.arm.run_cmd')
def test_armc6_version_check(_run_cmd):
set_targets_json_location()
notifier = MockNotifier()
toolchain = TOOLCHAIN_CLASSES["ARMC6"](TARGET_MAP["K64F"], notify=notifier)
_run_cmd.return_value = ("""
Product: ARM Compiler 6.11 Professional
Component: ARM Compiler 6.11
Tool: armclang [5d3b4200]
""", "", 0)
toolchain.version_check()
assert notifier.messages == []
assert not toolchain.is_mbed_studio_armc6
_run_cmd.return_value = ("""
armclang: error: Failed to check out a license.
The provided license does not enable these tools.
Information about this error is available at: http://ds.arm.com/support/lic56/m5
General licensing information is available at: http://ds.arm.com/support/licensing/
If you need further help, provide this complete error report to your supplier or license.support@arm.com.
- ARMLMD_LICENSE_FILE: unset
- LM_LICENSE_FILE: unset
- ARM_TOOL_VARIANT: unset
- ARM_PRODUCT_PATH: unset
- Product location: C:\MbedStudio\tools\ac6\sw\mappings
- Toolchain location: C:\MbedStudio\tools\ac6\bin
- Selected tool variant: product
- Checkout feature: mbed_armcompiler
- Feature version: 5.0201810
- Flex error code: -5
Product: ARM Compiler 6.11 for Mbed Studio
Component: ARM Compiler 6.11
Tool: armclang [5d3b3c00]
""", "", 0)
toolchain.version_check()
assert notifier.messages == []
assert toolchain.is_mbed_studio_armc6
@patch('tools.toolchains.iar.run_cmd')
def test_iar_version_check(_run_cmd):
set_targets_json_location()
_run_cmd.return_value = ("""
IAR ANSI C/C++ Compiler V8.32.1/LNX for ARM
""", "", 0)
notifier = MockNotifier()
toolchain = TOOLCHAIN_CLASSES["IAR"](TARGET_MAP["K64F"], notify=notifier)
toolchain.version_check()
assert notifier.messages == []
_run_cmd.return_value = ("""
IAR ANSI C/C++ Compiler V/LNX for ARM
""", "", 0)
toolchain.version_check()
assert len(notifier.messages) == 1
_run_cmd.return_value = ("""
IAR ANSI C/C++ Compiler V/8.80LNX for ARM
""", "", 0)
toolchain.version_check()
assert len(notifier.messages) == 2
@patch('tools.toolchains.gcc.run_cmd')
def test_gcc_version_check(_run_cmd):
set_targets_json_location()
_run_cmd.return_value = ("""
arm-none-eabi-gcc (Arch Repository) 6.4.4
Copyright (C) 2018 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
""", "", 0)
notifier = MockNotifier()
toolchain = TOOLCHAIN_CLASSES["GCC_ARM"](
TARGET_MAP["K64F"], notify=notifier)
toolchain.version_check()
assert len(notifier.messages) == 1
_run_cmd.return_value = ("""
arm-none-eabi-gcc (GNU Tools for Arm Embedded Processors 9-2019-q4-major) 9.2.1 20191025 (release) [ARM/arm-9-branch revision 277599]
Copyright (C) 2019 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
""", "", 0)
toolchain.version_check()
assert len(notifier.messages) == 1
_run_cmd.return_value = ("""
arm-none-eabi-gcc (GNU Tools for Arm Embedded Processors 10-2020-q4-major) 10.2.1 20201025 (release) [ARM/arm-10-branch revision 377599]
Copyright (C) 2020 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
""", "", 0)
toolchain.version_check()
assert len(notifier.messages) == 2
_run_cmd.return_value = ("""
arm-none-eabi-gcc (Arch Repository)
Copyright (C) 2018 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
""", "", 0)
toolchain.version_check()
assert len(notifier.messages) == 3
@given(fixed_dictionaries({
'common': lists(text()),
'c': lists(text()),
'cxx': lists(text()),
'asm': lists(text()),
'ld': lists(text())}),
lists(text(min_size=1, alphabet=ALPHABET), min_size=1))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_toolchain_profile_c(profile, source_file):
"""Test that the appropriate profile parameters are passed to the
C compiler"""
filename = deepcopy(source_file)
filename[-1] += ".c"
to_compile = os.path.join(*filename)
set_targets_json_location()
with patch('os.mkdir') as _mkdir:
for _, tc_class in TOOLCHAIN_CLASSES.items():
toolchain = tc_class(test_target_map, build_profile=profile,
notify=MockNotifier())
toolchain.inc_md5 = ""
toolchain.build_dir = ""
toolchain.config = MagicMock(app_config_location=None)
for parameter in profile['c'] + profile['common']:
assert any(parameter in cmd for cmd in toolchain.cc), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
compile_command = toolchain.compile_command(to_compile,
to_compile + ".o", [])
for parameter in profile['c'] + profile['common']:
assert any(parameter in cmd for cmd in compile_command), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
@given(fixed_dictionaries({
'common': lists(text()),
'c': lists(text()),
'cxx': lists(text()),
'asm': lists(text()),
'ld': lists(text())}),
lists(text(min_size=1, alphabet=ALPHABET), min_size=1))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_toolchain_profile_cpp(profile, source_file):
"""Test that the appropriate profile parameters are passed to the
C++ compiler"""
filename = deepcopy(source_file)
filename[-1] += ".cpp"
to_compile = os.path.join(*filename)
with patch('os.mkdir') as _mkdir:
for _, tc_class in TOOLCHAIN_CLASSES.items():
toolchain = tc_class(test_target_map, build_profile=profile,
notify=MockNotifier())
toolchain.inc_md5 = ""
toolchain.build_dir = ""
toolchain.config = MagicMock(app_config_location=None)
for parameter in profile['cxx'] + profile['common']:
assert any(parameter in cmd for cmd in toolchain.cppc), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
compile_command = toolchain.compile_command(to_compile,
to_compile + ".o", [])
for parameter in profile['cxx'] + profile['common']:
assert any(parameter in cmd for cmd in compile_command), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
@given(fixed_dictionaries({
'common': lists(text()),
'c': lists(text()),
'cxx': lists(text()),
'asm': lists(text()),
'ld': lists(text())}),
lists(text(min_size=1, alphabet=ALPHABET), min_size=1))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_toolchain_profile_asm(profile, source_file):
"""Test that the appropriate profile parameters are passed to the
Assembler"""
filename = deepcopy(source_file)
filename[-1] += ".s"
to_compile = os.path.join(*filename)
with patch('os.mkdir') as _mkdir:
for _, tc_class in TOOLCHAIN_CLASSES.items():
toolchain = tc_class(test_target_map, build_profile=profile,
notify=MockNotifier())
toolchain.inc_md5 = ""
toolchain.build_dir = ""
toolchain.config = MagicMock()
toolchain.config.get_config_data_macros.return_value = []
for parameter in profile['asm']:
assert any(parameter in cmd for cmd in toolchain.asm), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
compile_command = toolchain.compile_command(to_compile,
to_compile + ".o", [])
if not compile_command:
assert compile_command, to_compile
for parameter in profile['asm']:
assert any(parameter in cmd for cmd in compile_command), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
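    # Sanity check: every registered toolchain class must report either its
    # registry key or the corresponding legacy alias as its name.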
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(test_target_map, notify=MockNotifier())
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
@given(fixed_dictionaries({
'common': lists(text()),
'c': lists(text()),
'cxx': lists(text()),
'asm': lists(text()),
'ld': lists(text(min_size=1))}),
lists(text(min_size=1, alphabet=ALPHABET), min_size=1))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_toolchain_profile_ld(profile, source_file):
"""Test that the appropriate profile parameters are passed to the
Linker"""
filename = deepcopy(source_file)
filename[-1] += ".o"
to_compile = os.path.join(*filename)
with patch('os.mkdir') as _mkdir,\
patch('tools.toolchains.mbedToolchain.default_cmd') as _dflt_cmd:
for _, tc_class in TOOLCHAIN_CLASSES.items():
toolchain = tc_class(test_target_map, build_profile=profile,
notify=MockNotifier())
toolchain.RESPONSE_FILES = False
toolchain.inc_md5 = ""
toolchain.build_dir = ""
for parameter in profile['ld']:
assert any(parameter in cmd for cmd in toolchain.ld), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
toolchain.link(to_compile + ".elf", [to_compile], [], [], None)
compile_cmd = _dflt_cmd.call_args_list
if not compile_cmd:
assert compile_cmd, to_compile
for parameter in profile['ld']:
assert any(parameter in cmd[0][0] for cmd in compile_cmd), \
"Toolchain %s did not propagate arg %s" % (toolchain.name,
parameter)
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(test_target_map, notify=MockNotifier())
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
@given(lists(text(alphabet=ALPHABET, min_size=1), min_size=1))
def test_detect_duplicates(filenames):
c_sources = [os.path.join(name, "dupe.c") for name in filenames]
s_sources = [os.path.join(name, "dupe.s") for name in filenames]
cpp_sources = [os.path.join(name, "dupe.cpp") for name in filenames]
notify = MockNotifier()
res = Resources(notify)
res.add_files_to_type(FileType.C_SRC, c_sources)
res.add_files_to_type(FileType.ASM_SRC, s_sources)
res.add_files_to_type(FileType.CPP_SRC, cpp_sources)
assert res.detect_duplicates() == 1,\
"Not Enough duplicates found"
notification = notify.messages[0]
assert "dupe.o" in notification["message"]
assert "dupe.s" in notification["message"]
assert "dupe.c" in notification["message"]
assert "dupe.cpp" in notification["message"]
@given(text(alphabet=ALPHABET + [os.sep], min_size=1))
@given(booleans())
@given(booleans())
@settings(max_examples=20)
def test_path_specified_gcc(gcc_loc, exists_at_loc, exists_in_path):
with patch('tools.toolchains.gcc.exists') as _exists:
with patch('tools.toolchains.gcc.find_executable') as _find:
_exists.return_value = exists_at_loc
_find.return_value = exists_in_path
TOOLCHAIN_PATHS['GCC_ARM'] = gcc_loc
toolchain_class = TOOLCHAIN_CLASSES["GCC_ARM"]
found_p = toolchain_class.check_executable()
assert found_p == (exists_at_loc or exists_in_path)
if exists_at_loc:
assert TOOLCHAIN_PATHS['GCC_ARM'] == gcc_loc
elif exists_in_path:
assert TOOLCHAIN_PATHS['GCC_ARM'] == ''
|
ignatenkobrain/dnf
|
refs/heads/master
|
dnf/automatic/main.py
|
9
|
# __init__.py
# dnf.automatic CLI
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from dnf.i18n import _, ucd
import dnf
import dnf.automatic.emitter
import dnf.cli
import dnf.cli.cli
import dnf.cli.output
import dnf.conf.parser
import dnf.const
import dnf.exceptions
import dnf.util
import dnf.logging
import dnf.yum.config
import hawkey
import iniparse.compat
import logging
import socket
import argparse
import random
import time
logger = logging.getLogger('dnf')
def build_emitters(conf):
emitters = dnf.util.MultiCallList([])
system_name = conf.emitters.system_name
for name in conf.emitters.emit_via:
if name == 'email':
emitter = dnf.automatic.emitter.EmailEmitter(system_name, conf.email)
emitters.append(emitter)
elif name == 'stdio':
emitter = dnf.automatic.emitter.StdIoEmitter(system_name)
emitters.append(emitter)
elif name == 'motd':
emitter = dnf.automatic.emitter.MotdEmitter(system_name)
emitters.append(emitter)
else:
assert False
return emitters
def parse_arguments(args):
parser = argparse.ArgumentParser()
parser.add_argument('conf_path', nargs='?', default=dnf.const.CONF_AUTOMATIC_FILENAME)
parser.add_argument('--timer', action='store_true')
return parser.parse_args(args), parser
class AutomaticConfig(object):
def __init__(self, filename):
self.commands = CommandsConfig()
self.email = EmailConfig()
self.emitters = EmittersConfig()
self._parser = None
self._load(filename)
self.commands.imply()
def _load(self, filename):
parser = iniparse.compat.ConfigParser()
config_pp = dnf.conf.parser.ConfigPreProcessor(filename)
try:
parser.readfp(config_pp)
except iniparse.compat.ParsingError as e:
raise dnf.exceptions.ConfigError("Parsing file failed: %s" % e)
self.commands.populate(parser, 'commands')
self.email.populate(parser, 'email')
self.emitters.populate(parser, 'emitters')
self._parser = parser
@property
def base_overrides(self):
return {k: v for (k, v) in self._parser.items('base')}
class CommandsConfig(dnf.yum.config.BaseConfig):
apply_updates = dnf.yum.config.BoolOption(False)
base_config_file = dnf.yum.config.Option('/etc/dnf/dnf.conf')
download_updates = dnf.yum.config.BoolOption(False)
upgrade_type = dnf.yum.config.SelectionOption(
'default', ('default', 'security'))
random_sleep = dnf.yum.config.SecondsOption(300)
def imply(self):
if self.apply_updates:
self.download_updates = True
class EmailConfig(dnf.yum.config.BaseConfig):
email_to = dnf.yum.config.ListOption(["root"])
email_from = dnf.yum.config.Option("root")
email_host = dnf.yum.config.Option("localhost")
email_port = dnf.yum.config.IntOption(25)
class EmittersConfig(dnf.yum.config.BaseConfig):
emit_via = dnf.yum.config.ListOption(['email', 'stdio'])
output_width = dnf.yum.config.IntOption(80)
system_name = dnf.yum.config.Option(socket.gethostname())
def main(args):
(opts, parser) = parse_arguments(args)
try:
conf = AutomaticConfig(opts.conf_path)
with dnf.Base() as base:
cli = dnf.cli.Cli(base)
cli.read_conf_file(conf.commands.base_config_file,
overrides=conf.base_overrides)
base_conf = base.conf
base_conf.cachedir, _alt_dir = dnf.cli.cli.cachedir_fit(base_conf)
logger.debug('Started dnf-automatic.')
if opts.timer:
sleeper = random.randint(0, conf.commands.random_sleep)
logger.debug('Sleep for %s seconds', sleeper)
time.sleep(sleeper)
base.read_all_repos()
base.fill_sack()
upgrade(base, conf.commands.upgrade_type)
base.resolve()
output = dnf.cli.output.Output(base, base.conf)
trans = base.transaction
if not trans:
return 0
lst = output.list_transaction(trans)
emitters = build_emitters(conf)
emitters.notify_available(lst)
if not conf.commands.download_updates:
emitters.commit()
return 0
base.download_packages(trans.install_set)
emitters.notify_downloaded()
if not conf.commands.apply_updates:
emitters.commit()
return 0
base.do_transaction()
emitters.notify_applied()
emitters.commit()
except dnf.exceptions.Error as exc:
logger.error(_('Error: %s'), ucd(exc))
return 1
return 0
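# Apply either a full upgrade or, for 'security', only upgrades of installed
# packages that carry a security advisory.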
def upgrade(base, upgrade_type):
if upgrade_type == 'default':
base.upgrade_all()
elif upgrade_type == 'security':
for pkg in base.sack.query().installed():
for advisory in pkg.get_advisories(hawkey.GT):
if advisory.type != hawkey.ADVISORY_SECURITY:
continue
base.upgrade(pkg.name)
else:
assert False
|
IndraVikas/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/tests/test_nearest_centroid.py
|
305
|
"""
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
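    # The centroids must not depend on how the class labels are encoded:
    # fitting with {0, 1} labels and with the original {-1, 1} labels should
    # produce identical centroids.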
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
|
LosFuzzys/CTFd
|
refs/heads/losctf
|
migrations/versions/1093835a1051_add_default_email_templates.py
|
4
|
"""Add default email templates
Revision ID: 1093835a1051
Revises: a03403986a32
Create Date: 2020-02-15 01:32:10.959373
"""
from alembic import op
from sqlalchemy.sql import column, table
from CTFd.models import db
from CTFd.utils.email import (
DEFAULT_PASSWORD_RESET_BODY,
DEFAULT_PASSWORD_RESET_SUBJECT,
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_BODY,
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_SUBJECT,
DEFAULT_USER_CREATION_EMAIL_BODY,
DEFAULT_USER_CREATION_EMAIL_SUBJECT,
DEFAULT_VERIFICATION_EMAIL_BODY,
DEFAULT_VERIFICATION_EMAIL_SUBJECT,
)
# revision identifiers, used by Alembic.
revision = "1093835a1051"
down_revision = "a03403986a32"
branch_labels = None
depends_on = None
configs_table = table(
"config", column("id", db.Integer), column("key", db.Text), column("value", db.Text)
)
def get_config(key):
connection = op.get_bind()
return connection.execute(
configs_table.select().where(configs_table.c.key == key).limit(1)
).fetchone()
def set_config(key, value):
connection = op.get_bind()
connection.execute(configs_table.insert().values(key=key, value=value))
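# Seed the default email templates only on instances that have already completed
# setup, and never overwrite a template key that is already configured.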
def upgrade():
    # Only run if this instance has already been set up before
if bool(get_config("setup")) is True:
for k, v in [
("password_reset_body", DEFAULT_PASSWORD_RESET_BODY),
("password_reset_subject", DEFAULT_PASSWORD_RESET_SUBJECT),
(
"successful_registration_email_body",
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_BODY,
),
(
"successful_registration_email_subject",
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_SUBJECT,
),
("user_creation_email_body", DEFAULT_USER_CREATION_EMAIL_BODY),
("user_creation_email_subject", DEFAULT_USER_CREATION_EMAIL_SUBJECT),
("verification_email_body", DEFAULT_VERIFICATION_EMAIL_BODY),
("verification_email_subject", DEFAULT_VERIFICATION_EMAIL_SUBJECT),
]:
if get_config(k) is None:
set_config(k, v)
def downgrade():
pass
|
edxzw/edx-platform
|
refs/heads/master
|
cms/urls_dev.py
|
201
|
"""
URLconf for development-only views.
This gets imported by urls.py and added to its URLconf if we are running in
development mode; otherwise, it is ignored.
"""
from django.conf.urls import url
urlpatterns = (
url(r'^dev_mode$', 'contentstore.views.dev.dev_mode', name='dev_mode'),
url(r'^template/(?P<template>.+)$', 'contentstore.views.dev.dev_show_template'),
)
|
FlyLu/rt-thread
|
refs/heads/master
|
bsp/imxrt/imxrt1052-nxp-evk/rtconfig.py
|
5
|
import os
import sys
# toolchains options
ARCH='arm'
CPU='cortex-m7'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
STRIP = PREFIX + 'strip'
DEVICE = ' -mcpu=' + CPU + ' -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Wall -D__FPU_PRESENT -eentry'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb -D__START=entry'
LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -gdwarf-2'
AFLAGS += ' -gdwarf-2'
CFLAGS += ' -O0'
else:
CFLAGS += ' -O2 -Os'
POST_ACTION = OBJCPY + ' -O binary --remove-section=.boot_data --remove-section=.image_vertor_table --remove-section=.ncache $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
# module setting
CXXFLAGS = ' -Woverloaded-virtual -fno-exceptions -fno-rtti '
M_CFLAGS = CFLAGS + ' -mlong-calls -fPIC '
M_CXXFLAGS = CXXFLAGS + ' -mlong-calls -fPIC'
M_LFLAGS = DEVICE + CXXFLAGS + ' -Wl,--gc-sections,-z,max-page-size=0x4' +\
' -shared -fPIC -nostartfiles -static-libgcc'
M_POST_ACTION = STRIP + ' -R .hash $TARGET\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu ' + CPU + '.fp.sp'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --libpath "' + EXEC_PATH + '\ARM\ARMCC\lib" --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter "board\linker_scripts\link.sct"'
CFLAGS += ' --diag_suppress=66,1296,186,6314'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' --c99'
POST_ACTION = 'fromelf -z $TARGET'
# POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' -D__FPU_PRESENT'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=' + CPU
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu ' + CPU
AFLAGS += ' --fpu None'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT):
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT)
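# Illustrative note (not part of the original rtconfig.py): the toolchain and
# compiler location above are driven by the RTT_CC and RTT_EXEC_PATH
# environment variables, so a build wrapper could pre-select them before scons
# imports this file.  The values below are placeholders, not real defaults.
#
#     os.environ.setdefault('RTT_CC', 'gcc')
#     os.environ.setdefault('RTT_EXEC_PATH', '/opt/gcc-arm-none-eabi/bin')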
|
dukhlov/oslo.messaging
|
refs/heads/master
|
oslo_messaging/tests/notify/test_logger.py
|
1
|
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import logging
import logging.config
import os
import sys
from oslo_utils import timeutils
import testscenarios
import oslo_messaging
from oslo_messaging.tests.notify import test_notifier
from oslo_messaging.tests import utils as test_utils
from six.moves import mock
load_tests = testscenarios.load_tests_apply_scenarios
# Stolen from openstack.common.logging
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
class TestLogNotifier(test_utils.BaseTestCase):
scenarios = [
('debug', dict(priority='debug')),
('info', dict(priority='info')),
('warning', dict(priority='warning', queue='WARN')),
('warn', dict(priority='warn')),
('error', dict(priority='error')),
('critical', dict(priority='critical')),
('audit', dict(priority='audit')),
]
def setUp(self):
super(TestLogNotifier, self).setUp()
self.addCleanup(oslo_messaging.notify._impl_test.reset)
self.config(driver=['test'],
group='oslo_messaging_notifications')
# NOTE(jamespage) disable thread information logging for testing
# as this causes test failures when zmq tests monkey_patch via
# eventlet
logging.logThreads = 0
@mock.patch('oslo_utils.timeutils.utcnow')
def test_logger(self, mock_utcnow):
with mock.patch('oslo_messaging.transport.get_transport',
return_value=test_notifier._FakeTransport(self.conf)):
self.logger = oslo_messaging.LoggingNotificationHandler('test://')
mock_utcnow.return_value = datetime.datetime.utcnow()
levelno = getattr(logging, self.priority.upper(), 42)
record = logging.LogRecord('foo',
levelno,
'/foo/bar',
42,
'Something happened',
None,
None)
self.logger.emit(record)
context = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][0]
self.assertEqual({}, context)
n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1]
self.assertEqual(getattr(self, 'queue', self.priority.upper()),
n['priority'])
self.assertEqual('logrecord', n['event_type'])
self.assertEqual(str(timeutils.utcnow()), n['timestamp'])
self.assertIsNone(n['publisher_id'])
self.assertEqual(
{'process': os.getpid(),
'funcName': None,
'name': 'foo',
'thread': None,
'levelno': levelno,
'processName': 'MainProcess',
'pathname': '/foo/bar',
'lineno': 42,
'msg': 'Something happened',
'exc_info': None,
'levelname': logging.getLevelName(levelno),
'extra': None},
n['payload'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_logging_conf(self, mock_utcnow):
with mock.patch('oslo_messaging.transport.get_transport',
return_value=test_notifier._FakeTransport(self.conf)):
logging.config.dictConfig({
'version': 1,
'handlers': {
'notification': {
'class': 'oslo_messaging.LoggingNotificationHandler',
'level': self.priority.upper(),
'url': 'test://',
},
},
'loggers': {
'default': {
'handlers': ['notification'],
'level': self.priority.upper(),
},
},
})
mock_utcnow.return_value = datetime.datetime.utcnow()
levelno = getattr(logging, self.priority.upper())
logger = logging.getLogger('default')
lineno = sys._getframe().f_lineno + 1
logger.log(levelno, 'foobar')
n = oslo_messaging.notify._impl_test.NOTIFICATIONS[0][1]
self.assertEqual(getattr(self, 'queue', self.priority.upper()),
n['priority'])
self.assertEqual('logrecord', n['event_type'])
self.assertEqual(str(timeutils.utcnow()), n['timestamp'])
self.assertIsNone(n['publisher_id'])
pathname = __file__
if pathname.endswith(('.pyc', '.pyo')):
pathname = pathname[:-1]
self.assertDictEqual(
n['payload'],
{'process': os.getpid(),
'funcName': 'test_logging_conf',
'name': 'default',
'thread': None,
'levelno': levelno,
'processName': 'MainProcess',
'pathname': pathname,
'lineno': lineno,
'msg': 'foobar',
'exc_info': None,
'levelname': logging.getLevelName(levelno),
'extra': None})
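# Application-side sketch (an assumption, not part of this test module): the
# same handler the tests exercise can be attached to a logger directly; the
# transport URL and logger name here are placeholders.
#
#     handler = oslo_messaging.LoggingNotificationHandler('messaging://')
#     logging.getLogger('myapp').addHandler(handler)
#     logging.getLogger('myapp').warning('disk almost full')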
|
wroersma/volatility
|
refs/heads/master
|
volatility/renderers/html.py
|
9
|
import StringIO
from volatility.renderers.basic import Renderer
try:
import ujson as json
except ImportError:
import json
__author__ = 'mike'
class HTMLRenderer(Renderer):
def __init__(self):
pass
def render(self, outfd, data):
"""Renders the treegrid to HTML"""
column_titles = ", \n".join(["{ \"title\": \"" + column.name + "\"}" for column in data.columns])
json_buffer = StringIO.StringIO()  # local buffer; avoid shadowing the json module
JSONRenderer().render(json_buffer, data)
outfd.write("""<html>
<head>
<link rel="stylesheet" type="text/css" href="http://cdn.datatables.net/1.10.2/css/jquery.dataTables.css">
<script type="text/javascript" language="javascript" src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script type="text/javascript" language="javascript" src="http://cdn.datatables.net/1.10.2/js/jquery.dataTables.min.js"></script>
<script type="text/javascript" class="init">
var dataSet = """ + json_buffer.getvalue() + """;
$(document).ready(function() {
$('#page').html( '<table cellpadding="0" cellspacing="0" border="0" class="display" id="data"></table>' );
$('#data').dataTable( {
"data": dataSet['rows'],
"columns": [""" + column_titles + """]
} );
} );
</script>
</head>
<body><div id="page"></div></body></html>""" + "\n")
class JSONRenderer(Renderer):
def render_row(self, node, accumulator):
return accumulator + [node.values]
def render(self, outfd, data):
"""Renders a treegrid as columns/row items in JSON format"""
# TODO: Implement tree structure in JSON
if data.max_depth() > 1:
raise NotImplementedError("JSON output for trees has not yet been implemented")
# TODO: Output (basic) type information in JSON
json_input = {"columns": [column.name for column in data.columns], "rows": data.visit(None, self.render_row, [])}
return outfd.write(json.dumps(json_input))
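# Small illustrative helper (not in the original file): JSONRenderer above
# emits an object of the form {"columns": [...], "rows": [...]}, which can be
# re-read with nothing but the json module already imported at the top.
def _rows_as_dicts(rendered_json):
    """Pair each row with the column names produced by JSONRenderer."""
    data = json.loads(rendered_json)
    return [dict(zip(data["columns"], row)) for row in data["rows"]]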
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2016_09_01/aio/_configuration.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class WebSiteManagementClientConfiguration(Configuration):
"""Configuration for WebSiteManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(WebSiteManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2016-09-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-web/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
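# Illustrative only (not part of the generated file): the configuration above
# can be constructed with any async token credential; DefaultAzureCredential
# and the subscription id shown are assumptions for this sketch.
#
#     from azure.identity.aio import DefaultAzureCredential
#     config = WebSiteManagementClientConfiguration(
#         credential=DefaultAzureCredential(),
#         subscription_id="00000000-0000-0000-0000-000000000000",
#     )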
|
jonhadfield/ansible-modules-core
|
refs/heads/devel
|
system/seboolean.py
|
50
|
#!/usr/bin/python
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans.
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure
required: true
default: null
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot
required: false
default: no
choices: [ "yes", "no" ]
state:
description:
- Desired boolean value
required: true
default: null
choices: [ 'yes', 'no' ]
notes:
- Not tested on any Debian-based system
requirements: [ ]
author: "Stephen Fromm (@sfromm)"
'''
EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
'''
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import semanage
HAVE_SEMANAGE=True
except ImportError:
HAVE_SEMANAGE=False
def has_boolean_value(module, name):
bools = []
try:
rc, bools = selinux.security_get_boolean_names()
except OSError:
module.fail_json(msg="Failed to get list of boolean names")
if to_bytes(name) in bools:
return True
else:
return False
def get_boolean_value(module, name):
state = 0
try:
state = selinux.security_get_boolean_active(name)
except OSError:
module.fail_json(msg="Failed to determine current state for boolean %s" % name)
if state == 1:
return True
else:
return False
# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot.
def semanage_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
handle = semanage.semanage_handle_create()
if handle is None:
module.fail_json(msg="Failed to create semanage library handle")
try:
managed = semanage.semanage_is_managed(handle)
if managed < 0:
module.fail_json(msg="Failed to determine whether policy is managed")
if managed == 0:
if os.getuid() == 0:
module.fail_json(msg="Cannot set persistent booleans without managed policy")
else:
module.fail_json(msg="Cannot set persistent booleans; please try as root")
if semanage.semanage_connect(handle) < 0:
module.fail_json(msg="Failed to connect to semanage")
if semanage.semanage_begin_transaction(handle) < 0:
module.fail_json(msg="Failed to begin semanage transaction")
rc, sebool = semanage.semanage_bool_create(handle)
if rc < 0:
module.fail_json(msg="Failed to create seboolean with semanage")
if semanage.semanage_bool_set_name(handle, sebool, name) < 0:
module.fail_json(msg="Failed to set seboolean name with semanage")
semanage.semanage_bool_set_value(sebool, value)
rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool)
if rc < 0:
module.fail_json(msg="Failed to extract boolean key with semanage")
if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to modify boolean key with semanage")
if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
module.fail_json(msg="Failed to set boolean key active with semanage")
semanage.semanage_bool_key_free(boolkey)
semanage.semanage_bool_free(sebool)
semanage.semanage_set_reload(handle, 0)
if semanage.semanage_commit(handle) < 0:
module.fail_json(msg="Failed to commit changes to semanage")
semanage.semanage_disconnect(handle)
semanage.semanage_handle_destroy(handle)
except Exception:
e = get_exception()
module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
return True
def set_boolean_value(module, name, state):
rc = 0
value = 0
if state:
value = 1
try:
rc = selinux.security_set_boolean(name, value)
except OSError:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
if rc == 0:
return True
else:
return False
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True),
persistent=dict(default='no', type='bool'),
state=dict(required=True, type='bool')
),
supports_check_mode=True
)
if not HAVE_SELINUX:
module.fail_json(msg="This module requires libselinux-python support")
if not HAVE_SEMANAGE:
module.fail_json(msg="This module requires libsemanage-python support")
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
name = module.params['name']
persistent = module.params['persistent']
state = module.params['state']
result = {}
result['name'] = name
if hasattr(selinux, 'selinux_boolean_sub'):
# selinux_boolean_sub allows sites to rename a boolean and alias the old name
# Feature only available in selinux library since 2012.
name = selinux.selinux_boolean_sub(name)
if not has_boolean_value(module, name):
module.fail_json(msg="SELinux boolean %s does not exist." % name)
cur_value = get_boolean_value(module, name)
if cur_value == state:
result['state'] = cur_value
result['changed'] = False
module.exit_json(**result)
if module.check_mode:
module.exit_json(changed=True)
if persistent:
r = semanage_boolean_value(module, name, state)
else:
r = set_boolean_value(module, name, state)
result['changed'] = r
if not r:
module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))
try:
selinux.security_commit_booleans()
except:
module.fail_json(msg="Failed to commit pending boolean %s value" % name)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_bytes
if __name__ == '__main__':
main()
|
nagyistoce/odoo-dev-odoo
|
refs/heads/8.0
|
addons/document/__init__.py
|
434
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import content_index
import std_index
import document
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tonysyu/scrappyr-app
|
refs/heads/master
|
scrappyr/scraps/serializers.py
|
1
|
from rest_framework import serializers
from . import models
from ..utils.serializers import JSONMixin
class ScrapSerializer(JSONMixin, serializers.ModelSerializer):
class Meta:
model = models.Scrap
fields = ('id', 'raw_title', 'markup_type', 'html_title', 'modified', 'created')
read_only_fields = ('created', 'modified')
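# Minimal usage sketch (an assumption, not part of the original module):
# serializing an existing Scrap with the class above.  The queryset call and
# field values are placeholders.
#
#     scrap = models.Scrap.objects.first()
#     payload = ScrapSerializer(scrap).data   # dict limited to Meta.fields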
|
ZuluPro/libcloud
|
refs/heads/trunk
|
docs/examples/container/kubernetes/instantiate_driver.py
|
26
|
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
cls = get_driver(Provider.KUBERNETES)
conn = cls(key='my_username',
secret='THIS_IS)+_MY_SECRET_KEY+I6TVkv68o4H',
host='126.32.21.4')
for container in conn.list_containers():
print(container.name)
for cluster in conn.list_clusters():
print(cluster.name)
|
EricNeedham/assignment-1
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/pyodbc.py
|
32
|
# mysql/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
"""
from .base import MySQLDialect, MySQLExecutionContext
from ...connectors.pyodbc import PyODBCConnector
from ... import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
supports_unicode_statements = False
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def __init__(self, **kw):
# deal with http://code.google.com/p/pyodbc/issues/detail?id=25
kw.setdefault('convert_unicode', True)
super(MySQLDialect_pyodbc, self).__init__(**kw)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
for key in ('character_set_connection', 'character_set'):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. "
"Assuming latin1.")
return 'latin1'
def _extract_error_code(self, exception):
m = re.compile(r"\((\d+)\)").search(str(exception.args))
c = m.group(1)
if c:
return int(c)
else:
return None
dialect = MySQLDialect_pyodbc
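# Illustrative connection sketch (not part of this module): the dialect is
# selected through the URL form documented in the docstring above.  The DSN
# name and credentials are placeholders.
#
#     from sqlalchemy import create_engine
#     engine = create_engine("mysql+pyodbc://scott:tiger@my_dsn")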
|
abhisheklolage/ttgen2
|
refs/heads/master
|
src/core/subject.py
|
2
|
#!/usr/bin/env python3
class Subject(object):
def __init__(self, name):
self.name = name
def __unicode__(self):
return self.name
def __str__(self):
return self.name
def __repr__(self):
return __name__ + ".Subject({name})".format(name=repr(self.name))
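# Illustrative doctest-style usage (not part of the original file); the module
# prefix shown by repr() depends on how the module is imported.
#
#     >>> s = Subject("Physics")
#     >>> str(s)
#     'Physics'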
|
gg7/diamond
|
refs/heads/master
|
src/collectors/ksm/ksm.py
|
69
|
# coding=utf-8
"""
This class collects 'Kernel Samepage Merging' statistics.
KSM is a memory de-duplication feature of the Linux Kernel (2.6.32+).
It can be enabled, if compiled into your kernel, by echoing 1 to
/sys/kernel/mm/ksm/run. You can find more information about KSM at
[http://www.linux-kvm.org/page/KSM](http://www.linux-kvm.org/page/KSM).
#### Dependencies
* KSM built into your kernel. It does not have to be enabled, but the stats
will be less than useful if it isn't:-)
"""
import os
import glob
import diamond.collector
class KSMCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(KSMCollector, self).get_default_config_help()
config_help.update({
'ksm_path': "location where KSM kernel data can be found",
})
return config_help
def get_default_config(self):
"""
Return default config.
path: Graphite path output
ksm_path: location where KSM kernel data can be found
"""
config = super(KSMCollector, self).get_default_config()
config.update({
'path': 'ksm',
'ksm_path': '/sys/kernel/mm/ksm'})
return config
def collect(self):
for item in glob.glob(os.path.join(self.config['ksm_path'], "*")):
if os.access(item, os.R_OK):
filehandle = open(item)
try:
self.publish(os.path.basename(item),
float(filehandle.readline().rstrip()))
except ValueError:
pass
filehandle.close()
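# Standalone sketch (an assumption, not part of the collector): reading the
# same KSM counters that collect() publishes, using only the modules already
# imported above.
def _read_ksm_stats(ksm_path='/sys/kernel/mm/ksm'):
    stats = {}
    for item in glob.glob(os.path.join(ksm_path, '*')):
        if os.access(item, os.R_OK):
            with open(item) as filehandle:
                try:
                    stats[os.path.basename(item)] = float(
                        filehandle.readline().rstrip())
                except ValueError:
                    pass
    return stats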
|
yiannist/ganeti
|
refs/heads/master
|
lib/storage/drbd.py
|
3
|
#
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DRBD block device related functionality"""
import errno
import logging
import time
from ganeti import constants
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import objects
from ganeti.storage import base
from ganeti.storage.drbd_info import DRBD8Info
from ganeti.storage import drbd_info
from ganeti.storage import drbd_cmdgen
# Size of reads in _CanReadDevice
_DEVICE_READ_SIZE = 128 * 1024
class DRBD8(object):
"""Various methods to deal with the DRBD system as a whole.
This class provides a set of methods to deal with the DRBD installation on
the node or with uninitialized devices as opposed to a DRBD device.
"""
_USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
_MAX_MINORS = 255
@staticmethod
def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
"""Returns DRBD usermode_helper currently set.
@type filename: string
@param filename: the filename to read the usermode helper from
@rtype: string
@return: the currently configured DRBD usermode helper
"""
try:
helper = utils.ReadFile(filename).splitlines()[0]
except EnvironmentError, err:
if err.errno == errno.ENOENT:
base.ThrowError("The file %s cannot be opened, check if the module"
" is loaded (%s)", filename, str(err))
else:
base.ThrowError("Can't read DRBD helper file %s: %s",
filename, str(err))
if not helper:
base.ThrowError("Can't read any data from %s", filename)
return helper
@staticmethod
def GetProcInfo():
"""Reads and parses information from /proc/drbd.
@rtype: DRBD8Info
@return: a L{DRBD8Info} instance containing the current /proc/drbd info
"""
return DRBD8Info.CreateFromFile()
@staticmethod
def GetUsedDevs():
"""Compute the list of used DRBD minors.
@rtype: list of ints
"""
info = DRBD8.GetProcInfo()
return [m for m in info.GetMinors()
if not info.GetMinorStatus(m).is_unconfigured]
@staticmethod
def FindUnusedMinor():
"""Find an unused DRBD device.
This is specific to 8.x as the minors are allocated dynamically,
so non-existing numbers up to a max minor count are actually free.
@rtype: int
"""
highest = None
info = DRBD8.GetProcInfo()
for minor in info.GetMinors():
status = info.GetMinorStatus(minor)
if not status.is_in_use:
return minor
highest = max(highest, minor)
if highest is None: # there are no minors in use at all
return 0
if highest >= DRBD8._MAX_MINORS:
logging.error("Error: no free drbd minors!")
raise errors.BlockDeviceError("Can't find a free DRBD minor")
return highest + 1
@staticmethod
def GetCmdGenerator(info):
"""Creates a suitable L{BaseDRBDCmdGenerator} based on the given info.
@type info: DRBD8Info
@rtype: BaseDRBDCmdGenerator
"""
version = info.GetVersion()
if version["k_minor"] <= 3:
return drbd_cmdgen.DRBD83CmdGenerator(version)
else:
return drbd_cmdgen.DRBD84CmdGenerator(version)
@staticmethod
def ShutdownAll(minor):
"""Deactivate the device.
This will, of course, fail if the device is in use.
@type minor: int
@param minor: the minor to shut down
"""
info = DRBD8.GetProcInfo()
cmd_gen = DRBD8.GetCmdGenerator(info)
cmd = cmd_gen.GenDownCmd(minor)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: can't shutdown drbd device: %s",
minor, result.output)
class DRBD8Dev(base.BlockDev):
"""DRBD v8.x block device.
This implements the local host part of the DRBD device, i.e. it
doesn't do anything to the supposed peer. If you need a fully
connected DRBD pair, you need to use this class on both hosts.
The unique_id for the drbd device is a (pnode_uuid, snode_uuid,
port, pnode_minor, lnode_minor, secret) tuple, and it must have
two children: the data device and the meta_device. The meta
device is checked for valid size and is zeroed on create.
"""
_DRBD_MAJOR = 147
# timeout constants
_NET_RECONFIG_TIMEOUT = 60
def __init__(self, unique_id, children, size, params, dyn_params, **kwargs):
if children and children.count(None) > 0:
children = []
if len(children) not in (0, 2):
raise ValueError("Invalid configuration data %s" % str(children))
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
raise ValueError("Invalid configuration data %s" % str(unique_id))
if constants.DDP_LOCAL_IP not in dyn_params or \
constants.DDP_REMOTE_IP not in dyn_params or \
constants.DDP_LOCAL_MINOR not in dyn_params or \
constants.DDP_REMOTE_MINOR not in dyn_params:
raise ValueError("Invalid dynamic parameters %s" % str(dyn_params))
self._lhost = dyn_params[constants.DDP_LOCAL_IP]
self._lport = unique_id[2]
self._rhost = dyn_params[constants.DDP_REMOTE_IP]
self._rport = unique_id[2]
self._aminor = dyn_params[constants.DDP_LOCAL_MINOR]
# The secret is wrapped in the Private data type, and it has to be extracted
# before use
self._secret = unique_id[5].Get()
if children:
if not _CanReadDevice(children[1].dev_path):
logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
children = []
super(DRBD8Dev, self).__init__(unique_id, children, size, params,
dyn_params, **kwargs)
self.major = self._DRBD_MAJOR
info = DRBD8.GetProcInfo()
version = info.GetVersion()
if version["k_major"] != 8:
base.ThrowError("Mismatch in DRBD kernel version and requested ganeti"
" usage: kernel is %s.%s, ganeti wants 8.x",
version["k_major"], version["k_minor"])
if version["k_minor"] <= 3:
self._show_info_cls = drbd_info.DRBD83ShowInfo
else:
self._show_info_cls = drbd_info.DRBD84ShowInfo
self._cmd_gen = DRBD8.GetCmdGenerator(info)
if (self._lhost is not None and self._lhost == self._rhost and
self._lport == self._rport):
raise ValueError("Invalid configuration data, same local/remote %s, %s" %
(unique_id, dyn_params))
self.Attach()
@staticmethod
def _DevPath(minor):
"""Return the path to a drbd device for a given minor.
@type minor: int
@rtype: string
"""
return "/dev/drbd%d" % minor
def _SetFromMinor(self, minor):
"""Set our parameters based on the given minor.
This sets our minor variable and our dev_path.
@type minor: int
"""
if minor is None:
self.minor = self.dev_path = None
self.attached = False
else:
self.minor = minor
self.dev_path = self._DevPath(minor)
self.attached = True
@staticmethod
def _CheckMetaSize(meta_device):
"""Check if the given meta device looks like a valid one.
This currently only checks the size, which must be around
128MiB.
@type meta_device: string
@param meta_device: the path to the device to check
"""
result = utils.RunCmd(["blockdev", "--getsize", meta_device])
if result.failed:
base.ThrowError("Failed to get device size: %s - %s",
result.fail_reason, result.output)
try:
sectors = int(result.stdout)
except (TypeError, ValueError):
base.ThrowError("Invalid output from blockdev: '%s'", result.stdout)
num_bytes = sectors * 512
if num_bytes < 128 * 1024 * 1024: # less than 128MiB
base.ThrowError("Meta device too small (%.2fMiB)",
(num_bytes / 1024 / 1024))
# the maximum *valid* size of the meta device when living on top
# of LVM is hard to compute: it depends on the number of stripes
# and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
# (normal size), but an eight-stripe 128MB PE will result in a 1GB
# size meta device; as such, we restrict it to 1GB (a little bit
# too generous, but making assumptions about PE size is hard)
if num_bytes > 1024 * 1024 * 1024:
base.ThrowError("Meta device too big (%.2fMiB)",
(num_bytes / 1024 / 1024))
def _GetShowData(self, minor):
"""Return the `drbdsetup show` data.
@type minor: int
@param minor: the minor to collect show output for
@rtype: string
"""
result = utils.RunCmd(self._cmd_gen.GenShowCmd(minor))
if result.failed:
logging.error("Can't display the drbd config: %s - %s",
result.fail_reason, result.output)
return None
return result.stdout
def _GetShowInfo(self, minor):
"""Return parsed information from `drbdsetup show`.
@type minor: int
@param minor: the minor to return information for
@rtype: dict as described in L{drbd_info.BaseShowInfo.GetDevInfo}
"""
return self._show_info_cls.GetDevInfo(self._GetShowData(minor))
def _MatchesLocal(self, info):
"""Test if our local config matches with an existing device.
The parameter should be as returned from `_GetShowInfo()`. This
method tests if our local backing device is the same as the one in
the info parameter, in effect testing if we look like the given
device.
@type info: dict as described in L{drbd_info.BaseShowInfo.GetDevInfo}
@rtype: boolean
"""
if self._children:
backend, meta = self._children
else:
backend = meta = None
if backend is not None:
retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
else:
retval = ("local_dev" not in info)
if meta is not None:
retval = retval and ("meta_dev" in info and
info["meta_dev"] == meta.dev_path)
if "meta_index" in info:
retval = retval and info["meta_index"] == 0
else:
retval = retval and ("meta_dev" not in info and
"meta_index" not in info)
return retval
def _MatchesNet(self, info):
"""Test if our network config matches with an existing device.
The parameter should be as returned from `_GetShowInfo()`. This
method tests if our network configuration is the same as the one
in the info parameter, in effect testing if we look like the given
device.
@type info: dict as described in L{drbd_info.BaseShowInfo.GetDevInfo}
@rtype: boolean
"""
if (((self._lhost is None and not ("local_addr" in info)) and
(self._rhost is None and not ("remote_addr" in info)))):
return True
if self._lhost is None:
return False
if not ("local_addr" in info and
"remote_addr" in info):
return False
retval = (info["local_addr"] == (self._lhost, self._lport))
retval = (retval and
info["remote_addr"] == (self._rhost, self._rport))
return retval
def _AssembleLocal(self, minor, backend, meta, size):
"""Configure the local part of a DRBD device.
@type minor: int
@param minor: the minor to assemble locally
@type backend: string
@param backend: path to the data device to use
@type meta: string
@param meta: path to the meta device to use
@type size: int
@param size: size in MiB
"""
cmds = self._cmd_gen.GenLocalInitCmds(minor, backend, meta,
size, self.params)
for cmd in cmds:
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: can't attach local disk: %s",
minor, result.output)
def _AssembleNet(self, minor, net_info, dual_pri=False, hmac=None,
secret=None):
"""Configure the network part of the device.
@type minor: int
@param minor: the minor to assemble the network for
@type net_info: (string, int, string, int)
@param net_info: tuple containing the local address, local port, remote
address and remote port
@type dual_pri: boolean
@param dual_pri: whether two primaries should be allowed or not
@type hmac: string
@param hmac: the HMAC algorithm to use
@type secret: string
@param secret: the shared secret to use
"""
lhost, lport, rhost, rport = net_info
if None in net_info:
# we don't want a network connection and actually want to make
# sure it's shut down
self._ShutdownNet(minor)
return
if dual_pri:
protocol = constants.DRBD_MIGRATION_NET_PROTOCOL
else:
protocol = self.params[constants.LDP_PROTOCOL]
# Workaround for a race condition. When DRBD is doing its dance to
# establish a connection with its peer, it also sends the
# synchronization speed over the wire. In some cases setting the
# sync speed only after setting up both sides can race with DRBD
# connecting, hence we set it here before telling DRBD anything
# about its peer.
sync_errors = self._SetMinorSyncParams(minor, self.params)
if sync_errors:
base.ThrowError("drbd%d: can't set the synchronization parameters: %s" %
(minor, utils.CommaJoin(sync_errors)))
family = self._GetNetFamily(minor, lhost, rhost)
cmd = self._cmd_gen.GenNetInitCmd(minor, family, lhost, lport,
rhost, rport, protocol,
dual_pri, hmac, secret, self.params)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: can't setup network: %s - %s",
minor, result.fail_reason, result.output)
def _CheckNetworkConfig():
info = self._GetShowInfo(minor)
if not "local_addr" in info or not "remote_addr" in info:
raise utils.RetryAgain()
if (info["local_addr"] != (lhost, lport) or
info["remote_addr"] != (rhost, rport)):
raise utils.RetryAgain()
try:
utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
except utils.RetryTimeout:
base.ThrowError("drbd%d: timeout while configuring network", minor)
# Once the assembly is over, try to set the synchronization parameters
try:
# The minor may not have been set yet, requiring us to set it at least
# temporarily
old_minor = self.minor
self._SetFromMinor(minor)
sync_errors = self.SetSyncParams(self.params)
if sync_errors:
base.ThrowError("drbd%d: can't set the synchronization parameters: %s" %
(self.minor, utils.CommaJoin(sync_errors)))
finally:
# Undo the change, regardless of whether it will have to be done again
# soon
self._SetFromMinor(old_minor)
@staticmethod
def _GetNetFamily(minor, lhost, rhost):
if netutils.IP6Address.IsValid(lhost):
if not netutils.IP6Address.IsValid(rhost):
base.ThrowError("drbd%d: can't connect ip %s to ip %s" %
(minor, lhost, rhost))
return "ipv6"
elif netutils.IP4Address.IsValid(lhost):
if not netutils.IP4Address.IsValid(rhost):
base.ThrowError("drbd%d: can't connect ip %s to ip %s" %
(minor, lhost, rhost))
return "ipv4"
else:
base.ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))
def AddChildren(self, devices):
"""Add a disk to the DRBD device.
@type devices: list of L{BlockDev}
@param devices: a list of exactly two L{BlockDev} objects; the first
denotes the data device, the second the meta device for this DRBD device
"""
if self.minor is None:
base.ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
self._aminor)
if len(devices) != 2:
base.ThrowError("drbd%d: need two devices for AddChildren", self.minor)
info = self._GetShowInfo(self.minor)
if "local_dev" in info:
base.ThrowError("drbd%d: already attached to a local disk", self.minor)
backend, meta = devices
if backend.dev_path is None or meta.dev_path is None:
base.ThrowError("drbd%d: children not ready during AddChildren",
self.minor)
backend.Open()
meta.Open()
self._CheckMetaSize(meta.dev_path)
self._InitMeta(DRBD8.FindUnusedMinor(), meta.dev_path)
self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
self._children = devices
def RemoveChildren(self, devices):
"""Detach the drbd device from local storage.
@type devices: list of L{BlockDev}
@param devices: a list of exactly two L{BlockDev} objects; the first
denotes the data device, the second the meta device for this DRBD device
"""
if self.minor is None:
base.ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
self._aminor)
# early return if we don't actually have backing storage
info = self._GetShowInfo(self.minor)
if "local_dev" not in info:
return
if len(self._children) != 2:
base.ThrowError("drbd%d: we don't have two children: %s", self.minor,
self._children)
if self._children.count(None) == 2: # we don't actually have children :)
logging.warning("drbd%d: requested detach while detached", self.minor)
return
if len(devices) != 2:
base.ThrowError("drbd%d: we need two children in RemoveChildren",
self.minor)
for child, dev in zip(self._children, devices):
if dev != child.dev_path:
base.ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
" RemoveChildren", self.minor, dev, child.dev_path)
self._ShutdownLocal(self.minor)
self._children = []
def _SetMinorSyncParams(self, minor, params):
"""Set the parameters of the DRBD syncer.
This is the low-level implementation.
@type minor: int
@param minor: the drbd minor whose settings we change
@type params: dict
@param params: LD level disk parameters related to the synchronization
@rtype: list
@return: a list of error messages
"""
cmd = self._cmd_gen.GenSyncParamsCmd(minor, params)
result = utils.RunCmd(cmd)
if result.failed:
msg = ("Can't change syncer rate: %s - %s" %
(result.fail_reason, result.output))
logging.error(msg)
return [msg]
return []
def SetSyncParams(self, params):
"""Set the synchronization parameters of the DRBD syncer.
See L{BlockDev.SetSyncParams} for parameter description.
"""
if self.minor is None:
err = "Not attached during SetSyncParams"
logging.info(err)
return [err]
children_result = super(DRBD8Dev, self).SetSyncParams(params)
children_result.extend(self._SetMinorSyncParams(self.minor, params))
return children_result
def PauseResumeSync(self, pause):
"""Pauses or resumes the sync of a DRBD device.
See L{BlockDev.PauseResumeSync} for parameter description.
"""
if self.minor is None:
logging.info("Not attached during PauseSync")
return False
children_result = super(DRBD8Dev, self).PauseResumeSync(pause)
if pause:
cmd = self._cmd_gen.GenPauseSyncCmd(self.minor)
else:
cmd = self._cmd_gen.GenResumeSyncCmd(self.minor)
result = utils.RunCmd(cmd)
if result.failed:
logging.error("Can't %s: %s - %s", cmd,
result.fail_reason, result.output)
return not result.failed and children_result
def GetProcStatus(self):
"""Return the current status data from /proc/drbd for this device.
@rtype: DRBD8Status
"""
if self.minor is None:
base.ThrowError("drbd%d: GetStats() called while not attached",
self._aminor)
info = DRBD8.GetProcInfo()
if not info.HasMinorStatus(self.minor):
base.ThrowError("drbd%d: can't find myself in /proc", self.minor)
return info.GetMinorStatus(self.minor)
def GetSyncStatus(self):
"""Returns the sync status of the device.
If sync_percent is None, it means all is ok
If estimated_time is None, it means we can't estimate
the time needed, otherwise it's the time left in seconds.
We set the is_degraded parameter to True on two conditions:
network not connected or local disk missing.
We compute the ldisk parameter based on whether we have a local
disk or not.
@rtype: objects.BlockDevStatus
"""
if self.minor is None and not self.Attach():
base.ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)
stats = self.GetProcStatus()
is_degraded = not stats.is_connected or not stats.is_disk_uptodate
if stats.is_disk_uptodate:
ldisk_status = constants.LDS_OKAY
elif stats.is_diskless:
ldisk_status = constants.LDS_FAULTY
elif stats.is_in_resync:
ldisk_status = constants.LDS_SYNC
else:
ldisk_status = constants.LDS_UNKNOWN
return objects.BlockDevStatus(dev_path=self.dev_path,
major=self.major,
minor=self.minor,
sync_percent=stats.sync_percent,
estimated_time=stats.est_time,
is_degraded=is_degraded,
ldisk_status=ldisk_status)
def Open(self, force=False, exclusive=True):
"""Make the local state primary.
If the 'force' parameter is given, DRBD is instructed to switch the device
into primary mode. Since this is a potentially dangerous operation, the
force flag should be only given after creation, when it actually is
mandatory.
"""
if self.minor is None and not self.Attach():
logging.error("DRBD cannot attach to a device during open")
return False
cmd = self._cmd_gen.GenPrimaryCmd(self.minor, force)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
result.output)
def Close(self):
"""Make the local state secondary.
This will, of course, fail if the device is in use.
"""
if self.minor is None and not self.Attach():
base.ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
cmd = self._cmd_gen.GenSecondaryCmd(self.minor)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: can't switch drbd device to secondary: %s",
self.minor, result.output)
def DisconnectNet(self):
"""Removes network configuration.
This method shuts down the network side of the device.
The method will wait up to a hardcoded timeout for the device to
go into standalone after the 'disconnect' command before
re-configuring it, as sometimes it takes a while for the
disconnect to actually propagate and thus we might issue a 'net'
command while the device is still connected. If the device will
still be attached to the network and we time out, we raise an
exception.
"""
if self.minor is None:
base.ThrowError("drbd%d: disk not attached in re-attach net",
self._aminor)
if None in (self._lhost, self._lport, self._rhost, self._rport):
base.ThrowError("drbd%d: DRBD disk missing network info in"
" DisconnectNet()", self.minor)
class _DisconnectStatus(object):
def __init__(self, ever_disconnected):
self.ever_disconnected = ever_disconnected
dstatus = _DisconnectStatus(base.IgnoreError(self._ShutdownNet, self.minor))
def _WaitForDisconnect():
if self.GetProcStatus().is_standalone:
return
# retry the disconnect, it seems possible that due to a well-timed
# disconnect on the peer, my disconnect command might be ignored and
# forgotten
dstatus.ever_disconnected = \
base.IgnoreError(self._ShutdownNet, self.minor) or \
dstatus.ever_disconnected
raise utils.RetryAgain()
# Keep start time
start_time = time.time()
try:
# Start delay at 100 milliseconds and grow up to 2 seconds
utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
self._NET_RECONFIG_TIMEOUT)
except utils.RetryTimeout:
if dstatus.ever_disconnected:
msg = ("drbd%d: device did not react to the"
" 'disconnect' command in a timely manner")
else:
msg = "drbd%d: can't shutdown network, even after multiple retries"
base.ThrowError(msg, self.minor)
reconfig_time = time.time() - start_time
if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
self.minor, reconfig_time)
def AttachNet(self, multimaster):
"""Reconnects the network.
This method connects the network side of the device with a
specified multi-master flag. The device needs to be 'Standalone'
but have valid network configuration data.
@type multimaster: boolean
@param multimaster: init the network in dual-primary mode
"""
if self.minor is None:
base.ThrowError("drbd%d: device not attached in AttachNet", self._aminor)
if None in (self._lhost, self._lport, self._rhost, self._rport):
base.ThrowError("drbd%d: missing network info in AttachNet()", self.minor)
status = self.GetProcStatus()
if not status.is_standalone:
base.ThrowError("drbd%d: device is not standalone in AttachNet",
self.minor)
self._AssembleNet(self.minor,
(self._lhost, self._lport, self._rhost, self._rport),
dual_pri=multimaster, hmac=constants.DRBD_HMAC_ALG,
secret=self._secret)
def Attach(self, **kwargs):
"""Check if our minor is configured.
This doesn't do any device configurations - it only checks if the
minor is in a state different from Unconfigured.
Note that this function will not change the state of the system in
any way (except in case of side-effects caused by reading from
/proc).
"""
used_devs = DRBD8.GetUsedDevs()
if self._aminor in used_devs:
minor = self._aminor
else:
minor = None
self._SetFromMinor(minor)
return minor is not None
def Assemble(self):
"""Assemble the drbd.
Method:
- if we have a configured device, we try to ensure that it matches
our config
- if not, we create it from zero
- anyway, set the device parameters
"""
super(DRBD8Dev, self).Assemble()
self.Attach()
if self.minor is None:
# local device completely unconfigured
self._FastAssemble()
else:
# we have to recheck the local and network status and try to fix
# the device
self._SlowAssemble()
def _SlowAssemble(self):
"""Assembles the DRBD device from a (partially) configured device.
In case of partially attached (local device matches but no network
setup), we perform the network attach. If successful, we re-test
whether the attach can now return success.
"""
# TODO: Rewrite to not use a for loop just because there is 'break'
# pylint: disable=W0631
net_data = (self._lhost, self._lport, self._rhost, self._rport)
for minor in (self._aminor,):
info = self._GetShowInfo(minor)
match_l = self._MatchesLocal(info)
match_r = self._MatchesNet(info)
if match_l and match_r:
# everything matches
break
if match_l and not match_r and "local_addr" not in info:
# disk matches, but not attached to network, attach and recheck
self._AssembleNet(minor, net_data, hmac=constants.DRBD_HMAC_ALG,
secret=self._secret)
if self._MatchesNet(self._GetShowInfo(minor)):
break
else:
base.ThrowError("drbd%d: network attach successful, but 'drbdsetup"
" show' disagrees", minor)
if match_r and "local_dev" not in info:
# no local disk, but network attached and it matches
self._AssembleLocal(minor, self._children[0].dev_path,
self._children[1].dev_path, self.size)
if self._MatchesLocal(self._GetShowInfo(minor)):
break
else:
base.ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
" show' disagrees", minor)
# this case must be considered only if we actually have local
# storage, i.e. not in diskless mode, because all diskless
# devices are equal from the point of view of local
# configuration
if (match_l and "local_dev" in info and
not match_r and "local_addr" in info):
# strange case - the device network part points to somewhere
# else, even though its local storage is ours; as we own the
# drbd space, we try to disconnect from the remote peer and
# reconnect to our correct one
try:
self._ShutdownNet(minor)
except errors.BlockDeviceError, err:
base.ThrowError("drbd%d: device has correct local storage, wrong"
" remote peer and is unable to disconnect in order"
" to attach to the correct peer: %s", minor, str(err))
# note: _AssembleNet also handles the case when we don't want
# local storage (i.e. one or more of the _[lr](host|port) is
# None)
self._AssembleNet(minor, net_data, hmac=constants.DRBD_HMAC_ALG,
secret=self._secret)
if self._MatchesNet(self._GetShowInfo(minor)):
break
else:
base.ThrowError("drbd%d: network attach successful, but 'drbdsetup"
" show' disagrees", minor)
else:
minor = None
self._SetFromMinor(minor)
if minor is None:
base.ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
self._aminor)
def _FastAssemble(self):
"""Assemble the drbd device from zero.
This is run when in Assemble we detect our minor is unused.
"""
minor = self._aminor
if self._children and self._children[0] and self._children[1]:
self._AssembleLocal(minor, self._children[0].dev_path,
self._children[1].dev_path, self.size)
if self._lhost and self._lport and self._rhost and self._rport:
self._AssembleNet(minor,
(self._lhost, self._lport, self._rhost, self._rport),
hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
self._SetFromMinor(minor)
def _ShutdownLocal(self, minor):
"""Detach from the local device.
I/Os will continue to be served from the remote device. If we
don't have a remote device, this operation will fail.
@type minor: int
@param minor: the device to detach from the local device
"""
cmd = self._cmd_gen.GenDetachCmd(minor)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: can't detach local disk: %s",
minor, result.output)
def _ShutdownNet(self, minor):
"""Disconnect from the remote peer.
This fails if we don't have a local device.
@type minor: int
@param minor: the device to disconnect from the remote peer
"""
family = self._GetNetFamily(minor, self._lhost, self._rhost)
cmd = self._cmd_gen.GenDisconnectCmd(minor, family,
self._lhost, self._lport,
self._rhost, self._rport)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: can't shutdown network: %s",
minor, result.output)
def Shutdown(self):
"""Shutdown the DRBD device.
"""
if self.minor is None and not self.Attach():
logging.info("drbd%d: not attached during Shutdown()", self._aminor)
return
try:
DRBD8.ShutdownAll(self.minor)
finally:
self.minor = None
self.dev_path = None
def Remove(self):
"""Stub remove for DRBD devices.
"""
self.Shutdown()
def Rename(self, new_id):
"""Rename a device.
This is not supported for drbd devices.
"""
raise errors.ProgrammerError("Can't rename a drbd device")
def Grow(self, amount, dryrun, backingstore, excl_stor):
"""Resize the DRBD device and its backing storage.
See L{BlockDev.Grow} for parameter description.
"""
if self.minor is None:
base.ThrowError("drbd%d: Grow called while not attached", self._aminor)
if len(self._children) != 2 or None in self._children:
base.ThrowError("drbd%d: cannot grow diskless device", self.minor)
self._children[0].Grow(amount, dryrun, backingstore, excl_stor)
if dryrun or backingstore:
# DRBD does not support dry-run mode and is not backing storage,
# so we'll return here
return
cmd = self._cmd_gen.GenResizeCmd(self.minor, self.size + amount)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
@classmethod
def _InitMeta(cls, minor, dev_path):
"""Initialize a meta device.
This will not work if the given minor is in use.
@type minor: int
@param minor: the DRBD minor whose (future) meta device should be
initialized
@type dev_path: string
@param dev_path: path to the meta device to initialize
"""
# Zero the metadata first, in order to make sure drbdmeta doesn't
# try to auto-detect existing filesystems or similar (see
# http://code.google.com/p/ganeti/issues/detail?id=182); we only
# care about the first 128MB of data in the device, even though it
# can be bigger
result = utils.RunCmd([constants.DD_CMD,
"if=/dev/zero", "of=%s" % dev_path,
"bs=%s" % constants.DD_BLOCK_SIZE, "count=128",
"oflag=direct"])
if result.failed:
base.ThrowError("Can't wipe the meta device: %s", result.output)
info = DRBD8.GetProcInfo()
cmd_gen = DRBD8.GetCmdGenerator(info)
cmd = cmd_gen.GenInitMetaCmd(minor, dev_path)
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("Can't initialize meta device: %s", result.output)
@classmethod
def Create(cls, unique_id, children, size, spindles, params, excl_stor,
dyn_params, **kwargs):
"""Create a new DRBD8 device.
Since DRBD devices are not created per se, just assembled, this
function only initializes the metadata.
"""
if len(children) != 2:
raise errors.ProgrammerError("Invalid setup for the drbd device")
if excl_stor:
raise errors.ProgrammerError("DRBD device requested with"
" exclusive_storage")
if constants.DDP_LOCAL_MINOR not in dyn_params:
raise errors.ProgrammerError("Invalid dynamic params for drbd device %s"
% dyn_params)
# check that the minor is unused
aminor = dyn_params[constants.DDP_LOCAL_MINOR]
info = DRBD8.GetProcInfo()
if info.HasMinorStatus(aminor):
status = info.GetMinorStatus(aminor)
in_use = status.is_in_use
else:
in_use = False
if in_use:
base.ThrowError("drbd%d: minor is already in use at Create() time",
aminor)
meta = children[1]
meta.Assemble()
if not meta.Attach():
base.ThrowError("drbd%d: can't attach to meta device '%s'",
aminor, meta)
cls._CheckMetaSize(meta.dev_path)
cls._InitMeta(aminor, meta.dev_path)
return cls(unique_id, children, size, params, dyn_params)
def _CanReadDevice(path):
"""Check if we can read from the given device.
This tries to read the first 128k of the device.
@type path: string
"""
try:
utils.ReadFile(path, size=_DEVICE_READ_SIZE)
return True
except EnvironmentError:
logging.warning("Can't read from device %s", path, exc_info=True)
return False
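# Stdlib-only sketch (an assumption, not part of ganeti): the readability
# probe performed above through utils.ReadFile, rewritten without the ganeti
# helpers for illustration.
def _can_read_device_plain(path, size=_DEVICE_READ_SIZE):
    try:
        with open(path, "rb") as fh:
            fh.read(size)
        return True
    except EnvironmentError:
        logging.warning("Can't read from device %s", path, exc_info=True)
        return False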
|
NoahFlowa/glowing-spoon
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py
|
356
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
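# Usage sketch (an assumption, not part of this module): feeding an html5lib
# tree walker through to_genshi() and serializing the resulting event stream
# with Genshi.
#
#     import html5lib
#     from genshi.output import HTMLSerializer
#     doc = html5lib.parse("<p>Hello</p>")
#     tokens = html5lib.getTreeWalker("etree")(doc)
#     print("".join(HTMLSerializer()(to_genshi(tokens))))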
|
ortylp/scipy
|
refs/heads/master
|
scipy/misc/tests/test_pilutil.py
|
46
|
from __future__ import division, print_function, absolute_import
import os.path
import tempfile
import shutil
import numpy as np
import warnings
from numpy.testing import (assert_, assert_equal, dec, decorate_methods,
TestCase, run_module_suite, assert_allclose)
from scipy import misc
try:
import PIL.Image
except ImportError:
_have_PIL = False
else:
_have_PIL = True
# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')
datapath = os.path.dirname(__file__)
class TestPILUtil(TestCase):
def test_imresize(self):
im = np.random.random((10,20))
for T in np.sctypes['float'] + [float]:
# 1.1 rounds to below 1.1 for float16, 1.101 works
im1 = misc.imresize(im,T(1.101))
assert_equal(im1.shape,(11,22))
def test_imresize2(self):
im = np.random.random((20,30))
im2 = misc.imresize(im, (30,40), interp='bicubic')
assert_equal(im2.shape, (30,40))
def test_imresize3(self):
im = np.random.random((15,30))
im2 = misc.imresize(im, (30,60), interp='nearest')
assert_equal(im2.shape, (30,60))
def test_imresize4(self):
im = np.array([[1, 2],
[3, 4]])
# Check that resizing by target size, float and int are the same
im2 = misc.imresize(im, (4,4), mode='F') # output size
im3 = misc.imresize(im, 2., mode='F') # fraction
im4 = misc.imresize(im, 200, mode='F') # percentage
assert_equal(im2, im3)
assert_equal(im2, im4)
def test_bytescale(self):
x = np.array([0,1,2], np.uint8)
y = np.array([0,1,2])
assert_equal(misc.bytescale(x), x)
assert_equal(misc.bytescale(y), [0,127,255])
def test_bytescale_keywords(self):
x = np.array([40, 60, 120, 200, 300, 500])
res_lowhigh = misc.bytescale(x, low=10, high=143)
assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143])
res_cmincmax = misc.bytescale(x, cmin=60, cmax=300)
assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255])
assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4])
def test_imsave(self):
with warnings.catch_warnings(record=True): # PIL ResourceWarning
img = misc.imread(os.path.join(datapath, 'data', 'icon.png'))
tmpdir = tempfile.mkdtemp()
try:
fn1 = os.path.join(tmpdir, 'test.png')
fn2 = os.path.join(tmpdir, 'testimg')
with warnings.catch_warnings(record=True): # PIL ResourceWarning
misc.imsave(fn1, img)
misc.imsave(fn2, img, 'PNG')
with warnings.catch_warnings(record=True): # PIL ResourceWarning
data1 = misc.imread(fn1)
data2 = misc.imread(fn2)
assert_allclose(data1, img)
assert_allclose(data2, img)
finally:
shutil.rmtree(tmpdir)
def tst_fromimage(filename, irange):
fp = open(filename, "rb")
img = misc.fromimage(PIL.Image.open(fp))
fp.close()
imin,imax = irange
assert_(img.min() >= imin)
assert_(img.max() <= imax)
@_pilskip
def test_fromimage():
# Test generator for parametric tests
data = {'icon.png':(0,255),
'icon_mono.png':(0,2),
'icon_mono_flat.png':(0,1)}
for fn, irange in data.items():
yield tst_fromimage, os.path.join(datapath,'data',fn), irange
decorate_methods(TestPILUtil, _pilskip)
if __name__ == "__main__":
run_module_suite()
|