code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from qpid_dispatch_internal.policy.policy_util import HostAddr
from qpid_dispatch_internal.policy.policy_util import HostStruct
from qpid_dispatch_internal.policy.policy_util import PolicyError
from qpid_dispatch_internal.policy.policy_util import PolicyAppConnectionMgr
from qpid_dispatch_internal.policy.policy_local import PolicyLocal
from system_test import TestCase, main_module
class PolicyHostAddrTest(TestCase):
    """Exercise HostAddr/HostStruct parsing and matching for IPv4 and IPv6."""

    def expect_deny(self, badhostname, msg):
        """Assert that constructing HostStruct(badhostname) raises PolicyError.

        msg is included in the failure message when no PolicyError is raised.
        """
        denied = False
        try:
            # Result intentionally discarded; only the raise matters.
            HostStruct(badhostname)
        except PolicyError:
            denied = True
        self.assertTrue(denied, ("%s" % msg))

    def check_hostaddr_match(self, tHostAddr, tString, expectOk=True):
        """Check tString against tHostAddr via both match paths.

        Verifies that the string comparison (match_str) and the parsed,
        binary comparison (match_bin via HostStruct) agree; expectOk
        selects the assertion polarity.
        """
        ha = HostStruct(tString)
        if expectOk:
            self.assertTrue(tHostAddr.match_str(tString))
            self.assertTrue(tHostAddr.match_bin(ha))
        else:
            self.assertFalse(tHostAddr.match_str(tString))
            self.assertFalse(tHostAddr.match_bin(ha))

    def test_policy_hostaddr_ipv4(self):
        # Create simple host and range
        aaa = HostAddr("192.168.1.1")
        bbb = HostAddr("1.1.1.1,1.1.1.255")
        # Verify host and range
        self.check_hostaddr_match(aaa, "192.168.1.1")
        self.check_hostaddr_match(aaa, "1.1.1.1", False)
        self.check_hostaddr_match(aaa, "192.168.1.2", False)
        self.check_hostaddr_match(bbb, "1.1.1.1")
        self.check_hostaddr_match(bbb, "1.1.1.254")
        self.check_hostaddr_match(bbb, "1.1.1.0", False)
        self.check_hostaddr_match(bbb, "1.1.2.0", False)

    def test_policy_hostaddr_ipv6(self):
        if not HostAddr.has_ipv6:
            self.skipTest("System IPv6 support is not available")
        # Create simple host and range
        aaa = HostAddr("::1")
        bbb = HostAddr("::1,::ffff")
        ccc = HostAddr("ffff::0,ffff:ffff::0")
        # Verify host and range
        self.check_hostaddr_match(aaa, "::1")
        self.check_hostaddr_match(aaa, "::2", False)
        self.check_hostaddr_match(aaa, "ffff:ffff::0", False)
        self.check_hostaddr_match(bbb, "::1")
        self.check_hostaddr_match(bbb, "::fffe")
        self.check_hostaddr_match(bbb, "::1:0", False)
        self.check_hostaddr_match(bbb, "ffff::0", False)
        self.check_hostaddr_match(ccc, "ffff::1")
        self.check_hostaddr_match(ccc, "ffff:fffe:ffff:ffff::ffff")
        self.check_hostaddr_match(ccc, "ffff:ffff::1", False)
        self.check_hostaddr_match(ccc, "ffff:ffff:ffff:ffff::ffff", False)

    def test_policy_hostaddr_ipv4_wildcard(self):
        aaa = HostAddr("*")
        self.check_hostaddr_match(aaa, "0.0.0.0")
        self.check_hostaddr_match(aaa, "127.0.0.1")
        self.check_hostaddr_match(aaa, "255.254.253.252")

    def test_policy_hostaddr_ipv6_wildcard(self):
        if not HostAddr.has_ipv6:
            self.skipTest("System IPv6 support is not available")
        aaa = HostAddr("*")
        self.check_hostaddr_match(aaa, "::0")
        self.check_hostaddr_match(aaa, "::1")
        self.check_hostaddr_match(aaa, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")

    def test_policy_malformed_hostaddr_ipv4(self):
        self.expect_deny("0.0.0.0.0", "Name or service not known")
        self.expect_deny("1.1.1.1,2.2.2.2,3.3.3.3", "arg count")
        self.expect_deny("9.9.9.9,8.8.8.8", "a > b")

    def test_policy_malformed_hostaddr_ipv6(self):
        if not HostAddr.has_ipv6:
            self.skipTest("System IPv6 support is not available")
        self.expect_deny("1::2::3", "Name or service not known")
        self.expect_deny("::1,::2,::3", "arg count")
        self.expect_deny("0:ff:0,0:fe:ffff:ffff::0", "a > b")
class QpidDispatch(object):
    """Stand-in for the C dispatch extension used by the policy engine."""

    def qd_dispatch_policy_c_counts_alloc(self):
        """Pretend to allocate a C statistics block; return a dummy handle."""
        return 100

    def qd_dispatch_policy_c_counts_refresh(self, cstats, entitymap):
        """No-op refresh of the (fake) C statistics block."""
        return None
class MockAgent(object):
    """Minimal stand-in for the management agent consumed by PolicyLocal."""
    def __init__(self):
        # Fake C-extension bridge; the policy code only calls the
        # qd_dispatch_policy_c_counts_* entry points provided by QpidDispatch.
        self.qd = QpidDispatch()
    def add_implementation(self, entity, cfg_obj_name):
        # Entity registration is irrelevant for these unit tests; discard it.
        pass
class MockPolicyManager(object):
    """Stand-in policy manager: logs to stdout and exposes a MockAgent."""
    def __init__(self):
        self.agent = MockAgent()
    def log_debug(self, text):
        print("DEBUG: %s" % text)
    def log_info(self, text):
        print("INFO: %s" % text)
    def log_trace(self, text):
        print("TRACE: %s" % text)
    def log_error(self, text):
        print("ERROR: %s" % text)
    def get_agent(self):
        # Policy code retrieves the agent to reach the C-extension shim.
        return self.agent
class PolicyFile(TestCase):
    """Policy lookups against the ruleset loaded via test_load_config()."""

    # One shared engine, built at class-definition time so the test
    # configuration is parsed a single time for all test methods below.
    manager = MockPolicyManager()
    policy = PolicyLocal(manager)
    policy.test_load_config()

    def test_policy1_test_zeke_ok(self):
        """A known user from an allowed address resolves to the 'test' settings."""
        p1 = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 1)
        # assertEqual (not assertTrue(a == b)) so failures show both values.
        self.assertEqual('test', p1)
        upolicy = {}
        self.assertTrue(
            PolicyFile.policy.lookup_settings('photoserver', p1, upolicy)
        )
        self.assertEqual(444444, upolicy['maxFrameSize'])
        self.assertEqual(444444, upolicy['maxMessageSize'])
        self.assertEqual(444444, upolicy['maxSessionWindow'])
        self.assertEqual(4, upolicy['maxSessions'])
        self.assertEqual(44, upolicy['maxSenders'])
        self.assertEqual(44, upolicy['maxReceivers'])
        self.assertTrue(upolicy['allowAnonymousSender'])
        self.assertTrue(upolicy['allowDynamicSource'])
        self.assertEqual('private', upolicy['targets'])
        self.assertEqual('private', upolicy['sources'])

    def test_policy1_test_zeke_bad_IP(self):
        """Lookups from addresses outside the allowed ranges return ''."""
        self.assertEqual(
            '', PolicyFile.policy.lookup_user('zeke', '10.18.0.1', 'photoserver', "connid", 2))
        self.assertEqual(
            '', PolicyFile.policy.lookup_user('zeke', '72.135.2.9', 'photoserver', "connid", 3))
        self.assertEqual(
            '', PolicyFile.policy.lookup_user('zeke', '127.0.0.1', 'photoserver', "connid", 4))

    def test_policy1_test_zeke_bad_app(self):
        """An unknown vhost/application yields no settings name."""
        self.assertEqual(
            '', PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5))

    def test_policy1_test_users_same_permissions(self):
        """Two users in the same group resolve to the same settings name."""
        zname = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 6)
        yname = PolicyFile.policy.lookup_user('ynot', '10.48.255.254', 'photoserver', '192.168.100.5:33334', 7)
        self.assertEqual(zname, yname)

    def test_policy1_lookup_unknown_application(self):
        """Settings lookup for an unknown application fails."""
        upolicy = {}
        self.assertFalse(
            PolicyFile.policy.lookup_settings('unknown', 'doesntmatter', upolicy)
        )

    def test_policy1_lookup_unknown_usergroup(self):
        """Settings lookup for an unknown user group fails."""
        upolicy = {}
        self.assertFalse(
            PolicyFile.policy.lookup_settings('photoserver', 'unknown', upolicy)
        )
class PolicyFileApplicationFallback(TestCase):
    """Behavior of the defaultVhost fallback for unknown applications."""

    manager = MockPolicyManager()
    policy = PolicyLocal(manager)
    policy.test_load_config()

    def test_bad_app_fallback(self):
        # Show that with no fallback the user cannot connect.
        # assertEqual (not assertTrue(a == b)) so failures show both values.
        self.assertEqual(
            '', self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5))
        # Enable the fallback defaultVhost and show the same user can now connect
        self.policy.set_default_vhost('photoserver')
        settingsname = self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5)
        self.assertEqual('test', settingsname)
        # Show that the fallback settings are returned
        upolicy = {}
        self.assertTrue(
            self.policy.lookup_settings('phony*app*name', settingsname, upolicy)
        )
        self.assertEqual(444444, upolicy['maxFrameSize'])
        self.assertEqual(444444, upolicy['maxMessageSize'])
        self.assertEqual(444444, upolicy['maxSessionWindow'])
        self.assertEqual(4, upolicy['maxSessions'])
        self.assertEqual(44, upolicy['maxSenders'])
        self.assertEqual(44, upolicy['maxReceivers'])
        self.assertTrue(upolicy['allowAnonymousSender'])
        self.assertTrue(upolicy['allowDynamicSource'])
        self.assertEqual('private', upolicy['targets'])
        self.assertEqual('private', upolicy['sources'])
        # Disable fallback and show failure again
        self.policy.set_default_vhost('')
        self.assertEqual(
            '', self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5))
class PolicyAppConnectionMgrTests(TestCase):
    """Connection-count limit enforcement in PolicyAppConnectionMgr.

    Constructor args are (max_total, max_per_user, max_per_host).
    """

    def test_policy_app_conn_mgr_fail_by_total(self):
        stats = PolicyAppConnectionMgr(1, 2, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        # assertEqual/assertIn give informative messages on failure.
        self.assertEqual(1, len(diags))
        self.assertIn('application connection limit', diags[0])

    def test_policy_app_conn_mgr_fail_by_user(self):
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(1, len(diags))
        self.assertIn('per user', diags[0])

    def test_policy_app_conn_mgr_fail_by_hosts(self):
        stats = PolicyAppConnectionMgr(3, 2, 1)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(1, len(diags))
        self.assertIn('per host', diags[0])

    def test_policy_app_conn_mgr_fail_by_user_hosts(self):
        # Both the per-user and per-host limits trip on the second connect.
        stats = PolicyAppConnectionMgr(3, 1, 1)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(2, len(diags))
        self.assertTrue('per user' in diags[0] or 'per user' in diags[1])
        self.assertTrue('per host' in diags[0] or 'per host' in diags[1])

    def test_policy_app_conn_mgr_update(self):
        # Raising the per-user limit via update() allows a blocked connect.
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(1, len(diags))
        self.assertIn('per user', diags[0])
        diags = []
        stats.update(3, 2, 2)
        self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))

    def test_policy_app_conn_mgr_disconnect(self):
        # Disconnecting frees the per-user slot for the next connect.
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))
        self.assertEqual(1, len(diags))
        self.assertIn('per user', diags[0])
        diags = []
        stats.disconnect("10.10.10.10:10000", 'chuck', '10.10.10.10')
        self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags))

    def test_policy_app_conn_mgr_create_bad_settings(self):
        # Negative limits must be rejected at construction time.
        self.assertRaises(PolicyError, PolicyAppConnectionMgr, -3, 1, 2)

    def test_policy_app_conn_mgr_update_bad_settings(self):
        # All-zero limits are legal at construction time...
        try:
            stats = PolicyAppConnectionMgr(0, 0, 0)
        except PolicyError:
            self.fail("Should allow all zeros.")
        # ...but a negative value passed to update() must raise.
        self.assertRaises(PolicyError, stats.update, 0, -1, 0)

    def test_policy_app_conn_mgr_larger_counts(self):
        stats = PolicyAppConnectionMgr(10000, 10000, 10000)
        diags = []
        for i in range(0, 10000):
            self.assertTrue(stats.can_connect('1.1.1.1:' + str(i), 'chuck', '1.1.1.1', diags))
            self.assertEqual(0, len(diags))
        self.assertFalse(stats.can_connect('1.1.1.1:10000', 'chuck', '1.1.1.1', diags))
        # All three limits (total, per-user, per-host) trip at once.
        self.assertEqual(3, len(diags))
        self.assertEqual(10000, stats.connections_active)
        self.assertEqual(10000, stats.connections_approved)
        self.assertEqual(1, stats.connections_denied)
# Entry point: run the tests directly; main_module() supplies the module
# name so the system_test harness can locate this file under unittest.
if __name__ == '__main__':
    unittest.main(main_module())
| dskarbek/qpid-dispatch | tests/router_policy_test.py | Python | apache-2.0 | 13,738 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications: models with automatic loading of pre-trained weights.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras._impl.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras._impl.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras._impl.keras.applications.resnet50 import ResNet50
from tensorflow.python.keras._impl.keras.applications.vgg16 import VGG16
from tensorflow.python.keras._impl.keras.applications.vgg19 import VGG19
from tensorflow.python.keras._impl.keras.applications.xception import Xception
| eadgarchen/tensorflow | tensorflow/python/keras/_impl/keras/applications/__init__.py | Python | apache-2.0 | 1,450 |
#
# Copyright (C) 2018 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 3 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""URL config for business tool"""
from django.conf.urls import url
from nav.web.business import views
# URL names below are reversed from templates and view code; keep them stable.
urlpatterns = [
    # Landing page for the business reports tool
    url(r'^$', views.BusinessView.as_view(),
        name='business-index'),
    # Availability report pages
    url('^device_availability/$', views.DeviceAvailabilityReport.as_view(),
        name='business-report-device-availability'),
    url('^link_availability/$', views.LinkAvailabilityReport.as_view(),
        name='business-report-link-availability'),
    # Subscription management endpoints.
    # NOTE(review): these three patterns have no trailing '$' anchor and no
    # raw-string prefix, unlike the ones above -- presumably intentional
    # (prefix matching), but worth confirming.
    url('^save_report_subscription', views.save_report_subscription,
        name='save-report-subscription'),
    url('^render_report_subscriptions', views.render_report_subscriptions,
        name='render-report-subscriptions'),
    url('^remove_report_subscription', views.remove_report_subscription,
        name='remove-report-subscription')
]
| UNINETT/nav | python/nav/web/business/urls.py | Python | gpl-2.0 | 1,451 |
#
# Sphinx documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# ".." exposes setup.py; "../src" exposes the package itself for autodoc.
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("../src"))
# Project metadata is reused from setup.py so the docs never drift from it.
from setup import project as meta
from confluencer import __main__
# Read the Docs sets this variable; the RTD theme is pre-installed there,
# so we only import it for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage',
    'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx', 'sphinx_click.ext',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = meta["name"]
# Pull the copyright notice out of the long description: first line that
# mentions "Copyright", minus the leading comment/"Copyright" tokens.
copyright = ' '.join([i for i in meta["long_description"].splitlines() if "Copyright" in i][0].split()[2:])

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = meta["version"]
# The short X.Y version.
# Raw string: "[^\d]+" without the r-prefix is an invalid escape sequence
# (DeprecationWarning since Python 3.6, an error in future versions).
version = '.'.join(re.split(r"[^\d]+", release)[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    "*~",  # editor backup files
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Napoleon settings
# Only Google-style docstrings are parsed; NumPy style is turned off.
napoleon_numpy_docstring = False
# -- Options for HTML output ---------------------------------------------------
if not on_rtd:
    # The theme to use for HTML and HTML Help pages. See the documentation for
    # a list of builtin themes.
    html_theme = 'sphinx_rtd_theme'
    # Add any paths that contain custom themes here, relative to this directory.
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Custom stylesheet layered on top of the theme's CSS.
html_style = 'css/custom.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = dict(
)
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/img/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/img/logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', project + '.tex', project + u' Documentation', meta["author"], 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', project, project + u' Documentation', [meta["author"]], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', project, project + u' Documentation', meta["author"], project, meta["description"], 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 1and1/confluencer | docs/conf.py | Python | apache-2.0 | 8,358 |
#
# Copyright (c) 2014-2015 The developers of Aqualid project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .aql_option_types import *
from .aql_option_value import *
from .aql_options import *
from .aql_builtin_options import *
| aqualid/aqualid | aql/options/__init__.py | Python | mit | 1,247 |
"""Support for Z-Wave sensors."""
from homeassistant.components.sensor import DEVICE_CLASS_BATTERY, DOMAIN, SensorEntity
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ZWaveDeviceEntity, const
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Z-Wave Sensor from Config Entry."""

    @callback
    def _async_on_new_sensor(sensor):
        """Forward a freshly discovered sensor entity to Home Assistant."""
        async_add_entities([sensor])

    # Sensor entities are created elsewhere and announced on the dispatcher.
    async_dispatcher_connect(hass, "zwave_new_sensor", _async_on_new_sensor)
def get_device(node, values, **kwargs):
    """Create Z-Wave entity device."""
    # Battery values take precedence over every other mapping.
    if values.primary.command_class == const.COMMAND_CLASS_BATTERY:
        return ZWaveBatterySensor(values)

    if node.has_command_class(const.COMMAND_CLASS_SENSOR_MULTILEVEL):
        return ZWaveMultilevelSensor(values)

    is_decimal_meter = (
        node.has_command_class(const.COMMAND_CLASS_METER)
        and values.primary.type == const.TYPE_DECIMAL
    )
    if is_decimal_meter:
        return ZWaveMultilevelSensor(values)

    has_alarm = node.has_command_class(const.COMMAND_CLASS_ALARM)
    has_sensor_alarm = node.has_command_class(const.COMMAND_CLASS_SENSOR_ALARM)
    if has_alarm or has_sensor_alarm:
        return ZWaveAlarmSensor(values)

    # No sensor mapping for this node/value combination.
    return None
class ZWaveSensor(ZWaveDeviceEntity, SensorEntity):
    """Representation of a Z-Wave sensor."""
    def __init__(self, values):
        """Initialize the sensor."""
        ZWaveDeviceEntity.__init__(self, values, DOMAIN)
        self.update_properties()
    def update_properties(self):
        """Handle the data changes for node values."""
        # Cache the primary value's data and units for the properties below.
        self._state = self.values.primary.data
        self._units = self.values.primary.units
    @property
    def force_update(self):
        """Return force_update."""
        # Always record state updates, even when the value is unchanged,
        # so repeated sensor reports show up in history.
        return True
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement the value is expressed in."""
        return self._units
class ZWaveMultilevelSensor(ZWaveSensor):
    """Representation of a multi level sensor Z-Wave sensor."""

    @property
    def state(self):
        """Return the state of the sensor, rounded for display."""
        value = self._state
        # Temperatures get one decimal place; any other float gets two.
        if self._units in ("C", "F"):
            value = round(value, 1)
        elif isinstance(value, float):
            value = round(value, 2)
        return value

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        unit_map = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
        return unit_map.get(self._units, self._units)
class ZWaveAlarmSensor(ZWaveSensor):
    """Representation of a Z-Wave sensor that sends Alarm alerts.
    Examples include certain Multisensors that have motion and vibration
    capabilities. Z-Wave defines various alarm types such as Smoke, Flood,
    Burglar, CarbonMonoxide, etc.
    This wraps these alarms and allows you to use them to trigger things, etc.
    COMMAND_CLASS_ALARM is what we get here.
    """
    # No specialization needed: the generic ZWaveSensor behavior
    # (raw value plus units) is sufficient for alarm-type values.
class ZWaveBatterySensor(ZWaveSensor):
    """Representation of Z-Wave device battery level."""
    @property
    def device_class(self):
        """Return the class of this device."""
        # Classifying the entity as a battery lets the frontend choose
        # an appropriate icon and grouping.
        return DEVICE_CLASS_BATTERY
| w1ll1am23/home-assistant | homeassistant/components/zwave/sensor.py | Python | apache-2.0 | 3,433 |
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
import sys
class EncProtocolEntity(ProtocolEntity):
    """An <enc> protocol node carrying an encrypted message payload.

    The payload is a pre-key message ("pkmsg"), a regular message ("msg"),
    or a sender-key/group message ("skmsg").
    """

    TYPE_PKMSG = "pkmsg"
    TYPE_MSG = "msg"
    TYPE_SKMSG = "skmsg"
    TYPES = (TYPE_PKMSG, TYPE_MSG, TYPE_SKMSG)

    def __init__(self, type, version, data, mediaType = None, jid = None):
        """Create the entity.

        :param type: one of TYPES ("pkmsg", "msg" or "skmsg")
        :param version: protocol version; coerced to int
        :param data: encrypted payload
        :param mediaType: optional media type attribute
        :param jid: optional recipient jid; when set, toProtocolTreeNode
                    wraps the <enc> node in a <to> node
        :raises ValueError: if type is not a known enc type
        """
        # Raise instead of assert so the validation survives `python -O`,
        # which strips assert statements.
        if type not in self.__class__.TYPES:
            raise ValueError("Unknown message enc type %s" % type)
        super(EncProtocolEntity, self).__init__("enc")
        self.type = type
        self.version = int(version)
        self.data = data
        self.mediaType = mediaType
        self.jid = jid

    def getType(self):
        return self.type

    def getVersion(self):
        return self.version

    def getData(self):
        return self.data

    def getMediaType(self):
        return self.mediaType

    def getJid(self):
        return self.jid

    def toProtocolTreeNode(self):
        """Serialize to a ProtocolTreeNode; wrap in <to> when jid is set."""
        attribs = {"type": self.type, "v": str(self.version)}
        if self.mediaType:
            attribs["mediatype"] = self.mediaType
        encNode = ProtocolTreeNode("enc", attribs, data = self.data)
        if self.jid:
            return ProtocolTreeNode("to", {"jid": self.jid}, [encNode])
        return encNode

    @staticmethod
    def fromProtocolTreeNode(node):
        """Build an EncProtocolEntity from a parsed <enc> tree node."""
        return EncProtocolEntity(node["type"], node["v"], node.data, node["mediatype"])
| tgalal/yowsup | yowsup/layers/axolotl/protocolentities/enc.py | Python | gpl-3.0 | 1,354 |
# -*- coding: utf-8 -*-
# Copyright: See the LICENSE file.
"""Tests for factory_boy/SQLAlchemy interactions."""
import factory
from .compat import unittest
from .compat import mock
import warnings
from factory.alchemy import SQLAlchemyModelFactory
from .alchemyapp import models
class StandardFactory(SQLAlchemyModelFactory):
    """Factory for StandardModel: sequential integer pk and 'fooN' string."""
    class Meta:
        model = models.StandardModel
        sqlalchemy_session = models.session
    id = factory.Sequence(lambda n: n)
    foo = factory.Sequence(lambda n: 'foo%d' % n)
class NonIntegerPkFactory(SQLAlchemyModelFactory):
    """Factory for a model whose primary key is a string ('fooN')."""
    class Meta:
        model = models.NonIntegerPk
        sqlalchemy_session = models.session
    id = factory.Sequence(lambda n: 'foo%d' % n)
class NoSessionFactory(SQLAlchemyModelFactory):
    """Factory deliberately configured without a session, for error cases."""
    class Meta:
        model = models.StandardModel
        sqlalchemy_session = None
    id = factory.Sequence(lambda n: n)
class SQLAlchemyPkSequenceTestCase(unittest.TestCase):
    """Check that factory sequences and primary keys evolve independently."""
    def setUp(self):
        super(SQLAlchemyPkSequenceTestCase, self).setUp()
        # Start each test from sequence counter 1 and a clean session.
        StandardFactory.reset_sequence(1)
        NonIntegerPkFactory._meta.sqlalchemy_session.rollback()
    def test_pk_first(self):
        std = StandardFactory.build()
        self.assertEqual('foo1', std.foo)
    def test_pk_many(self):
        std1 = StandardFactory.build()
        std2 = StandardFactory.build()
        self.assertEqual('foo1', std1.foo)
        self.assertEqual('foo2', std2.foo)
    def test_pk_creation(self):
        std1 = StandardFactory.create()
        self.assertEqual('foo1', std1.foo)
        self.assertEqual(1, std1.id)
        # Resetting the sequence restarts both id and foo counters at 0.
        StandardFactory.reset_sequence()
        std2 = StandardFactory.create()
        self.assertEqual('foo0', std2.foo)
        self.assertEqual(0, std2.id)
    def test_pk_force_value(self):
        std1 = StandardFactory.create(id=10)
        self.assertEqual('foo1', std1.foo)  # sequence and pk are unrelated
        self.assertEqual(10, std1.id)
        StandardFactory.reset_sequence()
        std2 = StandardFactory.create()
        self.assertEqual('foo0', std2.foo)  # Sequence doesn't care about pk
        self.assertEqual(0, std2.id)
class SQLAlchemySessionPersistenceTestCase(unittest.TestCase):
    """Verify the sqlalchemy_session_persistence Meta option.

    A NonCallableMagicMock stands in for the session so that flush()/commit()
    calls can be asserted without touching a real database.
    """
    def setUp(self):
        super(SQLAlchemySessionPersistenceTestCase, self).setUp()
        self.mock_session = mock.NonCallableMagicMock(spec=models.session)
    def test_flushing(self):
        # 'flush' persistence: create() flushes but never commits.
        class FlushingPersistenceFactory(StandardFactory):
            class Meta:
                sqlalchemy_session = self.mock_session
                sqlalchemy_session_persistence = 'flush'
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
        FlushingPersistenceFactory.create()
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_called_once_with()
    def test_committing(self):
        # 'commit' persistence: create() commits but never flushes.
        class CommittingPersistenceFactory(StandardFactory):
            class Meta:
                sqlalchemy_session = self.mock_session
                sqlalchemy_session_persistence = 'commit'
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
        CommittingPersistenceFactory.create()
        self.mock_session.commit.assert_called_once_with()
        self.mock_session.flush.assert_not_called()
    def test_noflush_nocommit(self):
        # None persistence: create() touches the session in neither way.
        class InactivePersistenceFactory(StandardFactory):
            class Meta:
                sqlalchemy_session = self.mock_session
                sqlalchemy_session_persistence = None
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
        InactivePersistenceFactory.create()
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
    def test_type_error(self):
        # An unknown persistence option is rejected at class-declaration time.
        with self.assertRaises(TypeError):
            class BadPersistenceFactory(StandardFactory):
                class Meta:
                    sqlalchemy_session_persistence = 'invalid_persistence_option'
                    model = models.StandardModel
    def test_force_flush_deprecation(self):
        # The legacy force_flush option must emit a DeprecationWarning while
        # still behaving like sqlalchemy_session_persistence = 'flush'.
        with warnings.catch_warnings(record=True) as warning_list:
            class OutdatedPersistenceFactory(StandardFactory):
                class Meta:
                    force_flush = True
                    sqlalchemy_session = self.mock_session
        # There should be *1* DeprecationWarning
        self.assertEqual(len(warning_list), 1)
        warning = warning_list[0]
        self.assertTrue(issubclass(warning.category, DeprecationWarning))
        # The warning text should point to the class declaration.
        text = warnings.formatwarning(warning.message, warning.category, warning.filename, warning.lineno)
        self.assertIn('test_alchemy.py', text)
        self.assertIn('class OutdatedPersistenceFactory', text)
        # However, we shall keep the old-style behavior.
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
        OutdatedPersistenceFactory.create()
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_called_once_with()
class SQLAlchemyNonIntegerPkTestCase(unittest.TestCase):
    """Factories whose model pk is a sequence-derived string ('foo0', 'foo1', ...)."""
    def setUp(self):
        super(SQLAlchemyNonIntegerPkTestCase, self).setUp()
        NonIntegerPkFactory.reset_sequence()
        NonIntegerPkFactory._meta.sqlalchemy_session.rollback()
    def test_first(self):
        """The first built object gets the initial sequence-based pk."""
        obj = NonIntegerPkFactory.build()
        self.assertEqual('foo0', obj.id)
    def test_many(self):
        """Successive builds keep incrementing the sequence."""
        first = NonIntegerPkFactory.build()
        second = NonIntegerPkFactory.build()
        self.assertEqual('foo0', first.id)
        self.assertEqual('foo1', second.id)
    def test_creation(self):
        """create() consumes the sequence; reset_sequence() restarts it."""
        saved = NonIntegerPkFactory.create()
        self.assertEqual('foo0', saved.id)
        NonIntegerPkFactory.reset_sequence()
        rebuilt = NonIntegerPkFactory.build()
        self.assertEqual('foo0', rebuilt.id)
    def test_force_pk(self):
        """An explicit pk value overrides the sequence-generated one."""
        forced = NonIntegerPkFactory.create(id='foo10')
        self.assertEqual('foo10', forced.id)
        NonIntegerPkFactory.reset_sequence()
        fresh = NonIntegerPkFactory.create()
        self.assertEqual('foo0', fresh.id)
class SQLAlchemyNoSessionTestCase(unittest.TestCase):
    """Behaviour of a factory that has no SQLAlchemy session configured."""
    def test_create_raises_exception_when_no_session_was_set(self):
        # create() must persist, which is impossible without a session.
        with self.assertRaises(RuntimeError):
            NoSessionFactory.create()
    def test_build_does_not_raises_exception_when_no_session_was_set(self):
        # build() never touches the session, so it keeps working.
        first = NoSessionFactory.build()
        second = NoSessionFactory.build()
        self.assertEqual(first.id, 0)
        self.assertEqual(second.id, 1)
| rrauenza/factory_boy | tests/test_alchemy.py | Python | mit | 6,790 |
from datetime import timedelta as td
import signal
import time
from threading import Thread
from django.core.management.base import BaseCommand
from django.utils import timezone
from hc.api.models import Check, Flip
from statsd.defaults.env import statsd
# Header line written to stdout before each alert is sent (status, check code).
SENDING_TMPL = "Sending alert, status=%s, code=%s\n"
# Summary line written after sending (elapsed seconds, check code).
SEND_TIME_TMPL = "Sending took %.1fs, code=%s\n"
def notify(flip_id, stdout):
    """Sends all channel notifications for one Flip and records timing stats.

    Writes progress lines to *stdout* and reports dwell/send times to statsd.
    """
    flip = Flip.objects.get(id=flip_id)
    check = flip.owner
    # Set the historic status here but *don't save it*.
    # It would be nicer to pass the status explicitly, as a separate parameter.
    check.status = flip.new_status
    # And just to make sure it doesn't get saved by a future coding accident:
    setattr(check, "save", None)
    stdout.write(SENDING_TMPL % (flip.new_status, check.code))
    # Set or clear dates for followup nags
    check.project.update_next_nag_dates()
    # Send notifications
    send_start = timezone.now()
    for ch, error, secs in flip.send_alerts():
        # Per-channel result line: OK, ERROR (send failed), or SLOW (>5s).
        label = "OK"
        if error:
            label = "ERROR"
        elif secs > 5:
            label = "SLOW"
        s = " * %-5s %4.1fs %-10s %s %s\n" % (label, secs, ch.kind, ch.code, error)
        stdout.write(s)
    send_time = timezone.now() - send_start
    stdout.write(SEND_TIME_TMPL % (send_time.total_seconds(), check.code))
    # dwellTime: how long the flip waited before we picked it up.
    statsd.timing("hc.sendalerts.dwellTime", send_start - flip.created)
    statsd.timing("hc.sendalerts.sendTime", send_time)
def notify_on_thread(flip_id, stdout):
    """Fire-and-forget: delivers notifications for this flip on a worker thread."""
    Thread(target=notify, args=(flip_id, stdout)).start()
class Command(BaseCommand):
    """Management command that turns due checks into Flips and delivers alerts.

    Runs in a loop (unless --no-loop): first flips any checks whose
    alert_after has passed, then processes unprocessed flips, optionally
    on worker threads.
    """
    help = "Sends UP/DOWN email alerts"

    def add_arguments(self, parser):
        """Registers the --no-loop and --no-threads flags."""
        parser.add_argument(
            "--no-loop",
            action="store_false",
            dest="loop",
            default=True,
            help="Do not keep running indefinitely in a 2 second wait loop",
        )
        parser.add_argument(
            "--no-threads",
            action="store_false",
            dest="use_threads",
            # Bug fix: this flag previously had default=False, so alerts were
            # *always* sent synchronously even when --no-threads was absent.
            # A store_false flag must default to True (as --no-loop does) so
            # that omitting the flag enables threaded sending, matching the
            # help text and process_one_flip()'s own default.
            default=True,
            help="Send alerts synchronously, without using threads",
        )

    def process_one_flip(self, use_threads=True):
        """ Find unprocessed flip, send notifications.

        Returns False when there was nothing to do, True otherwise.
        """
        # Order by processed, otherwise Django will automatically order by id
        # and make the query less efficient
        q = Flip.objects.filter(processed=None).order_by("processed")
        flip = q.first()
        if flip is None:
            return False

        # Claim the flip atomically so concurrent workers don't double-send.
        q = Flip.objects.filter(id=flip.id, processed=None)
        num_updated = q.update(processed=timezone.now())
        if num_updated != 1:
            # Nothing got updated: another worker process got there first.
            return True

        if use_threads:
            notify_on_thread(flip.id, self.stdout)
        else:
            notify(flip.id, self.stdout)

        return True

    def handle_going_down(self):
        """ Process a single check going down.

        Returns False when no check was due, True otherwise.
        """
        now = timezone.now()
        q = Check.objects.filter(alert_after__lt=now).exclude(status="down")
        # Sort by alert_after, to avoid unnecessary sorting by id:
        check = q.order_by("alert_after").first()
        if check is None:
            return False

        old_status = check.status
        q = Check.objects.filter(id=check.id, status=old_status)
        try:
            status = check.get_status()
        except Exception as e:
            # Make sure we don't trip on this check again for an hour:
            # Otherwise sendalerts may end up in a crash loop.
            q.update(alert_after=now + td(hours=1))
            # Then re-raise the exception:
            raise e

        if status != "down":
            # It is not down yet. Update alert_after
            q.update(alert_after=check.going_down_after())
            return True

        # Atomically update status
        flip_time = check.going_down_after()
        num_updated = q.update(alert_after=None, status="down")
        if num_updated != 1:
            # Nothing got updated: another worker process got there first.
            return True

        flip = Flip(owner=check)
        flip.created = flip_time
        flip.old_status = old_status
        flip.new_status = "down"
        flip.save()
        return True

    def on_sigterm(self, *args):
        """SIGTERM handler: requests a graceful exit from the main loop."""
        self.stdout.write("Received SIGTERM, finishing...\n")
        self.sigterm = True

    def handle(self, use_threads=True, loop=True, *args, **options):
        """Main entry point: drain due checks and flips, then poll every 2s."""
        self.sigterm = False
        signal.signal(signal.SIGTERM, self.on_sigterm)
        self.stdout.write("sendalerts is now running\n")

        i, sent = 0, 0
        while not self.sigterm:
            # Create flips for any checks going down
            while not self.sigterm and self.handle_going_down():
                pass

            # Process the unprocessed flips
            while not self.sigterm and self.process_one_flip(use_threads):
                sent += 1

            if not loop:
                break

            # Sleep for 2 seconds before looking for more work
            if not self.sigterm:
                i += 2
                time.sleep(2)

                # Print "-- MARK --" approx. every minute so the logs
                # have evidence sendalerts is still running:
                if i % 60 == 0:
                    timestamp = timezone.now().isoformat()
                    self.stdout.write("-- MARK %s --\n" % timestamp)

        return f"Sent {sent} alert(s)."
| healthchecks/healthchecks | hc/api/management/commands/sendalerts.py | Python | bsd-3-clause | 5,516 |
# Author: Hubert Kario, (c) 2019
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Test for ECDSA support in Certificate Verify"""
from __future__ import print_function
import traceback
import sys
import getopt
from itertools import chain, islice
from random import sample
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, \
CertificateGenerator, CertificateVerifyGenerator, \
AlertGenerator, TCPBufferingEnable, TCPBufferingDisable, \
TCPBufferingFlush
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectCertificateRequest, \
ExpectApplicationData, ExpectServerKeyExchange
from tlslite.extensions import SignatureAlgorithmsExtension, \
SignatureAlgorithmsCertExtension, SupportedGroupsExtension, \
ECPointFormatsExtension
from tlslite.constants import CipherSuite, AlertDescription, \
HashAlgorithm, SignatureAlgorithm, ExtensionType, GroupName, \
ECPointFormat, AlertLevel, AlertDescription
from tlslite.utils.keyfactory import parsePEMKey
from tlslite.x509 import X509
from tlslite.x509certchain import X509CertChain
from tlsfuzzer.utils.lists import natural_sort_keys
from tlsfuzzer.helpers import RSA_SIG_ALL, ECDSA_SIG_ALL
version = 4  # script version, printed in the end-of-run summary
def help_msg():
    """Prints the command-line usage summary for this script."""
    print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
    print(" -h hostname    name of the host to run the test against")
    print("                localhost by default")
    print(" -p port        port number to use for connection, 4433 by default")
    print(" probe-name     if present, will run only the probes with given")
    print("                names and not all of them, e.g \"sanity\"")
    print(" -e probe-name  exclude the probe from the list of the ones run")
    print("                may be specified multiple times")
    print(" -x probe-name  expect the probe to fail. When such probe passes despite being marked like this")
    print("                it will be reported in the test summary and the whole script will fail.")
    print("                May be specified multiple times.")
    print(" -X message     expect the `message` substring in exception raised during")
    print("                execution of preceding expected failure probe")
    print("                usage: [-x probe-name] [-X exception], order is compulsory!")
    print(" -n num         run 'num' or all(if 0) tests instead of default(10)")
    print("                (excluding \"sanity\" tests)")
    print(" -k file.pem    file with private key for client")
    print(" -c file.pem    file with certificate for client")
    print(" --help         this message")
def main():
    """Tests server support for ECDSA signatures in CertificateVerify.

    Builds a set of TLS conversation graphs (sanity handshake, forced
    md5+ecdsa signature, and every hash-pair combination of signed vs
    advertised algorithm), runs them against the server, and prints a
    pass/fail summary.
    """
    host = "localhost"
    port = 4433
    num_limit = 10
    run_exclude = set()
    expected_failures = {}
    last_exp_tmp = None
    private_key = None
    cert = None

    # --- command-line parsing ---
    argv = sys.argv[1:]
    opts, args = getopt.getopt(argv, "h:p:e:x:X:n:k:c:", ["help"])
    for opt, arg in opts:
        if opt == '-h':
            host = arg
        elif opt == '-p':
            port = int(arg)
        elif opt == '-e':
            run_exclude.add(arg)
        elif opt == '-x':
            expected_failures[arg] = None
            last_exp_tmp = str(arg)
        elif opt == '-X':
            # -X attaches an expected exception substring to the most
            # recent -x probe, hence the ordering requirement.
            if not last_exp_tmp:
                raise ValueError("-x has to be specified before -X")
            expected_failures[last_exp_tmp] = str(arg)
        elif opt == '-n':
            num_limit = int(arg)
        elif opt == '--help':
            help_msg()
            sys.exit(0)
        elif opt == '-k':
            text_key = open(arg, 'rb').read()
            if sys.version_info[0] >= 3:
                text_key = str(text_key, 'utf-8')
            private_key = parsePEMKey(text_key, private=True)
        elif opt == '-c':
            text_cert = open(arg, 'rb').read()
            if sys.version_info[0] >= 3:
                text_cert = str(text_cert, 'utf-8')
            cert = X509()
            cert.parse(text_cert)
        else:
            raise ValueError("Unknown option: {0}".format(opt))

    if not private_key:
        raise ValueError("Specify private key file using -k")

    if not cert:
        raise ValueError("Specify certificate file using -c")

    if args:
        run_only = set(args)
    else:
        run_only = None

    conversations = {}

    # sanity check for Client Certificates
    conversation = Connect(host, port)
    node = conversation
    ciphers = [CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
               CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
               CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
    ext = {ExtensionType.signature_algorithms :
           SignatureAlgorithmsExtension().create(ECDSA_SIG_ALL + RSA_SIG_ALL),
           ExtensionType.signature_algorithms_cert :
           SignatureAlgorithmsCertExtension().create(ECDSA_SIG_ALL + RSA_SIG_ALL),
           ExtensionType.supported_groups :
           SupportedGroupsExtension().create([
               GroupName.secp256r1, GroupName.secp384r1, GroupName.secp521r1]),
           ExtensionType.ec_point_formats :
           ECPointFormatsExtension().create([ECPointFormat.uncompressed])}
    node = node.add_child(ClientHelloGenerator(ciphers, extensions=ext))
    node = node.add_child(ExpectServerHello(version=(3, 3)))
    node = node.add_child(ExpectCertificate())
    node = node.add_child(ExpectServerKeyExchange())
    node = node.add_child(ExpectCertificateRequest())
    node = node.add_child(ExpectServerHelloDone())
    node = node.add_child(CertificateGenerator(X509CertChain([cert])))
    node = node.add_child(ClientKeyExchangeGenerator())
    node = node.add_child(CertificateVerifyGenerator(private_key))
    node = node.add_child(ChangeCipherSpecGenerator())
    node = node.add_child(FinishedGenerator())
    node = node.add_child(ExpectChangeCipherSpec())
    node = node.add_child(ExpectFinished())
    node = node.add_child(ApplicationDataGenerator(b"GET / HTTP/1.0\n\n"))
    node = node.add_child(ExpectApplicationData())
    node = node.add_child(AlertGenerator(AlertDescription.close_notify))
    node = node.add_child(ExpectClose())
    node.next_sibling = ExpectAlert()
    node.next_sibling.add_child(ExpectClose())
    conversations["sanity"] = conversation

    # force MD5 signature on CertificateVerify
    # (md5 is obsolete; the server is expected to reject the handshake)
    conversation = Connect(host, port)
    node = conversation
    ciphers = [CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
               CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
               CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
    ext = {ExtensionType.signature_algorithms :
           SignatureAlgorithmsExtension().create(ECDSA_SIG_ALL + RSA_SIG_ALL),
           ExtensionType.signature_algorithms_cert :
           SignatureAlgorithmsCertExtension().create(ECDSA_SIG_ALL + RSA_SIG_ALL),
           ExtensionType.supported_groups :
           SupportedGroupsExtension().create([
               GroupName.secp256r1, GroupName.secp384r1, GroupName.secp521r1]),
           ExtensionType.ec_point_formats :
           ECPointFormatsExtension().create([ECPointFormat.uncompressed])}
    node = node.add_child(ClientHelloGenerator(ciphers, extensions=ext))
    node = node.add_child(ExpectServerHello(version=(3, 3)))
    node = node.add_child(ExpectCertificate())
    node = node.add_child(ExpectServerKeyExchange())
    node = node.add_child(ExpectCertificateRequest())
    node = node.add_child(ExpectServerHelloDone())
    node = node.add_child(CertificateGenerator(X509CertChain([cert])))
    node = node.add_child(ClientKeyExchangeGenerator())
    node = node.add_child(TCPBufferingEnable())
    sig_type = (HashAlgorithm.md5, SignatureAlgorithm.ecdsa)
    node = node.add_child(CertificateVerifyGenerator(private_key,
                                                     msg_alg=sig_type
                                                     ))
    # the other side can close connection right away, add options to handle it
    node = node.add_child(ChangeCipherSpecGenerator())
    node = node.add_child(FinishedGenerator())
    node = node.add_child(TCPBufferingDisable())
    node = node.add_child(TCPBufferingFlush())
    # we expect closure or Alert and then closure of socket
    node = node.add_child(ExpectAlert(AlertLevel.fatal,
                                      AlertDescription.illegal_parameter))
    node.add_child(ExpectClose())
    conversations["md5+ecdsa forced"] = conversation

    # For every (advertised hash, actually-used hash) pair: matching pairs
    # must complete the handshake, mismatched pairs must get decrypt_error.
    for h_alg in ["sha512", "sha384", "sha256", "sha224", "sha1"]:
        for real_h_alg in ["sha512", "sha384", "sha256", "sha224", "sha1"]:
            conversation = Connect(host, port)
            node = conversation
            ciphers = [CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
                       CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
                       CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
            ext = {ExtensionType.signature_algorithms :
                   SignatureAlgorithmsExtension().create(ECDSA_SIG_ALL +
                                                         RSA_SIG_ALL),
                   ExtensionType.signature_algorithms_cert :
                   SignatureAlgorithmsCertExtension().create(ECDSA_SIG_ALL +
                                                             RSA_SIG_ALL),
                   ExtensionType.supported_groups :
                   SupportedGroupsExtension().create([
                       GroupName.secp256r1, GroupName.secp384r1,
                       GroupName.secp521r1]),
                   ExtensionType.ec_point_formats :
                   ECPointFormatsExtension().create([
                       ECPointFormat.uncompressed])}
            node = node.add_child(ClientHelloGenerator(ciphers,
                                                       extensions=ext))
            node = node.add_child(ExpectServerHello(version=(3, 3)))
            node = node.add_child(ExpectCertificate())
            node = node.add_child(ExpectServerKeyExchange())
            node = node.add_child(ExpectCertificateRequest())
            node = node.add_child(ExpectServerHelloDone())
            node = node.add_child(CertificateGenerator(X509CertChain([cert])))
            node = node.add_child(ClientKeyExchangeGenerator())
            alg = (getattr(HashAlgorithm, h_alg), SignatureAlgorithm.ecdsa)
            real_alg = (getattr(HashAlgorithm, real_h_alg),
                        SignatureAlgorithm.ecdsa)
            if alg == real_alg:
                node = node.add_child(CertificateVerifyGenerator(
                    private_key,
                    msg_alg=alg,
                    sig_alg=real_alg))
                node = node.add_child(ChangeCipherSpecGenerator())
                node = node.add_child(FinishedGenerator())
                node = node.add_child(ExpectChangeCipherSpec())
                node = node.add_child(ExpectFinished())
                node = node.add_child(ApplicationDataGenerator(
                    b"GET / HTTP/1.0\n\n"))
                node = node.add_child(ExpectApplicationData())
                node = node.add_child(AlertGenerator(
                    AlertDescription.close_notify))
                node = node.add_child(ExpectClose())
                node.next_sibling = ExpectAlert()
                node.next_sibling.add_child(ExpectClose())
                conversations["make {0}+ecdsa signature in CertificateVerify"
                              .format(h_alg)] = conversation
            else:
                node = node.add_child(TCPBufferingEnable())
                node = node.add_child(CertificateVerifyGenerator(
                    private_key,
                    msg_alg=alg,
                    sig_alg=real_alg))
                node = node.add_child(ChangeCipherSpecGenerator())
                node = node.add_child(FinishedGenerator())
                node = node.add_child(TCPBufferingDisable())
                node = node.add_child(TCPBufferingFlush())
                node = node.add_child(ExpectAlert(
                    AlertLevel.fatal,
                    AlertDescription.decrypt_error))
                node = node.add_child(ExpectClose())
                conversations["make {0}+ecdsa signature, advertise it as "
                              "{1}+ecdsa in CertificateVerify"
                              .format(h_alg, real_h_alg)] = conversation

    # run the conversation
    good = 0
    bad = 0
    xfail = 0
    xpass = 0
    failed = []
    xpassed = []
    if not num_limit:
        num_limit = len(conversations)

    # make sure that sanity test is run first and last
    # to verify that server was running and kept running throught
    sanity_tests = [('sanity', conversations['sanity'])]
    if run_only:
        if num_limit > len(run_only):
            num_limit = len(run_only)
        regular_tests = [(k, v) for k, v in conversations.items() if k in run_only]
    else:
        regular_tests = [(k, v) for k, v in conversations.items() if
                         (k != 'sanity') and k not in run_exclude]
    sampled_tests = sample(regular_tests, min(num_limit, len(regular_tests)))
    ordered_tests = chain(sanity_tests, sampled_tests, sanity_tests)

    for c_name, c_test in ordered_tests:
        print("{0} ...".format(c_name))

        runner = Runner(c_test)

        res = True
        exception = None
        try:
            runner.run()
        except Exception as exp:
            exception = exp
            print("Error while processing")
            print(traceback.format_exc())
            res = False

        # Classify the result: expected failures (-x/-X) are tracked
        # separately from ordinary pass/fail.
        if c_name in expected_failures:
            if res:
                xpass += 1
                xpassed.append(c_name)
                print("XPASS-expected failure but test passed\n")
            else:
                if expected_failures[c_name] is not None and \
                   expected_failures[c_name] not in str(exception):
                    bad += 1
                    failed.append(c_name)
                    print("Expected error message: {0}\n"
                          .format(expected_failures[c_name]))
                else:
                    xfail += 1
                    print("OK-expected failure\n")
        else:
            if res:
                good += 1
                print("OK\n")
            else:
                bad += 1
                failed.append(c_name)

    print("Test support for ECDSA signatures in CertificateVerify\n")

    print("Test end")
    print(20 * '=')
    print("version: {0}".format(version))
    print(20 * '=')
    print("TOTAL: {0}".format(len(sampled_tests) + 2*len(sanity_tests)))
    print("SKIP: {0}".format(len(run_exclude.intersection(conversations.keys()))))
    print("PASS: {0}".format(good))
    print("XFAIL: {0}".format(xfail))
    print("FAIL: {0}".format(bad))
    print("XPASS: {0}".format(xpass))
    print(20 * '=')
    sort = sorted(xpassed ,key=natural_sort_keys)
    if len(sort):
        print("XPASSED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
    sort = sorted(failed, key=natural_sort_keys)
    if len(sort):
        print("FAILED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))

    if bad or xpass:
        sys.exit(1)
# Script entry point.
if __name__ == "__main__":
    main()
| tomato42/tlsfuzzer | scripts/test-ecdsa-in-certificate-verify.py | Python | gpl-2.0 | 15,620 |
# import urllib2
# fin = open('rawcsv/stockdata/stockdata.csv')
# fin.read()
#clean and merge data in one csv
# Python 2 script: collects all *.csv files from mypath and prepares the
# merged-output and metadata-output files.
from os import listdir
from os.path import isfile, join
mypath = './USstockHistory167Mb/'
onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) and f[-3:] == 'csv' ]
print onlyfiles
# Placeholder ticker symbol; overwritten per input file by
# firstheader()/findTicker() below.
ticker = 'blh'
# NOTE(review): neither output file is ever closed; buffered data is only
# flushed at interpreter exit -- consider explicit close()/with blocks.
foutfinal = open('./USstockHistory167Mb_output_all/stockdata.csv', 'w')
foutmeta = open('./USstockHistory167Mb_output_all/stockdatameta.csv', 'w')
def firstheader(line):
    """Parses the first header line of the first file.

    Extracts the ticker symbol found between '[' and ']' into the module
    global, copies the raw line to the metadata file, and writes the merged
    file's title line.
    """
    global ticker
    start = line.index('[') + 1
    end = line.index(']')
    ticker = line[start:end]
    foutmeta.write(line) # get the stock ticker and description
    foutfinal.write('stockdata_84_06_eod' + '\n')
def secondheader(line):
    """Converts the space-separated column header into CSV and writes it,
    followed by a SQL-ish type row (VARCHAR/DATE, then FLOAT4 per column).

    Assumes *line* looks like "# COL1 COL2 ..." -- TODO confirm against the
    input data; the insert positions below depend on that exact layout.
    """
    csvlist = line.replace('# ', '').split(' ')
    csvlist.insert(0, 'TICKER')
    # presumably the source header lacks a CLOSE column at position 3 --
    # verify against the raw files.
    csvlist.insert(3, 'CLOSE')
    csvline = ','.join(csvlist)
    foutfinal.write(csvline)
    foutmeta.write(csvline) # get column names
    # Type row: TICKER is VARCHAR(10), second column DATE, rest FLOAT4.
    foutfinal.write('VARCHAR(10),DATE')
    for i in xrange(len(csvlist) - 2):
        foutfinal.write(',FLOAT4')
    foutfinal.write('\n')
def findTicker(line):
    """Updates the module-level ticker with the symbol found between the
    first '[' and ']' of *line* (e.g. "... [IBM] ...")."""
    global ticker
    lo = line.index('[') + 1
    hi = line.index(']')
    ticker = line[lo:hi]
# Merge every input CSV into foutfinal, prefixing each data row with its
# file's ticker symbol.  Only the first file contributes header rows.
for index,f in enumerate(onlyfiles):
    fin = open(mypath + onlyfiles[index], 'r')
    if index == 0:
        # First file: emit title + column headers from its two header lines.
        firstheader(fin.readline())
        secondheader(fin.readline())
    else:
        # Other files: read the ticker from line 1, skip the column header.
        findTicker(fin.readline())
        fin.next()
    for line in fin:
        foutfinal.write(ticker + ',' + line)
    fin.close()
| shanshanzhu/Data-Scrappers | USstock/stockcleanerOneFile.py | Python | mit | 1,444 |
import os
import ConfigParser
import snapbill
# Module-wide default connection, shared via setConnection()/ensureConnection().
# NOTE(review): `global` at module level is a no-op; kept for byte-compatibility.
global currentConnection
currentConnection = None
def setConnection(connection):
    """Stores *connection* as the module-wide default used by
    ensureConnection() when no explicit connection is supplied."""
    global currentConnection
    currentConnection = connection
def ensureConnection(connection):
    """Ensures an api connection (uses the current one if available).

    Precedence: the explicit *connection* argument, then the module-wide
    currentConnection, and finally a freshly created snapbill.Connection.
    """
    if connection:
        return connection
    if currentConnection:
        return currentConnection
    return snapbill.Connection()
def fetchPassword(username=None):
    """Reads (username, password) from the .snapbill.cfg config files.

    The home-directory config is read first, then one in the working
    directory (later files override earlier ones).  With no *username*,
    credentials come from the [default] section.
    """
    config = ConfigParser.RawConfigParser()
    # RawConfigParser.read accepts a list and reads the files in order.
    config.read([os.path.expanduser('~/.snapbill.cfg'), '.snapbill.cfg'])

    section = username if username else 'default'
    if username is None:
        username = config.get(section, 'username')
    password = config.get(section, 'password')

    return (username, password)
def classname(name):
    """Converts an API object name into its associated class name.

    The first letter, and every letter directly after an underscore, is
    upper-cased; the remainder of each segment is lower-cased.
    """
    pieces = []
    for segment in name.split('_'):
        pieces.append(segment[:1].upper() + segment[1:].lower())
    return '_'.join(pieces)
class Error(Exception):
  """Exception carrying a summary message plus a field->message mapping."""
  def __init__(self, message, errors):
    self.message = message
    self.errors = errors

  def __str__(self):
    # One line per error; non-empty keys are shown as a "[key]: " prefix.
    lines = [self.message]
    for (field, detail) in self.errors.items():
      if field: detail = "[%s]: %s" % (field, detail)
      lines.append(" - %s" % detail)
    return "\n".join(lines)
| snapbill/snapbill-pyapi | snapbill/util.py | Python | mit | 1,489 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from flask import send_from_directory
from pkg_resources import get_distribution
from netman.api.api_utils import to_response
from netman.api.objects import info
class NetmanApi(object):
    """Flask-facing API: exposes /netman/info and the /netman/apidocs pages."""
    def __init__(self, switch_factory=None, get_distribution_callback=get_distribution):
        # get_distribution_callback is injectable so tests can stub out the
        # pkg_resources lookup of the installed netman version.
        self.switch_factory = switch_factory
        self.app = None
        self.get_distribution = get_distribution_callback

    @property
    def logger(self):
        return logging.getLogger(__name__)

    def hook_to(self, server):
        """Registers this API's routes on the given Flask *server*."""
        self.app = server
        server.add_url_rule('/netman/info',endpoint="netman_info",view_func=self.get_info, methods=['GET'])
        server.add_url_rule('/netman/apidocs/', endpoint="netman_apidocs" ,view_func=self.api_docs, methods=['GET'])
        server.add_url_rule('/netman/apidocs/<path:filename>', endpoint="netman_apidocs" ,view_func=self.api_docs, methods=['GET'])

    @to_response
    def get_info(self):
        """
        Informations about the current deployment and state and generates a log entry on the netman.api logger \
        that says : ``/info requested this is a logging test``

        :code 200 OK:

        Example output:

        .. literalinclude:: ../doc_config/api_samples/get_info.json
            :language: json

        """
        logging.getLogger("netman.api").info("/info requested this is a logging test")
        return 200, info.to_api(
            status='running',
            version=self.get_distribution('netman').version,
            lock_provider=_class_fqdn(self.switch_factory.lock_factory)
        )

    def api_docs(self, filename=None):
        """
        Shows this documentation
        """
        # Serves the pre-built Sphinx HTML tree shipped alongside this module.
        return send_from_directory(os.path.dirname(__file__) + "/doc/html/", filename or "index.html")
def _class_fqdn(obj):
return "{}.{}".format(obj.__module__, obj.__class__.__name__)
| idjaw/netman | netman/api/netman_api.py | Python | apache-2.0 | 2,474 |
import os
from shutil import copyfile
# Copies labelled images from the test folder into a flat validation folder,
# renaming them to "<category>.<index>.jpg".
baseFolder = '/home/sangram/Desktop/Challenge/train'
# NOTE(review): there is no '/' between the two parts, so this resolves to
# ".../traintest1" (and ".../trainval" below) -- confirm the directories are
# really named that way, or a path separator is missing.
source = baseFolder + 'test1'
for fileName in os.listdir(source):
    # assumes names shaped like "<prefix>_<index>_<category>..." -- TODO confirm
    category = fileName.split('_')[2]
    destination = baseFolder + 'val' #+ category
    copyfile(source + '/' + fileName, destination + '/' + category + '.' + fileName.split('_')[1] + '.jpg')
| hellosangram/imageclassifiercaffe | deeplearning-medical-images/code/createData.py | Python | gpl-3.0 | 355 |
from linux_story.common import get_story_file
# The shelves hold the books and the first note; the story file for the
# note is the room-specific "note_my-room".
shelves = {
    "name": "shelves",
    "children": [
        {"name": item, "contents": get_story_file(story)}
        for (item, story) in (
            ("redwall", "redwall"),
            ("watership-down", "watership-down"),
            ("alice-in-wonderland", "alice-in-wonderland"),
            ("comic-book", "comic-book"),
            ("note", "note_my-room"),
        )
    ]
}
# Every garment's story file shares the garment's name.
wardrobe = {
    "name": "wardrobe",
    "children": [
        {"name": garment, "contents": get_story_file(garment)}
        for garment in (
            "cap", "dress", "jumper", "shirt", "skirt", "t-shirt", "trousers",
        )
    ]
}
# Hidden chest (dot-prefixed name); unlocked at challenge 15, step 1.
chest = {
    "name": ".chest",
    "challenges": [
        {"challenge": 15, "step": 1}
    ],
    "children": [
        {"name": item, "contents": get_story_file(item)}
        for item in ("CAT", "CD", "LS", ".note")
    ]
}
# The room itself: simple furniture items plus the nested containers above.
# The bed uses the room-specific "bed_my-room" story file.
my_room = {
    "name": "my-room",
    "children": [
        {"name": item, "contents": get_story_file(story)}
        for (item, story) in (
            ("alarm", "alarm"),
            ("chair", "chair"),
            ("computer", "computer"),
            ("desk", "desk"),
            ("bed", "bed_my-room"),
        )
    ] + [shelves, wardrobe, chest]
}
} | KanoComputing/terminal-quest | linux_story/story/trees/my_room.py | Python | gpl-2.0 | 2,652 |
"""A class to store tables.
Sample Usage:
table = SgTable()
table.Append([1, 2, 3])
table.Append([2, 4, 6])
table.Append([3, 6, 9])
for row in table:
print(row)
print(table[1])
table[1] = [2, 2, 2]
print(table[1])
table.SetFields(["a", "b", "c"])
print(table.GetVals("a"))
print(table.GetVals("b"))
print(table.GetVals("c"))
print(table[1:])
print(table[:2])
print(table[0:2:2])
"""
import itertools
class EscapeHtml:
  """Escapes characters that are unsafe to embed in HTML output.

  Newlines become ``<br>`` tags (keeping the newline so the generated
  markup stays readable).
  """

  # Characters that must be replaced before embedding text in HTML.
  MAPPING = {u"&": u"&amp;",
             u"<": u"&lt;",
             u">": u"&gt;",
             u"\"": u"&quot;",
             u"\'": u"&#39;",
             u"\n": u"<br>\n"}

  @classmethod
  def Escape(cls, ch):
    """Returns the HTML-safe replacement for the single character ch.

    Uses dict.get instead of the deprecated dict.has_key; characters
    without a mapping pass through unchanged.
    """
    return cls.MAPPING.get(ch, ch)

  @classmethod
  def EscapeUnicodeStr(cls, unicode_str):
    """Escapes every character of unicode_str for HTML embedding.

    Builds the result with one join instead of repeated string
    concatenation, which is quadratic in the worst case.
    """
    return u"".join(cls.Escape(ch) for ch in unicode_str)
class SgTable:
  """A class to store tables.

  A table is a list of field names plus a list of rows; rows are plain
  lists.  Python 2 code: relies on ``unicode``, ``long`` and
  ``itertools.imap``.
  """
  def __init__(self):
    self._fields = []  # column names
    self._table = []   # list of rows (each row is a list of values)
  def __len__(self):
    return len(self._table)
  def __iter__(self):
    for row in self._table:
      yield row
  def __getitem__(self, key):
    # Supports both slices and non-negative int/long row indices;
    # anything else raises ValueError.
    if isinstance(key, slice):
      return self._table[key.start:key.stop:key.step]
    else:
      if not ((type(key) == int or type(key) == long) and key >= 0 and key < len(self._table)):
        raise ValueError("Index illegal")
      else:
        return self._table[key]
  def __setitem__(self, key, value):
    # Replaces a whole row; only non-negative int/long indices accepted.
    if not ((type(key) == int or type(key) == long) and key >= 0 and key < len(self._table)):
      raise ValueError("Index illegal")
    else:
      self._table[key] = value
  def __str__(self):
    ret = str(self._fields)
    for row in self._table:
      ret += "\n" + str(row)
    return ret
  def __HasCommaOutOfString(self, val):
    # True if val contains a comma outside of any quoted ("..."/'...')
    # region; used to decide whether a CSV cell needs quoting.
    in_string = False
    is_escaping = False
    for ch in val:
      if in_string:
        if is_escaping:
          is_escaping = False
        elif ch == u"\\":
          is_escaping = True
        elif ch in (u"\"", u"\'"):
          in_string = False
      else:
        if ch == u",":
          return True
        elif ch in (u"\"", u"\'"):
          in_string = True
    return False
  def _GetCsvRepr(self, val):
    # Renders a value (or recursively, a row) as CSV text; unicode cells
    # with bare commas or newlines are wrapped in double quotes.
    if isinstance(val, list):
      return u",".join(itertools.imap(self._GetCsvRepr, val))
    else:
      if isinstance(val, unicode):
        if self.__HasCommaOutOfString(val) or u"\n" in val:
          return u"\"" + val + u"\""
        else:
          return val
      else:
        return unicode(str(val), "utf-8")
  def InCsv(self):
    """Returns the whole table (header row first) as CSV text."""
    ret = self._GetCsvRepr(self._fields)
    for row in self._table:
      ret += u"\n" + self._GetCsvRepr(row)
    return ret
  def InHtml(self):
    """Returns the whole table as a standalone HTML page with one <table>."""
    ret = u"<html>\n<head><meta charset=\"utf-8\">\n<title>SQLGitHub Result</title>\n</head>\n<body>\n"
    ret += u"<table border=1>"
    ret += u"<tr>"
    for field in self._fields:
      ret += u"<td>" + EscapeHtml.EscapeUnicodeStr(field) + u"</td>"
    ret += u"</tr>\n"
    for row in self._table:
      ret += u"<tr>"
      for val in row:
        # Non-unicode cells are stringified before HTML-escaping.
        unicode_str = val if isinstance(val, unicode) else unicode(str(val), "utf-8")
        ret += u"<td>" + EscapeHtml.EscapeUnicodeStr(unicode_str) + u"</td>"
      ret += u"</tr>\n"
    ret += u"</table>\n</html>"
    return ret
  def GetVals(self, field):
    """Returns the column named *field*; raises IndexError if it is absent."""
    idx = [i for i, f in enumerate(self._fields) if f == field][0]
    return [row[idx] for row in self._table]
  def Copy(self, table):
    # NOTE(review): shallow copy -- fields and rows are shared with the
    # source table, so later mutation affects both.
    self.SetFields(table.GetFields())
    self.SetTable(table.GetTable())
  def Append(self, row):
    self._table.append(row)
  def GetTable(self):
    return self._table
  def SetTable(self, table):
    self._table = table
  def GetFields(self):
    return self._fields
  def SetFields(self, fields):
    self._fields = fields
  def SliceCol(self, start, end):
    """Returns a new SgTable containing columns [start, end)."""
    table = SgTable()
    table.SetFields(self._fields[start:end])
    for row in self._table:
      table.Append(row[start:end])
    return table
  def Chain(self, table):
    """Returns a new SgTable joining the two tables side by side.

    The result is truncated to the shorter table's row count.
    """
    res_table = SgTable()
    res_table.SetFields(self._fields + table.GetFields())
    rows = min(len(self._table), len(table))
    for i in range(rows):
      res_table.Append(self._table[i] + table[i])
    return res_table
| lnishan/SQLGitHub | components/table.py | Python | mit | 4,814 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import redis, frappe, re
import cPickle as pickle
from frappe.utils import cstr
class RedisWrapper(redis.Redis):
"""Redis client that will automatically prefix conf.db_name"""
	def make_key(self, key, user=None):
		"""Returns *key* namespaced by conf.db_name, optionally per user.

		Passing user=True means "the current session user".  The result is
		utf-8 encoded bytes of "<db_name>|<key>".
		"""
		if user:
			if user == True:
				user = frappe.session.user

			key = "user:{0}:{1}".format(user, key)

		return "{0}|{1}".format(frappe.conf.db_name, key).encode('utf-8')
	def set_value(self, key, val, user=None, expires_in_sec=None):
		"""Sets cache value.

		:param key: Cache key
		:param val: Value to be cached
		:param user: Prepends key with User
		:param expires_in_sec: Expire value of this key in X seconds
		"""
		key = self.make_key(key, user)

		# Expiring keys are deliberately kept out of the request-local cache,
		# since frappe.local cannot honour the TTL.
		if not expires_in_sec:
			frappe.local.cache[key] = val

		try:
			if expires_in_sec:
				self.setex(key, pickle.dumps(val), expires_in_sec)
			else:
				self.set(key, pickle.dumps(val))

		except redis.exceptions.ConnectionError:
			# Best-effort: if redis is down, the request-local cache (when
			# applicable) is the only copy.
			return None
	def get_value(self, key, generator=None, user=None, expires=False):
		"""Returns cache value. If not found and generator function is
			given, it will call the generator.

		:param key: Cache key.
		:param generator: Function to be called to generate a value if `None` is returned.
		:param expires: If the key is supposed to be with an expiry, don't store it in frappe.local
		"""
		original_key = key
		key = self.make_key(key, user)

		# Request-local cache first, then redis (best-effort).
		if key in frappe.local.cache:
			val = frappe.local.cache[key]

		else:
			val = None
			try:
				val = self.get(key)
			except redis.exceptions.ConnectionError:
				pass

			if val is not None:
				val = pickle.loads(val)

			if not expires:
				if val is None and generator:
					# Miss: compute and write back through set_value.
					val = generator()
					self.set_value(original_key, val, user=user)

				else:
					frappe.local.cache[key] = val

		return val
def get_all(self, key):
ret = {}
for k in self.get_keys(key):
ret[key] = self.get_value(k)
return ret
def get_keys(self, key):
"""Return keys starting with `key`."""
try:
key = self.make_key(key + "*")
return self.keys(key)
except redis.exceptions.ConnectionError:
regex = re.compile(cstr(key).replace("|", "\|").replace("*", "[\w]*"))
return [k for k in frappe.local.cache.keys() if regex.match(k)]
def delete_keys(self, key):
"""Delete keys with wildcard `*`."""
try:
self.delete_value(self.get_keys(key), make_keys=False)
except redis.exceptions.ConnectionError:
pass
def delete_key(self, *args, **kwargs):
self.delete_value(*args, **kwargs)
def delete_value(self, keys, user=None, make_keys=True):
"""Delete value, list of values."""
if not isinstance(keys, (list, tuple)):
keys = (keys, )
for key in keys:
if make_keys:
key = self.make_key(key)
try:
self.delete(key)
except redis.exceptions.ConnectionError:
pass
if key in frappe.local.cache:
del frappe.local.cache[key]
def hset(self, name, key, value):
if not name in frappe.local.cache:
frappe.local.cache[name] = {}
frappe.local.cache[name][key] = value
try:
super(redis.Redis, self).hset(self.make_key(name), key, pickle.dumps(value))
except redis.exceptions.ConnectionError:
pass
def hgetall(self, name):
return {key: pickle.loads(value) for key, value in
super(redis.Redis, self).hgetall(self.make_key(name)).iteritems()}
def hget(self, name, key, generator=None):
if not name in frappe.local.cache:
frappe.local.cache[name] = {}
if key in frappe.local.cache[name]:
return frappe.local.cache[name][key]
value = None
try:
value = super(redis.Redis, self).hget(self.make_key(name), key)
except redis.exceptions.ConnectionError:
pass
if value:
value = pickle.loads(value)
frappe.local.cache[name][key] = value
elif generator:
value = generator()
try:
self.hset(name, key, value)
except redis.exceptions.ConnectionError:
pass
return value
def hdel(self, name, key):
if name in frappe.local.cache:
if key in frappe.local.cache[name]:
del frappe.local.cache[name][key]
try:
super(redis.Redis, self).hdel(self.make_key(name), key)
except redis.exceptions.ConnectionError:
pass
def hdel_keys(self, name_starts_with, key):
"""Delete hash names with wildcard `*` and key"""
for name in frappe.cache().get_keys(name_starts_with):
name = name.split("|", 1)[1]
self.hdel(name, key)
def hkeys(self, name):
try:
return super(redis.Redis, self).hkeys(self.make_key(name))
except redis.exceptions.ConnectionError:
return []
| Amber-Creative/amber-frappe | frappe/utils/redis_wrapper.py | Python | mit | 4,605 |
# Persimmon imports
from persimmon.view.blackboard import BlackBoard, Blocks # MYPY HACK
from persimmon.view.util import PlayButton, Notification
# Kivy imports
from kivy.app import App
from kivy.config import Config
from kivy.factory import Factory
from kivy.properties import ObjectProperty
# Kivy Widgets
from kivy.uix.image import Image
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.floatlayout import FloatLayout
# Others
import logging
from kivy.lang import Builder
logger = logging.getLogger(__name__)  # module-level logger
Config.read('persimmon/config.ini')  # load kivy configuration before any widget is built
class ViewApp(App):
    """Main kivy application for Persimmon."""

    # Texture exposed to kv widgets as the window backdrop
    background = ObjectProperty()

    def build(self):
        """Set the window title and load the background texture."""
        # use the module logger instead of print() for consistency with the
        # logging configured at the top of this module
        logger.info('Building main view')
        self.title = 'Persimmon'
        self.background = Image(source='persimmon/background.png').texture
class Backdrop(FloatLayout):
    """ Class on top of blackboard where the main control panel and buttons are
    displayed. Unaffected by zoom/movement of the underlying scatter. """
    play_button = ObjectProperty()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.add_hint()

    def add_hint(self):
        """Show the hint widget behind every other child."""
        hint_widget = Factory.Hint()
        self.hint = hint_widget
        self.add_widget(hint_widget, index=len(self.children))

    def remove_hint(self):
        """Drop the hint widget once the user starts working."""
        self.remove_widget(self.hint)
        self.hint = None

    def on_graph_executed(self):
        """Re-arm the play button after a graph run completes."""
        self.play_button.ready()
| AlvarBer/Persimmon | persimmon/view/view.py | Python | mit | 1,377 |
# Written by Petru Paler
# see LICENSE.txt for license information
def decode_int(x, f):
    """Decode a bencoded integer ``i<digits>e`` starting at offset f.

    Returns (value, offset just past the trailing 'e').
    """
    start = f + 1
    end = x.index('e', start)
    digits = x[start:end]
    try:
        value = int(digits)
    except (OverflowError, ValueError):
        value = long(digits)
    # reject leading zeros and negative zero
    first = x[start]
    if first == '-':
        if x[start + 1] == '0':
            raise ValueError
    elif first == '0' and end != start + 1:
        raise ValueError
    return (value, end + 1)
def decode_string(x, f):
    """Decode a bencoded string ``<len>:<bytes>`` starting at offset f.

    Returns (string, offset just past the string).
    """
    sep = x.index(':', f)
    length_text = x[f:sep]
    try:
        length = int(length_text)
    except (OverflowError, ValueError):
        length = long(length_text)
    # a length field may not have leading zeros
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + length], start + length)
def decode_list(x, f):
    """Decode a bencoded list ``l...e`` starting at offset f."""
    values = []
    f += 1
    while x[f] != 'e':
        item, f = decode_func[x[f]](x, f)
        values.append(item)
    return (values, f + 1)
def decode_dict(x, f):
    """Decode a bencoded dict ``d...e``; keys must be strictly ascending."""
    result = {}
    f += 1
    prev_key = None
    while x[f] != 'e':
        key, f = decode_string(x, f)
        if prev_key >= key:
            raise ValueError
        prev_key = key
        result[key], f = decode_func[x[f]](x, f)
    return (result, f + 1)
# Dispatch table: first character of a bencoded value -> decoder function.
decode_func = {
    'l': decode_list,
    'd': decode_dict,
    'i': decode_int,
}
# any digit starts a length-prefixed string
decode_func.update((digit, decode_string) for digit in '0123456789')
def bdecode(x):
    """Decode a complete bencoded string; raise ValueError on any garbage."""
    try:
        value, consumed = decode_func[x[0]](x, 0)
    except (IndexError, KeyError):
        raise ValueError
    # trailing bytes after the top-level value are an error
    if consumed != len(x):
        raise ValueError
    return value
def test_bdecode():
    """Exercise bdecode() on malformed and well-formed inputs."""

    def expect_value_error(s):
        # every malformed input must be rejected with ValueError
        try:
            bdecode(s)
            assert 0
        except ValueError:
            pass

    for bad in ('0:0:', 'ie', 'i341foo382e', 'i-0e', 'i123', '', 'i6easd',
                '35208734823ljdahflajhdf', '2:abfdjslhfld', '02:xy', 'l',
                'leanfdldjfh', 'relwjhrlewjh', 'd', 'defoobar', 'd3:fooe',
                'di1e0:e', 'd1:b0:1:a0:e', 'd1:a0:1:a0:e', 'i03e', 'l01:ae',
                '9999:x', 'l0:', 'd0:0:', 'd0:', '00:', 'l-3:e', 'i-03e'):
        expect_value_error(bad)

    # integers (plain literals compare equal to longs on Python 2, and the
    # old `4L` spellings were Python 3 syntax errors)
    assert bdecode('i4e') == 4
    assert bdecode('i0e') == 0
    assert bdecode('i123456789e') == 123456789
    assert bdecode('i-10e') == -10

    # strings
    assert bdecode('0:') == ''
    assert bdecode('3:abc') == 'abc'
    assert bdecode('10:1234567890') == '1234567890'

    # lists
    assert bdecode('le') == []
    assert bdecode('l0:0:0:e') == ['', '', '']
    assert bdecode('li1ei2ei3ee') == [1, 2, 3]
    assert bdecode('l3:asd2:xye') == ['asd', 'xy']
    assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]

    # dicts
    assert bdecode('de') == {}
    assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
    assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == \
        {'spam.mp3': {'author': 'Alice', 'length': 100000}}

    # an empty-string key is legal
    bdecode('d0:i3ee')
from types import StringType, IntType, LongType, DictType, ListType, TupleType
class Bencached(object):
    """Wrapper marking a string as already-bencoded output.

    bencode() appends ``bencoded`` verbatim instead of re-encoding it.
    """
    __slots__ = ['bencoded']

    def __init__(self, s):
        # s: a complete, valid bencoded string
        self.bencoded = s
def encode_bencached(x, r):
    """Emit an already-encoded value verbatim."""
    r += [x.bencoded]
def encode_int(x, r):
    """Append the bencoding of integer x (``i<digits>e``) to r."""
    r.append('i')
    r.append(str(x))
    r.append('e')
def encode_string(x, r):
    """Append the bencoding of string x (``<len>:<bytes>``) to r."""
    r.append(str(len(x)))
    r.append(':')
    r.append(x)
def encode_list(x, r):
    """Append the bencoding of list/tuple x (``l...e``) to r."""
    r.append('l')
    for element in x:
        encode_func[type(element)](element, r)
    r.append('e')
def encode_dict(x, r):
    """Append the bencoding of dict x (``d...e``, keys sorted) to r."""
    r.append('d')
    # sorted(x.items()) compares keys only (dict keys are unique), matching
    # the items()/sort() idiom it replaces
    for key, value in sorted(x.items()):
        r.append(str(len(key)))
        r.append(':')
        r.append(key)
        encode_func[type(value)](value, r)
    r.append('e')
# Dispatch table mapping a Python type to its bencode encoder.
encode_func = {}
encode_func[type(Bencached(0))] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict

try:
    from types import BooleanType
    # bools are encoded as integers (True -> i1e)
    encode_func[BooleanType] = encode_int
except ImportError:
    # very old Pythons have no BooleanType
    pass
def bencode(x):
    """Return the bencoded representation of x as a single string."""
    pieces = []
    encode_func[type(x)](x, pieces)
    return ''.join(pieces)
def test_bencode():
    """Exercise bencode() on every supported type."""
    assert bencode(4) == 'i4e'
    assert bencode(0) == 'i0e'
    assert bencode(-10) == 'i-10e'
    # a big literal auto-promotes to long on Python 2; the explicit `L`
    # suffix it replaces was a Python 3 syntax error
    assert bencode(12345678901234567890) == 'i12345678901234567890e'
    assert bencode('') == '0:'
    assert bencode('abc') == '3:abc'
    assert bencode('1234567890') == '10:1234567890'
    assert bencode([]) == 'le'
    assert bencode([1, 2, 3]) == 'li1ei2ei3ee'
    assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee'
    assert bencode({}) == 'de'
    assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee'
    assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == \
        'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'
    assert bencode(Bencached(bencode(3))) == 'i3e'
    # non-string keys are unsupported and must raise TypeError
    try:
        bencode({1: 'foo'})
    except TypeError:
        return
    assert 0
try:
    # Optional: JIT-compile the hot codec functions when psyco is available
    import psyco
    psyco.bind(bdecode)
    psyco.bind(bencode)
except ImportError:
    pass
| linuxmint/mint4win | src/bittorrent/bencode.py | Python | gpl-2.0 | 7,052 |
'''
Sendkeys module moved back to ctypes.
For x64 systems, for example.
(c) 2009 Igor S. Mandrigin, Agnitum Ltd.
'''
from ctypes import windll
# from the internet
# Win32 keybd_event flags and virtual-key codes (see the keybd_event /
# KEYBDINPUT documentation).  The duplicate KEYEVENTF_KEYUP assignment
# has been removed.
KEYEVENTF_EXTENDEDKEY = 1
KEYEVENTF_KEYUP = 2
VK_NUMLOCK = 144
def _key_down(vk):
    """Emit a key-press event for virtual key `vk`."""
    windll.user32.keybd_event(vk, windll.user32.MapVirtualKeyA(vk, 0), 0, 0)
def _key_up(vk):
    """Emit a key-release event for virtual key `vk`."""
    windll.user32.keybd_event(vk, windll.user32.MapVirtualKeyA(vk, 0),
                              KEYEVENTF_KEYUP, 0)
def toggle_numlock( turn_on ) :
    '''
    toggle_numlock(int) -> int

    Turns NUMLOCK on or off and returns whether
    it was originally on or off.
    '''
    # removed the unused `keys = []` and the redundant `is_on = 0`
    # initializer -- is_on is always assigned from GetKeyState below
    is_on = windll.user32.GetKeyState( VK_NUMLOCK ) & 1
    if is_on != turn_on :
        # press and release NUMLOCK (scan code 69) to flip its state
        windll.user32.keybd_event(VK_NUMLOCK,
                                  69,
                                  KEYEVENTF_EXTENDEDKEY | 0,
                                  0);
        windll.user32.keybd_event(VK_NUMLOCK,
                                  69,
                                  KEYEVENTF_EXTENDEDKEY | KEYEVENTF_KEYUP,
                                  0);
    return is_on
def char2keycode(char):
    '''
    char2keycode(char) -> int

    Converts character to virtual key code
    '''
    return windll.user32.VkKeyScanA(ord(char))
def key_down(key):
    '''
    key_down(int) -> None

    Generates a key pressed event. Takes a
    virtual key code.
    '''
    # XXX exception if >= 256
    _key_down(key)
def key_up(key):
    '''
    key_up(int) -> None

    Generates a key released event. Takes a
    virtual key code.
    '''
    # XXX exception if >= 256
    _key_up(key)
| savionok/RemoteHID | test/SendKeys-ctypes-0.2/_sendkeys.py | Python | apache-2.0 | 1,787 |
# -*- coding: utf-8 -*-
# Scrapy settings for crawlstocks project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'crawlstocks'

SPIDER_MODULES = ['crawlstocks.spiders']
NEWSPIDER_MODULE = 'crawlstocks.spiders'

RETRY_TIMES = 1

# next page depth
DEPTH_LIMIT = 60

DOWNLOAD_TIMEOUT = 10
REDIRECT_ENABLED = False

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'crawlstocks (+https://qrsforever.github.io)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 1

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# Slow down to keep the target site from banning the crawler
DOWNLOAD_DELAY = 2.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# Disabling cookies also helps avoid anti-crawler detection
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'crawlstocks.middlewares.CrawlstocksSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     # 'crawlstocks.middlewares.CustomRandomMiddleware.RandomProxyMiddleware': 300,
#     'crawlstocks.middlewares.CustomRandomMiddleware.RandomUserAgentMiddleware': 543,
#     'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
#     }

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# 300 is the priority; kept commented out -- pipelines are enabled
# per-spider through each spider's custom_settings
# ITEM_PIPELINES = {
#     'crawlstocks.pipelines.CrawlListPipeline': 300,
#     'crawlstocks.pipelines.CrawlInfoPipeline': 400,
#     }

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
# One of: CRITICAL, ERROR, WARNING, INFO, DEBUG
LOG_LEVEL = 'DEBUG'
####### Custom project settings

STOCK_LIST_FILE = 'stock_list.txt'
STOCK_INFO_FILE = 'stock_info.txt'
STOCK_URLS_FILE = 'stock_urls.txt'
STOCK_CWZB_FILE = 'stock_cwzb.txt'

# Xiong'an New Area stock board
# BLOCKINFO_XIONGANXINQUE = '003813'
# Beijing-Tianjin-Hebei integration stock board
# BLOCKINFO_JINGJINYIYITIHUA = '003684'

# UA pool selector: random / firfox / chrome / opera / google / safari
# (NOTE(review): "firfox" spelling may be what the middleware expects -- verify)
RANDOM_UA_TYPE = "opera"

# Static HTTP proxy pool for RandomProxyMiddleware
USER_PROXY_LIST = [
        '1.85.220.195:8118',
        '60.255.186.169:8888',
        '118.187.58.34:53281',
        '116.224.191.141:8118',
        '120.27.5.62:9090',
        '119.132.250.156:53281',
        '139.129.166.68:3128'
        ]
# These user agents were often blocked/unreachable; switched to Splash instead
USER_AGENT_LIST = [
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
    'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
    'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
    'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)',
    'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0',
    'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20',
    'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1',
    'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    ]
# MongoDB connection and collection names
DB_HOST = 'mongodb://localhost:27017/'
DB_NAME = 'stock'
DB_CODES_TABLE_NAME = 'codes'
DB_CHDDATA_TABLE_NAME = 'chddata'

# Request date window for historical data (YYYYMMDD)
DATETIME_START = '20100101'
DATETIME_END = '20190424'
| qrsforever/workspace | python/test/crawl_stocks/crawlstocks/settings.py | Python | mit | 8,876 |
import numpy as np
from sklearn.datasets import load_iris
from sklearn import tree
# Train a decision tree on the iris data set, holding out one sample of each
# class (indices 0, 50, 100) for testing, then export a visualization.
iris = load_iris()
# print(iris.feature_names)
# print(iris.target_names)
# print(iris.data[100])
# print(iris.target[100])

# for i in range(len(iris.target)):
#     print("Example %d: label %s, features %s" % (i, iris.target[i], iris.data[i]))

# one held-out example per class
test_idx = [0, 50, 100]

# training data
train_target = np.delete(iris.target, test_idx)
train_data = np.delete(iris.data, test_idx, axis=0)

# test data
test_target = iris.target[test_idx]
test_data = iris.data[test_idx]

# print(test_target)
# print(test_data)
#
# print(train_data)

clf = tree.DecisionTreeClassifier()
clf = clf.fit(train_data, train_target)

# expected labels vs. predictions for the three held-out samples
print(test_target)
print(clf.predict(test_data))

# viz code
# from sklearn.externals.six import StringIO
import pydotplus
dot_data = tree.export_graphviz(clf,
                                out_file=None,
                                feature_names=iris.feature_names,
                                class_names=iris.target_names,
                                filled=True, rounded=True,
                                impurity=False)
graph = pydotplus.graph_from_dot_data(dot_data)
# render the fitted tree to a PDF via graphviz
graph.write_pdf("iris_viz.pdf")

print(test_data[0], test_target[0])
print(test_data[1], test_target[1])
print(test_data[2], test_target[2])

print(iris.feature_names)
print(iris.target_names)
#!/usr/bin/python
#GUI Parameters
terminal_color = '#10fb72'  # background of the terminal text widget
serial_active_color = '#1f4dbc'  # status label color when the port is open
serial_inactive_color = '#ff0000'  # status label color when the port is closed
from Tkinter import *
import time
import os
import BeanSerialTransport
import logging
import numpy
import math
# `sys` is available via Tkinter's star import; log everything to stderr
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
# single shared serial transport used by all handlers below
transport = BeanSerialTransport.Bean_Serial_Transport()
def print_to_gui(type, data):
    """Append incoming serial bytes to the terminal widget and autoscroll."""
    # data arrives as a sequence of byte values; build a single string
    text = ''.join([chr(byte) for byte in data])
    terminal_output.insert(END, text)
    terminal_output.yview(END)
transport.add_handler(transport.MSG_ID_SERIAL_DATA, print_to_gui)
def quit_app():
    """Close the serial port, then shut down the Tk main loop."""
    global transport, app
    transport.close_port()
    app.quit()
def check_serial():
    """Poll the transport, refresh the status indicator, and reschedule."""
    # removed `global count` (no such name exists anywhere in this module)
    # and the other read-only `global` declarations, which were no-ops
    if transport.serial_port is not None and transport.serial_port.isOpen():
        serial_is_active_label.configure(foreground=(serial_active_color))
        serial_is_active.set("On")
    else:
        serial_is_active_label.configure(foreground=(serial_inactive_color))
        serial_is_active.set("Off")
    transport.parser()
    # poll again in 50 ms
    app.after(50, check_serial)
# Window geometry limits
min_x = 500
max_x = 600
min_y = 600
max_y = 1200

app = Tk()
# close the serial port cleanly when the window is closed
app.wm_protocol ("WM_DELETE_WINDOW", quit_app)
app.minsize(min_x, min_y)
app.maxsize(max_x, max_y)
app.title('Bean Arduino Test Tool')

beanpic = PhotoImage(file="cutout-bean.gif")
beanpic = beanpic.subsample(8)  # shrink the logo to 1/8th size
Label(image=beanpic).grid(row=0,sticky= N+S+E+W)
Label(text=' Bean Arduino Test Jig ').grid(row=1)

# Initialize main container/frames
content_frame = Frame(app)
content_frame.grid(row=3, sticky=W+E+N+S)
serial_chooser_frame = Frame(content_frame, padx=10)
serial_chooser_frame.grid(row=0)

# Serial Picker
Label(serial_chooser_frame, text='Serial Port:').grid(row=0, sticky=W)
serial_chooser = StringVar()
def choose_serial(port):
    """Reopen the transport on the newly selected port at 57600 baud."""
    baud_rate = 57600
    transport.close_port()
    transport.open_port(port, baud_rate)
# dropdown of available serial ports; selecting one reopens the transport
serial_chooser_menu = OptionMenu(serial_chooser_frame, serial_chooser,
                                 *(transport.get_available_serial_ports()),
                                 command=choose_serial)
serial_chooser_menu.config(width=60)
serial_chooser_menu.grid(row=1, sticky=W)

# On/Off status text, recolored by check_serial()
serial_is_active = StringVar()
serial_is_active.set("Off")
serial_is_active_label = Label(serial_chooser_frame, textvariable=serial_is_active)
serial_is_active_label.grid(row=1,column=1)
serial_is_active_label.configure(foreground=(serial_inactive_color))

# visual separator
Label(serial_chooser_frame,
      text='___________________________________________________________________________').grid(row=2)

led_frame = Frame(content_frame, padx=10)
led_frame.grid(sticky=W)

# LED Viewer
# draw a little circle
# update the color of the circle whenever we get a message to write the LED
Label(led_frame, text='LED Color').grid(sticky=W, padx=10 )
led_canvas = Canvas(led_frame, width=200, height=50)
led_canvas.grid(sticky=W)
led_shape = led_canvas.create_oval(20,10,50,40, outline="black", fill="black", width=2)
led_color_text = led_canvas.create_text(100, 25, text= '0xFF00FF')
def RGBToHTMLColor(rgb_tuple):
    """Format an (r, g, b) tuple as a lowercase '#rrggbb' HTML color."""
    return '#' + ('%02x' * 3) % rgb_tuple
def RGBToString(rgb_tuple):
    """Format an (r, g, b) tuple as an uppercase '0xRRGGBB' string."""
    return '0x' + ('%02X' * 3) % rgb_tuple
def led_set_color(r, g, b):
    """Paint the on-screen LED oval and its hex label with the given color."""
    rgb = (r, g, b)
    led_canvas.itemconfigure(led_shape, fill=RGBToHTMLColor(rgb))
    led_canvas.itemconfigure(led_color_text, text=RGBToString(rgb))
# cached LED channel values (0-255), updated by the incoming LED messages
led_red = 0
led_green = 0
led_blue = 0
def led_set_color_to_current():
    """Refresh the LED widget from the cached channel values."""
    led_set_color(led_red, led_green, led_blue)
def handle_led_write_all(type, data):
    """Message handler: set all three LED channels at once."""
    global led_red, led_green, led_blue
    led_red, led_green, led_blue = data[0], data[1], data[2]
    led_set_color_to_current()
def handle_led_write_single(type, data):
    """Message handler: set one LED channel (0=red, 1=green, 2=blue)."""
    global led_red, led_green, led_blue
    channel, level = data[0], data[1]
    if channel == 0:
        led_red = level
    elif channel == 1:
        led_green = level
    elif channel == 2:
        led_blue = level
    led_set_color_to_current()
# wire the LED messages to their handlers
transport.add_handler(transport.MSG_ID_CC_LED_WRITE, handle_led_write_single)
transport.add_handler(transport.MSG_ID_CC_LED_WRITE_ALL, handle_led_write_all)

# Accelerometer View/Control: three sliders simulating a +/-511 count sensor
accel_frame = Frame(content_frame, padx=10)
accel_frame.grid(sticky=W)
Label(accel_frame, text='Accelerometer').grid(sticky=W, padx=10 )
accel_x_control = Scale(accel_frame, from_=(511), to=-511, label='X')
accel_x_control.grid(row=1, column=0, sticky=W)
accel_y_control = Scale(accel_frame, from_=(511), to=-511, label='Y')
accel_y_control.grid(row=1, column=1, sticky=W)
accel_z_control = Scale(accel_frame, from_=(511), to=-511, label='Z')
accel_z_control.grid(row=1, column=2, sticky=W)
def z_sin_callback():
    """While enabled, drive the Z slider with a 511-amplitude sine wave."""
    if not enable_z_sin.get():
        return
    app.after(100, z_sin_callback)
    accel_z_control.set(511 * math.sin(time.time()))
# checkbox that starts/stops the Z-axis sine generator
enable_z_sin = IntVar()
accel_enable_z_sin = Checkbutton(accel_frame, text="zSin", variable=enable_z_sin, command=z_sin_callback)
accel_enable_z_sin.grid(row=1, column=3, sticky=W)
def y_sin_callback():
    """While enabled, drive the Y slider with a double-frequency sine wave."""
    if not enable_y_sin.get():
        return
    app.after(100, y_sin_callback)
    accel_y_control.set(511 * math.sin(time.time() * 2))
# checkbox that starts/stops the Y-axis sine generator
enable_y_sin = IntVar()
accel_enable_y_sin = Checkbutton(accel_frame, text="ySin", variable=enable_y_sin, command=y_sin_callback)
accel_enable_y_sin.grid(row=1, column=4, sticky=W)
def x_sin_callback():
    """While enabled, drive the X slider with a triple-frequency sine wave."""
    if not enable_x_sin.get():
        return
    app.after(100, x_sin_callback)
    accel_x_control.set(511 * math.sin(time.time() * 3))
# checkbox that starts/stops the X-axis sine generator
enable_x_sin = IntVar()
accel_enable_x_sin = Checkbutton(accel_frame, text="xSin", variable=enable_x_sin, command=x_sin_callback)
accel_enable_x_sin.grid(row=1, column=5, sticky=W)
def handle_accel_read_all(type, data):
    """Reply to an accelerometer read request with the three slider values."""
    payload = numpy.array([accel_x_control.get(),
                           accel_y_control.get(),
                           accel_z_control.get()], numpy.int16)
    # reinterpret the int16 buffer as raw bytes for the wire format
    payload.dtype = numpy.uint8
    transport.send_message(transport.MSG_ID_CC_ACCEL_READ_RSP, payload)
# reply to accelerometer read requests with the slider values
transport.add_handler(transport.MSG_ID_CC_ACCEL_READ, handle_accel_read_all)

# Radio Settings: connection/advertising interval sliders
radio_frame = Frame(content_frame, padx=10);
radio_frame.grid(sticky=W)
Label(accel_frame, text='Radio').grid(sticky=W, padx=10 )
conn_int = Scale(radio_frame, from_=300, to=0, label='Conn Int')
conn_int.grid(row=1, sticky=W)
adv_int = Scale(radio_frame, from_=300, to=0, label='Adv Int');
adv_int.grid(row=1, column=1, sticky=W)

# 'terminal' output: scrolling text widget fed by print_to_gui()
terminal_frame = Frame(content_frame)
terminal_frame.grid(row=100, sticky=S)
Label(terminal_frame,
      text='___________________________________________________________________________').grid(row=0)
terminal_output = Text(terminal_frame, wrap=WORD, height=6, width=75, background=(terminal_color), state='normal')
terminal_output.grid(row=1, sticky=S)
scrollbar = Scrollbar(terminal_frame, command=terminal_output.yview)
terminal_output.config(yscrollcommand=scrollbar.set)
scrollbar.grid(row=1, column=1, sticky='ns')
Button(terminal_frame, text='Clear Terminal', command=lambda: terminal_output.delete(1.0, END) ).grid(row=2, sticky=E)
# LED color demo calls, kept for manual debugging:
# app.after(1000, led_set_color, 0, 0xFF, 0x00)
# app.after(2000, led_set_color, 0, 0, 0xFF)
# app.after(3000, led_set_color, 0xFF, 0, 0)
# sync the LED widget shortly after startup
app.after(4000, led_set_color_to_current)
# kick off the serial polling loop, then enter the Tk main loop
app.after_idle(check_serial)
app.mainloop()
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core import signals
from indico.util.signals import named_objects_from_signal
def get_field_types():
    """Gets a dict containing all field types"""
    signal_results = signals.event.get_survey_fields.send()
    return named_objects_from_signal(signal_results, plugin_attr='plugin')
@signals.event.get_survey_fields.connect
def _get_fields(sender, **kwargs):
    """Yield the built-in survey field classes."""
    # imported lazily to avoid circular imports at module load time
    from .simple import TextField, NumberField, BoolField
    from .choices import SingleChoiceField, MultiSelectField
    for field_cls in (TextField, NumberField, BoolField,
                      SingleChoiceField, MultiSelectField):
        yield field_cls
@signals.app_created.connect
def _check_field_definitions(app, **kwargs):
    # This will raise RuntimeError if the field names are not unique
    get_field_types()
| XeCycle/indico | indico/modules/events/surveys/fields/__init__.py | Python | gpl-3.0 | 1,513 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
import logging
from typing import Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
logger = logging.getLogger(__name__)
@register_model("bart")
class BARTModel(TransformerModel):
__jit_unused_properties__ = ["supported_targets"]
@classmethod
def hub_models(cls):
return {
"bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz",
"bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz",
"bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz",
"bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz",
"bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz",
}
    def __init__(self, args, encoder, decoder):
        """Wrap a standard Transformer with BART-specific initialization."""
        super().__init__(args, encoder, decoder)

        # We follow BERT's random weight initialization
        self.apply(init_bert_params)

        # sentence-classification heads registered later through
        # register_classification_head(); maps head name -> nn.Module
        self.classification_heads = nn.ModuleDict()
        if hasattr(self.encoder, "dictionary"):
            # cache the EOS index; annotated as int for TorchScript
            self.eos: int = self.encoder.dictionary.eos()
    @staticmethod
    def add_args(parser):
        """Add BART-specific CLI arguments on top of the Transformer ones."""
        super(BARTModel, BARTModel).add_args(parser)
        parser.add_argument(
            "--pooler-dropout",
            type=float,
            metavar="D",
            help="dropout probability in the masked_lm pooler layers",
        )
        parser.add_argument(
            "--pooler-activation-fn",
            choices=utils.get_available_activation_fns(),
            help="activation function to use for pooler layer",
        )
        parser.add_argument(
            "--spectral-norm-classification-head",
            action="store_true",
            help="Apply spectral normalization on the classification head",
        )
    @property
    def supported_targets(self):
        # BART reconstructs its own (denoised) input, hence "self"
        return {"self"}
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
features_only: bool = False,
classification_head_name: Optional[str] = None,
token_embeddings: Optional[torch.Tensor] = None,
return_all_hiddens: bool = True,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
token_embeddings=token_embeddings,
return_all_hiddens=return_all_hiddens,
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
eos: int = self.eos
if classification_head_name is not None:
sentence_representation = x[src_tokens.eq(eos), :].view(
x.size(0), -1, x.size(-1)
)[:, -1, :]
for k, head in self.classification_heads.items():
# for torch script only supports iteration
if k == classification_head_name:
x = head(sentence_representation)
break
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="gpt2",
sample_break_mode="eos",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
sample_break_mode=sample_break_mode,
**kwargs,
)
return BARTHubInterface(x["args"], x["task"], x["models"][0])
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
logger.info("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
input_dim=self.args.encoder_embed_dim,
inner_dim=inner_dim or self.args.encoder_embed_dim,
num_classes=num_classes,
activation_fn=self.args.pooler_activation_fn,
pooler_dropout=self.args.pooler_dropout,
do_spectral_norm=getattr(
self.args, "spectral_norm_classification_head", False
),
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
def truncate_emb(key):
if key in state_dict:
state_dict[key] = state_dict[key][:-1, :]
# When finetuning on translation task, remove last row of
# embedding matrix that corresponds to mask_idx token.
loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0)
if (
loaded_dict_size == len(self.encoder.dictionary) + 1
and "<mask>" not in self.encoder.dictionary
):
truncate_emb("encoder.embed_tokens.weight")
truncate_emb("decoder.embed_tokens.weight")
truncate_emb("encoder.output_projection.weight")
truncate_emb("decoder.output_projection.weight")
# When continued pretraining on new set of languages for mbart,
# add extra lang embeddings at the end of embed_tokens.
# Note: newly added languages are assumed to have been added at the end.
if self.args.task == "multilingual_denoising" and loaded_dict_size < len(
self.encoder.dictionary
):
logger.info(
"Adding extra language embeddings not found in pretrained model for "
"continued pretraining of MBART on new set of languages."
)
loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][
-1, :
]
num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size
embed_dim = state_dict["encoder.embed_tokens.weight"].size(1)
new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim)
nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim**-0.5)
new_lang_embed_to_add = new_lang_embed_to_add.to(
dtype=state_dict["encoder.embed_tokens.weight"].dtype,
)
state_dict["encoder.embed_tokens.weight"] = torch.cat(
[
state_dict["encoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
state_dict["decoder.embed_tokens.weight"] = torch.cat(
[
state_dict["decoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
class BARTClassificationHead(nn.Module):
    """Head for sentence-level classification tasks.

    A two-layer MLP (dense -> activation -> projection) with dropout applied
    before each linear layer; the output projection may optionally be
    wrapped in spectral normalization.
    """
    def __init__(
        self,
        input_dim,
        inner_dim,
        num_classes,
        activation_fn,
        pooler_dropout,
        do_spectral_norm=False,
    ):
        super().__init__()
        # Layers are created in the same order as before so parameter
        # initialization (RNG consumption) is unchanged.
        self.dense = nn.Linear(input_dim, inner_dim)
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.dropout = nn.Dropout(p=pooler_dropout)
        projection = nn.Linear(inner_dim, num_classes)
        if do_spectral_norm:
            projection = torch.nn.utils.spectral_norm(projection)
        self.out_proj = projection
    def forward(self, features, **kwargs):
        hidden = self.dropout(features)
        hidden = self.activation_fn(self.dense(hidden))
        hidden = self.dropout(hidden)
        return self.out_proj(hidden)
@register_model_architecture("bart", "bart_large")
def bart_large_architecture(args):
    """Fill in BART-large hyperparameter defaults on ``args``.

    Only attributes the user has not already set are assigned. Entries whose
    default depends on an earlier attribute are expressed as lambdas and
    evaluated in order, so e.g. ``decoder_embed_dim`` tracks a user-supplied
    ``encoder_embed_dim``.
    """
    fallbacks = [
        ("encoder_embed_path", None),
        ("encoder_embed_dim", 1024),
        ("encoder_ffn_embed_dim", 4 * 1024),
        ("encoder_layers", 12),
        ("encoder_attention_heads", 16),
        ("encoder_normalize_before", False),
        ("encoder_learned_pos", True),
        ("decoder_embed_path", None),
        ("decoder_embed_dim", lambda: args.encoder_embed_dim),
        ("decoder_ffn_embed_dim", lambda: args.encoder_ffn_embed_dim),
        ("decoder_layers", 12),
        ("decoder_attention_heads", 16),
        ("decoder_normalize_before", False),
        ("decoder_learned_pos", True),
        ("attention_dropout", 0.0),
        ("relu_dropout", 0.0),
        ("dropout", 0.1),
        ("max_target_positions", 1024),
        ("max_source_positions", 1024),
        ("adaptive_softmax_cutoff", None),
        ("adaptive_softmax_dropout", 0),
        ("share_decoder_input_output_embed", True),
        ("share_all_embeddings", True),
        ("decoder_output_dim", lambda: args.decoder_embed_dim),
        ("decoder_input_dim", lambda: args.decoder_embed_dim),
        ("no_scale_embedding", True),
        ("layernorm_embedding", True),
        ("activation_fn", "gelu"),
        ("pooler_activation_fn", "tanh"),
        ("pooler_dropout", 0.0),
    ]
    for attr, fallback in fallbacks:
        if not hasattr(args, attr):
            # Callables defer to attributes assigned earlier in this loop.
            setattr(args, attr, fallback() if callable(fallback) else fallback)
@register_model_architecture("bart", "bart_base")
def bart_base_architecture(args):
    """BART-base: a shallower/narrower variant of BART-large."""
    base_defaults = {
        "encoder_embed_dim": 768,
        "encoder_ffn_embed_dim": 4 * 768,
        "encoder_layers": 6,
        "encoder_attention_heads": 12,
        "decoder_layers": 6,
        "decoder_attention_heads": 12,
    }
    for attr, value in base_defaults.items():
        if not hasattr(args, attr):
            setattr(args, attr, value)
    # Everything not overridden above falls back to the large config.
    bart_large_architecture(args)
@register_model_architecture("bart", "mbart_large")
def mbart_large_architecture(args):
    """mBART-large: BART-large but with embedding scaling enabled by default."""
    if not hasattr(args, "no_scale_embedding"):
        args.no_scale_embedding = False
    bart_large_architecture(args)
@register_model_architecture("bart", "mbart_base")
def mbart_base_architecture(args):
    """mBART-base: BART-base but with embedding scaling enabled by default."""
    if not hasattr(args, "no_scale_embedding"):
        args.no_scale_embedding = False
    bart_base_architecture(args)
@register_model_architecture("bart", "mbart_base_wmt20")
def mbart_base_wmt20_architecture(args):
    """mBART-base (WMT20 variant): disables layernorm on embeddings."""
    if not hasattr(args, "layernorm_embedding"):
        args.layernorm_embedding = False
    mbart_base_architecture(args)
| pytorch/fairseq | fairseq/models/bart/model.py | Python | mit | 15,516 |
#!/usr/bin/python
import sys, os, re
class GPNode:
    """One node of a parsed GetPot input tree.

    Each node is a named section holding ordered parameters (with optional
    trailing comments), ordered child sections, and section-level comments.
    NOTE: this module is written for Python 2 (print statements, iteritems).
    """
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent          # parent GPNode, or None for the root
        self.params = {}              # param name -> value
        self.params_list = [] #This is here to capture the ordering
        self.param_comments = {}      # param name -> trailing comment text
        self.children = {}            # child name -> GPNode
        self.children_list = [] #This is here to capture the ordering
        self.comments = []            # comments attached to the section itself
    """ Print this node and it's children """
    def Print(self, prefix=''):
        print prefix + self.name
        for comment in self.comments:
            print '# ' + comment
        for param in self.params_list:
            comment = ''
            if param in self.param_comments:
                comment = '# ' + self.param_comments[param]
            print prefix + self.name + '/' + param + ": " + str(self.params[param]) + ' | ' + comment
        for child in self.children_list:
            self.children[child].Print(prefix + self.name + '/')
    ##
    # Perform a fuzzy search for a node name
    # @return The node object if any part of a node key is in the supplied name
    def getNode(self, name):
        # Depth-first search: direct children first, then recurse.
        node = None
        if name in self.children:
            node = self.children[name]
        else:
            for key, value in self.children.iteritems():
                node = value.getNode(name)
                if node != None:
                    break
        return node
    def fullName(self, no_root=False):
        # Slash-separated path from the root; optionally omit the 'root' name.
        if self.parent == None:
            if no_root and self.name == 'root':
                return ''
            else:
                return self.name
        else:
            return self.parent.fullName(no_root) + '/' + self.name
    ##
    # Build a string suitable for writing to a raw input file
    # @param level The indentation level to apply to the string
    def createString(self, level = 0):
        # String to be returned
        output = ''
        # Write the block headings
        if level == 0:
            output += '[' + self.name + ']\n'
        elif level > 0:
            output += ' '*2*level + '[./' + self.name + ']\n'
        # Write the parameters
        for param in self.params_list:
            output += ' '*2*(level + 1) + param + " = '" + str(self.params[param]) + "'\n"
        # Write the children
        for child in self.children_list:
            output += self.children[child].createString(level + 1) + '\n'
        # Write the block closing
        if level == 0:
            output += '[]\n'
        elif level > 0:
            output += ' '*2*level + '[../]'
        # Return the data
        return output
class ParseException(Exception):
    """Raised when a GetPot input file cannot be parsed.

    Attributes:
        expr: short error category (e.g. "SyntaxError", "DuplicateSymbol")
        msg:  human-readable description of the problem
    """
    def __init__(self, expr, msg):
        # Forward msg to Exception so str(e) and e.args carry the message
        # (previously str(e) was empty because Exception.__init__ was never
        # called). Using the explicit base call keeps Python 2 compatibility.
        Exception.__init__(self, msg)
        self.expr = expr
        self.msg = msg
class ParseGetPot:
    """Parser for GetPot-style input files, producing a GPNode tree.

    The grammar is handled with a small set of regexes: section headers
    ([name] / [./name]), section terminators ([] / [../]), parameters with
    various quoting styles (including multi-line quoted values), and
    comments. Duplicate parameter paths raise ParseException.
    """
    def __init__(self, file_name):
        self.file_name = file_name
        self.file = open(file_name)
        self.unique_keys = set() #The full path to each key to ensure that no duplicates are supplied
        self.root_node = GPNode('root', None)
        # [name] or [./name] — group 2 captures the section name.
        self.section_begin_re = re.compile(r"\s*\[\s*(\./)?([^(\.\./) \t\n\r\f\v]+)\s*]")
        # [] or [../] closes the current section.
        self.section_end_re = re.compile(r"\s*\[\s*(\.\./)?\s*\]")
        self.parameter_res = [re.compile(r"\s*([\w\-]+)\s*=\s*'([^\n]+)'"), # parameter value in two single ticks
                              re.compile(r'\s*([\w\-]+)\s*=\s*"([^\n]+)"'), # parameter value in two double ticks
                              re.compile(r"\s*([\w\-]+)\s*=\s*'([^'\n]+)"), # parameter value with a single single tick
                              re.compile(r'\s*([\w\-]+)\s*=\s*"([^"\n]+)'), # parameter value with a single double tick
                              re.compile(r"\s*(\w+)\s*=\s*([^#'""\n\[\]\s]+)")] # parameter value with no double/single tick
        self.comment_re = re.compile(r"\s*(?:'.*')?\s*#\s*(.*)")
        self.unmatched_ticks_re = [[re.compile(r"[^']*'[^']*\n"), re.compile(r"\s*([^'\n]+)"), "'"], # unmatched single tick and appropriate data re
                                   [re.compile(r'[^"]*"[^"]*\n'), re.compile(r'\s*([^"\n]+)'), '"']] # unmatched double tick and appropriate data re
        self._parseFile()
    def _recursiveParseFile(self, current_node, lines, current_line, current_position):
        # Consumes tokens starting at (current_line, current_position) and
        # populates current_node; recurses on section headers and returns
        # the position just past the section terminator (or end of file).
        param_name = '' # We need to store the name of the last parameter that has been identified to
                        # properly assign comments. If param_name=='', we assign them to the section.
        while current_line < len(lines):
            if current_position >= len(lines[current_line]) or current_position == -1:
                # reached end of current line
                current_line += 1
                current_position = 0
                param_name = '' # comments that are not preceded by any parameter belong to the section
                if current_line == len(lines):
                    # file traversal finished
                    return len(lines), len(lines[-1])
            # we are only interested in any part of the line that has not been parsed yet
            line = lines[current_line][current_position:]
            #print current_line, current_position, 'of', len(lines[current_line]), line
            m = self.section_begin_re.match(line)
            if m:
                current_position += m.end()
                child_name = m.group(2)
                # Re-opening an existing section merges into the same node.
                if child_name in current_node.children:
                    child = current_node.children[child_name]
                else:
                    child = GPNode(child_name, current_node)
                    current_node.children[child_name] = child
                    current_node.children_list.append(child_name)
                current_line, current_position = self._recursiveParseFile(child, lines, current_line, current_position)
                continue
            # Look for a parameter on this line
            for re_param in self.parameter_res:
                m = re_param.match(line)
                if m:
                    current_position += m.end()
                    param_name = m.group(1)
                    param_value = m.group(2)
                    # See if the value of this parameter has an unmatched single tick
                    for re_tick in self.unmatched_ticks_re:
                        # Only look at the part before the comment (if there is one)
                        m_tick = re_tick[0].match(line.partition('#')[0])
                        if m_tick:
                            current_line += 1
                            current_position = 0
                            found_it = False
                            # in case of a multiline parameter, we have to remove the leading single/double tick
                            param_value = param_value.lstrip(re_tick[2])
                            # Keep eating lines until we find its mate
                            while current_line < len(lines):
                                line = lines[current_line]
                                # While we're eating lines keep appending data to the value for this parameter
                                m_data = re_tick[1].match(line)
                                if m_data:
                                    param_value += ' ' + m_data.group(1)
                                m_tick = re_tick[0].match(line.partition('#')[0]) # Don't include the comment
                                if m_tick:
                                    found_it = True
                                    break
                                current_line += 1
                            if not found_it:
                                raise ParseException("SyntaxError", "Unmatched token in Parser")
                            break # do not continue searching for unmatched ticks
                    unique_key = current_node.fullName(True) + '/' + param_name
                    if unique_key in self.unique_keys:
                        raise ParseException("DuplicateSymbol", 'Duplicate Section Name "' + os.getcwd() + '/' + self.file_name + ":" + unique_key + '"')
                    self.unique_keys.add(unique_key)
                    current_node.params[param_name] = param_value
                    current_node.params_list.append(param_name)
                    break
            if m:
                continue # with outer loop since we found a parameter and have to remove it from the current line before continuing
            # Comment in the block (not after a parameter or section header)
            m = self.comment_re.match(line)
            if m:
                current_position = -1 # remainder of line ignored
                if param_name=='':
                    current_node.comments.append(m.group(1))
                else:
                    current_node.param_comments[param_name] = m.group(1)
                continue
            # Is this section over?
            m = self.section_end_re.match(line)
            if m:
                current_position += m.end()
                return current_line, current_position
            # did not find anything else in this line
            current_position = -1
        return current_line, current_position
    def _parseFile(self):
        # Read the whole file into memory and parse it into self.root_node.
        lines = self.file.readlines()
        self._recursiveParseFile(self.root_node, lines, 0, 0)
def readInputFile(file_name):
    """Parse a GetPot input file and return the root of the resulting tree."""
    parser = ParseGetPot(file_name)
    return parser.root_node
# Command-line entry point: parse the file named on the command line
# (default: 2d_diffusion_test.i) and pretty-print the resulting tree.
# NOTE: Python 2 print statements — this script does not run under Python 3.
if __name__ == '__main__':
    if (len(sys.argv) > 1):
        filename = sys.argv[1]
    else:
        filename = '2d_diffusion_test.i'
    pgp = ParseGetPot(filename)
    print 'Printing tree'
    pgp.root_node.Print()
from __future__ import division
import inspect
import re
from functools import wraps, partial
from collections import defaultdict
from pdb import set_trace
from copy import copy
from step import piped as step_into
from stop_as_final_func import piped as stop_as_final_func
# Public API of the bookends module.
__all__ = ('verbose', 'endverbose', 'step', 'endstep', 'stop', 'setstate', 'getstate',
           'stack', '_', 'B', 'passthrough', 'tee', 'show')
# Sentinel marking "no operand to display" entries in a pipe's history.
NOT_PRESENT = '__not__present__'
# Operands longer than this are middle-elided in verbose/stack output.
MAX_OPERAND_STR_LEN = 70
def identity(x):
    """Return the argument unchanged (default transform for setstate)."""
    return x
def get_name(f):
    """Best-effort printable name for a callable (or any object)."""
    try:
        try:
            if isinstance(f, partial):
                # Recurse so nested partials read as partial(partial(...)).
                return 'partial({})'.format(get_name(f.func))
            name = f.__name__
            if name == '<lambda>':
                # Try to recover the lambda's source text for readability.
                source_line = inspect.getsourcelines(f)[0][0]
                candidates = re.findall(r'\((lambda[^\|]*)\)', source_line)
                if len(candidates) == 1:
                    return candidates[0]
            return name
        except AttributeError:
            # No __name__ attribute: fall back to the type's name.
            return type(f).__name__
    except AttributeError:
        return '<function>'
class Options():
    """Callable pipe directive that merges option flags into a pipe.

    When piped (e.g. ``| verbose |``) it updates the pipe's option flags
    and announces the change on stdout.
    """
    def __init__(self, options, info=''):
        self.info = info
        # defaultdict(bool) so unknown option flags read as False.
        self.options = defaultdict(bool, options)
    def __call__(self, pipe):
        pipe.options.update(self.options)
        print(self.info)
# Ready-made directives: pipe one in (e.g. `| verbose |`) to toggle behaviour.
verbose = Options({'verbose': True}, 'The pipe will now print each function and its output.')
endverbose = Options({'verbose': False}, 'The pipe will no longer print each function and its output.')
step = Options({'step': True}, 'The pipe will now break before each function call.')
endstep = Options({'step': False}, 'The pipe will no longer break before each function call.')
stop = Options({'stop': True}, 'The pipe will break before the next function call.')
class setstate():
    """Directive that stores (a transform of) the operand in the pipe state."""
    def __init__(self, key, func=None):
        self.key = key
        # Default to the identity transform when no function is supplied.
        self.func = identity if func is None else func
    @property
    def __name__(self):
        # Shown in the pipe's history; omit the transform when trivial.
        if self.func is identity:
            return 'setstate({})'.format(self.key)
        else:
            return 'setstate({}, {})'.format(self.key, get_name(self.func))
class getstate():
    """Directive that pairs the operand with a previously stored state value."""
    def __init__(self, key):
        self.key = key
    def __call__(self, operand):
        # `state` is injected onto this instance by the Pipe just before
        # it invokes the directive.
        stored = self.state[self.key]
        return (operand, stored)
    @property
    def __name__(self):
        return 'getstate({})'.format(self.key)
class Stack():
    """Marker type: piping the `stack` singleton prints the pipe's history."""
    pass
stack = Stack()
class Bookend():
    """Pipe delimiter: ``_ | value`` opens a Pipe; a trailing ``| _`` closes it."""
    def __init__(self, options=None):
        # Flags default to False for any option not explicitly supplied.
        initial = {} if options is None else options
        self.options = defaultdict(bool, initial)
    def __or__(self, operand):
        # Opening bookend: wrap the first operand in a Pipe carrying our flags.
        return Pipe(operand, self.options)
# Canonical bookends: `_` (alias `B`) plus variants that pre-enable
# verbose (`_.v`), stepping (`_.s`), or both (`_.vs` / `_.sv`).
_ = Bookend()
B = _
_.v = Bookend({'verbose': True})
_.s = Bookend({'step': True})
_.vs = Bookend({'verbose': True, 'step': True})
_.sv = _.vs
class Pipe():
    """Carries an operand through a chain of ``| f`` applications.

    The right-hand side of ``|`` may be a plain callable, a tuple
    ``(func, arg1, ...)`` for partial application, a Bookend (ends the pipe
    and returns the operand), or one of the directive objects
    (Options, Stack, setstate, getstate).
    """
    def __init__(self, operand, options):
        self.operand = operand
        self.options = copy(options)
        # History of (function name, shortened output) for stack traces.
        self._stack = [(None, shorten(self.operand))]
        # User state written by setstate and read by getstate.
        self.state = {}
        if self.options['verbose']:
            newline()
            _print(None, self.operand)
    def __or__(self, f):
        """Apply *f* (or interpret it as a directive) and return the pipe."""
        if isinstance(f, tuple):
            # (func, arg1, arg2, ...) means partial application.
            f, args = f[0], f[1:]
            return self.__or__(partial(f, *args))
        if isinstance(f, Bookend):
            # Closing bookend: unwrap and hand back the final operand.
            if self.options['stop']:
                self.operand = stop_as_final_func(self.operand, self)
            if self.options['verbose']:
                newline()
            return self.operand
        if isinstance(f, Options):
            self.options.update(f.options)
            if 'verbose' in f.options:
                newline()
            return self
        if isinstance(f, Stack):
            print('Stack:')
            self.stack()
            return self
        if isinstance(f, setstate):
            # Store (a transform of) the operand; the operand is unchanged.
            self.state[f.key] = f.func(self.operand)
            self._stack.append((get_name(f), NOT_PRESENT))
            return self
        if isinstance(f, getstate):
            # Give the directive access to the pipe's state before calling it.
            f.state = self.state
        try:
            if self.options['step'] or self.options['stop']:
                # One-shot 'stop' is consumed here; 'step' persists.
                self.options['stop'] = False
                self.operand = step_into(f, self.operand, self)
            else:
                self.operand = f(self.operand)
        except Exception as e:
            # Record the failure, show the pipe history, then re-raise.
            # NOTE(review): e.message is Python 2 only — confirm the target
            # interpreter, or this handler itself raises under Python 3.
            self._stack.append((get_name(f), '** {}: {} **'.format(type(e).__name__, e.message)))
            print('Pipe traceback:')
            self.stack()
            raise
        if not getattr(f, 'is_passthrough', False):
            name = get_name(f)
            self._stack.append((name, shorten(self.operand)))
            if self.options['verbose']:
                _print(name, self.operand)
        return self
    def stack(self):
        # Print the recorded (function, output) history of this pipe.
        [_print(func, output) for func, output in self._stack]
        newline()
    # Method aliases for the module-level directives.
    def verbose(self): verbose(self)
    def endverbose(self): endverbose(self)
    def step(self): step(self)
    def endstep(self): endstep(self)
    def stop(self): stop(self)
def passthrough(f):
    """Decorator: run ``f`` purely for its side effect, then pass the operand on.

    E.g. for log or inc. Decorated functions are flagged so the Pipe skips
    recording them in its history.
    """
    @wraps(f)
    def forward(operand):
        f(operand)
        return operand
    forward.is_passthrough = True
    return forward
tee = passthrough
@passthrough
def show(operand):
    """Pretty-print the current operand without altering it."""
    preview = shorten(operand)
    print('Operand:')
    _print(None, preview)
    newline()
def _print(function_name, operand):
bar = '|'
arrow = '=>'
if function_name is None:
print('{} {}'.format(bar, operand))
elif operand == NOT_PRESENT:
print('{} {}'.format(bar, function_name))
else:
max_line_length = 70
line_length = sum(map(len, map(str, [bar, function_name, arrow, operand])))
if line_length > max_line_length:
delimeter = '\n' + (' ' * 6)
else:
delimeter = ' '
print('{} {} {}{}{}'.format(bar, function_name, arrow, delimeter, operand))
def newline():
    """Emit a blank line to visually separate pipe output."""
    print('')
def shorten(operand):
    """Stringify the operand, middle-eliding it when it is too long to show."""
    text = str(operand)
    if len(text) <= MAX_OPERAND_STR_LEN:
        return text
    # Keep an equal-sized head and tail, leaving room for the '...' marker.
    half = int(MAX_OPERAND_STR_LEN / 2) - 2
    return '{}...{}'.format(text[:half], text[-half:])
| berrytj/bookends | bookends/bookends.py | Python | mit | 5,522 |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
try:
from c7n_azure.function_package import FunctionPackage
from c7n_azure.functionapp_utils import FunctionAppUtilities
from c7n_azure.policy import AzureFunctionMode
from c7n_azure.session import Session
from c7n_azure.utils import StringUtils
from c7n.utils import local_session
except ImportError:
FunctionPackage = None
pass
def cache_path():
    """Path of the package-build cache directory that sits next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'cache')
def build_function_package(config, function_name):
    """Assemble the Azure Function deployment package for the mailer.

    Bundles the function entry point and required Python packages, copies
    the *.j2 mail templates from config['templates_folders'] into the
    package, and writes a config.json rewritten to point at the packaged
    template directory. Returns the closed FunctionPackage.
    """
    # Default: run the mailer every 10 minutes unless overridden.
    schedule = config.get('function_schedule', '0 */10 * * * *')
    cache_override_path = cache_path()
    # Build package
    package = FunctionPackage(
        function_name,
        os.path.join(os.path.dirname(__file__), 'function.py'),
        cache_override_path=cache_override_path)
    package.build(None,
                  modules=['c7n', 'c7n-azure', 'c7n-mailer'],
                  non_binary_packages=['pyyaml', 'pycparser', 'tabulate', 'jmespath',
                                       'datadog', 'MarkupSafe', 'simplejson', 'pyrsistent'],
                  excluded_packages=['azure-cli-core', 'distlib', 'future', 'futures'])
    package.pkg.add_contents(
        function_name + '/function.json',
        contents=package.get_function_config({'mode':
                                             {'type': 'azure-periodic',
                                              'schedule': schedule}}))
    # Add mail templates
    for d in set(config['templates_folders']):
        if not os.path.exists(d):
            continue
        for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:
            with open(os.path.join(d, t)) as fh:
                package.pkg.add_contents(function_name + '/msg-templates/%s' % t, fh.read())
    # The packaged config must reference the in-package template location,
    # not the local folders used at build time.
    function_config = copy.deepcopy(config)
    function_config['templates_folders'] = [function_name + '/msg-templates/']
    package.pkg.add_contents(
        function_name + '/config.json',
        contents=json.dumps(function_config))
    package.close()
    return package
def provision(config):
    """Provision the Azure infrastructure and deploy the mailer function.

    Derives service plan, storage account and app insights settings from
    config['function_properties'] (with defaults), creates/updates the
    function app, then builds and publishes the function package.
    """
    log = logging.getLogger('c7n_mailer.azure.deploy')
    function_name = config.get('function_name', 'mailer')
    function_properties = config.get('function_properties', {})
    # service plan is parse first, because its location might be shared with storage & insights
    service_plan = AzureFunctionMode.extract_properties(function_properties,
                                                        'servicePlan',
                                                        {
                                                            'name': 'cloud-custodian',
                                                            'location': 'eastus',
                                                            'resource_group_name': 'cloud-custodian',
                                                            'sku_tier': 'Dynamic',  # consumption plan
                                                            'sku_name': 'Y1'
                                                        })
    location = service_plan.get('location', 'eastus')
    rg_name = service_plan['resource_group_name']
    sub_id = local_session(Session).get_subscription_id()
    # Hash suffix keeps generated resource names unique per RG/subscription.
    suffix = StringUtils.naming_hash(rg_name + sub_id)
    storage_account = AzureFunctionMode.extract_properties(function_properties,
                                                           'storageAccount',
                                                           {'name': 'mailerstorage' + suffix,
                                                            'location': location,
                                                            'resource_group_name': rg_name})
    app_insights = AzureFunctionMode.extract_properties(function_properties,
                                                        'appInsights',
                                                        {'name': service_plan['name'],
                                                         'location': location,
                                                         'resource_group_name': rg_name})
    function_app_name = FunctionAppUtilities.get_function_name(
        '-'.join([service_plan['name'], function_name]), suffix)
    FunctionAppUtilities.validate_function_name(function_app_name)
    params = FunctionAppUtilities.FunctionAppInfrastructureParameters(
        app_insights=app_insights,
        service_plan=service_plan,
        storage_account=storage_account,
        function_app_resource_group_name=service_plan['resource_group_name'],
        function_app_name=function_app_name)
    FunctionAppUtilities.deploy_function_app(params)
    log.info("Building function package for %s" % function_app_name)
    package = build_function_package(config, function_name)
    log.info("Function package built, size is %dMB" % (package.pkg.size / (1024 * 1024)))
    FunctionAppUtilities.publish_functions_package(params, package)
| ocampocj/cloud-custodian | tools/c7n_mailer/c7n_mailer/azure_mailer/deploy.py | Python | apache-2.0 | 5,600 |
from pipes import quote
import logging
import os
import sys
from mock import patch
import pytest
from fabric.api import env, hide, lcd, local, settings
from fabric.state import connections
from fabtools.vagrant import version as _vagrant_version
HERE = os.path.dirname(__file__)
VAGRANT_VERSION = _vagrant_version()
MIN_VAGRANT_VERSION = (1, 3)
@pytest.fixture(scope='session', autouse=True)
def setup_package(request):
    """Session fixture: boot (or reuse) the Vagrant VM used by all tests.

    Driven by environment variables: FABTOOLS_TEST_BOX (required, else the
    whole session is skipped), FABTOOLS_TEST_PROVIDER, FABTOOLS_TEST_REUSE_VM
    and FABTOOLS_HTTP_PROXY.
    """
    _check_vagrant_version()
    vagrant_box = os.environ.get('FABTOOLS_TEST_BOX')
    if not vagrant_box:
        pytest.skip("Set FABTOOLS_TEST_BOX to choose a Vagrant base box for functional tests")
    vagrant_provider = os.environ.get('FABTOOLS_TEST_PROVIDER')
    reuse_vm = os.environ.get('FABTOOLS_TEST_REUSE_VM')
    _configure_logging()
    _allow_fabric_to_access_the_real_stdin()
    if not reuse_vm:
        # Fresh run: discard any leftover VM and re-init from the base box.
        _stop_vagrant_machine()
        _init_vagrant_machine(vagrant_box)
    _start_vagrant_machine(vagrant_provider)
    _target_vagrant_machine()
    _set_optional_http_proxy()
    _update_package_index()
    if not reuse_vm:
        # Tear the VM down at session end only if we created it ourselves.
        request.addfinalizer(_stop_vagrant_machine)
def _check_vagrant_version():
    """Skip the whole session unless a new-enough Vagrant is installed."""
    if VAGRANT_VERSION is None:
        pytest.skip("Vagrant is required for functional tests")
    elif VAGRANT_VERSION < MIN_VAGRANT_VERSION:
        required = ".".join(map(str, MIN_VAGRANT_VERSION))
        pytest.skip("Vagrant >= %s is required for functional tests" % required)
def _configure_logging():
logger = logging.getLogger('paramiko')
logger.setLevel(logging.WARN)
def _allow_fabric_to_access_the_real_stdin():
    """Patch fabric's captured sys so interactive prompts reach the real stdin."""
    stdin_patcher = patch('fabric.io.sys')
    patched_sys = stdin_patcher.start()
    patched_sys.stdin = sys.__stdin__
def _init_vagrant_machine(base_box):
    """Write a fresh Vagrantfile for *base_box* in the test directory."""
    with lcd(HERE), settings(hide('stdout')):
        local('rm -f Vagrantfile')
        local('vagrant init %s' % quote(base_box))
def _start_vagrant_machine(provider):
    """Boot the VM, optionally forcing a specific Vagrant provider."""
    options = ' --provider %s' % quote(provider) if provider else ''
    with lcd(HERE), settings(hide('stdout')):
        local('vagrant up' + options)
def _stop_vagrant_machine():
    """Halt and destroy the VM, silently ignoring errors if it is not running."""
    with lcd(HERE), settings(hide('stdout', 'stderr', 'warnings'), warn_only=True):
        local('vagrant halt')
        local('vagrant destroy -f')
def _target_vagrant_machine():
    """Point Fabric at the VM described by `vagrant ssh-config`."""
    ssh = _vagrant_ssh_config()
    _set_fabric_env(
        host=ssh['HostName'],
        port=ssh['Port'],
        user=ssh['User'],
        # The identity file path may be quoted in ssh-config output.
        key_filename=ssh['IdentityFile'].strip('"'),
    )
    _clear_fabric_connection_cache()
def _vagrant_ssh_config():
    """Run `vagrant ssh-config` and parse its output into a plain dict."""
    with lcd(HERE), settings(hide('running')):
        output = local('vagrant ssh-config', capture=True)
    config = {}
    # Skip the first line ("Host default"); remaining lines are "Key value".
    for line in output.splitlines()[1:]:
        key, value = line.strip().split(' ', 2)
        config[key] = value
    return config
def _set_fabric_env(host, port, user, key_filename):
    """Configure Fabric's global env to target the given SSH endpoint."""
    host_string = "%s:%s" % (host, port)
    env.host = host_string
    env.host_string = host_string
    env.user = user
    env.key_filename = key_filename
    # Vagrant VMs regenerate host keys, so skip known_hosts checking, and
    # fail fast instead of prompting inside the test run.
    env.disable_known_hosts = True
    env.abort_on_prompts = True
def _set_optional_http_proxy():
    """Forward FABTOOLS_HTTP_PROXY (if set) into the remote shell environment."""
    proxy = os.environ.get('FABTOOLS_HTTP_PROXY')
    if proxy is not None:
        env.shell_env['http_proxy'] = proxy
def _clear_fabric_connection_cache():
    """Drop any cached SSH connection for the current host, if one exists."""
    try:
        del connections[env.host_string]
    except KeyError:
        pass
def _update_package_index():
    """Refresh the package index on Debian-family targets; no-op elsewhere."""
    from fabtools.system import distrib_family
    if distrib_family() == 'debian':
        from fabtools.require.deb import uptodate_index
        uptodate_index()
| datascopeanalytics/fabtools | fabtools/tests/functional_tests/conftest.py | Python | bsd-2-clause | 3,621 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from resource_management import *
import status_params

# Installation layout of the IBM Open Platform (IOP) Knox distribution.
ibm_distribution_knox_dir = '/usr/iop/current/knox-server'
ibm_distribution_knox_var = '/var'

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = default("/hostLevelParams/stack_name", None)
# Upgrade direction and target version, only set during stack upgrades.
upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version = format_stack_version(stack_version_unformatted)

# Knox executables shipped with the IOP distribution.
knox_bin = ibm_distribution_knox_dir + '/bin/gateway.sh'
ldap_bin = ibm_distribution_knox_dir + '/bin/ldap.sh'
knox_client_bin = ibm_distribution_knox_dir + '/bin/knoxcli.sh'


def _first_host(hosts):
  """Return the first entry when *hosts* is a list, else *hosts* unchanged.

  clusterHostInfo values may arrive either as a single hostname or as a
  list of hostnames; the Knox topology only needs one representative host.
  """
  if isinstance(hosts, list):
    return hosts[0]
  return hosts


# --- HDFS -------------------------------------------------------------------
namenode_hosts = default("/clusterHostInfo/namenode_host", None)
namenode_host = _first_host(namenode_hosts)
has_namenode = namenode_host is not None
# Defaults, overridden from hdfs-site when the addresses are configured.
namenode_http_port = "50070"
namenode_rpc_port = "8020"
if has_namenode:
  if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
    namenode_http_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
  if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
    namenode_rpc_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.rpc-address'])

# --- YARN -------------------------------------------------------------------
rm_hosts = default("/clusterHostInfo/rm_host", None)
rm_host = _first_host(rm_hosts)
has_rm = rm_host is not None
jt_rpc_port = "8050"
rm_port = "8080"
if has_rm:
  if 'yarn.resourcemanager.address' in config['configurations']['yarn-site']:
    jt_rpc_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.address'])
  if 'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
    rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])

# --- Hive / WebHCat ---------------------------------------------------------
hive_http_port = default('/configurations/hive-site/hive.server2.thrift.http.port', "10001")
hive_http_path = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
hive_server_hosts = default("/clusterHostInfo/hive_server_host", None)
hive_server_host = _first_host(hive_server_hosts)
templeton_port = default('/configurations/webhcat-site/templeton.port', "50111")
webhcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", None)
webhcat_server_host = _first_host(webhcat_server_hosts)

# --- HBase ------------------------------------------------------------------
hbase_master_port = default('/configurations/hbase-site/hbase.rest.port', "8080")
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", None)
hbase_master_host = _first_host(hbase_master_hosts)

# --- Oozie ------------------------------------------------------------------
oozie_server_hosts = default("/clusterHostInfo/oozie_server", None)
oozie_server_host = _first_host(oozie_server_hosts)
has_oozie = oozie_server_host is not None
oozie_server_port = "11000"
if has_oozie:
  if 'oozie.base.url' in config['configurations']['oozie-site']:
    oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])

# Knox managed properties
knox_managed_pid_symlink = "/usr/iop/current/knox-server/pids"

#
# Hbase master UI port
#
hbase_master_ui_port = default('/configurations/hbase-site/hbase.master.info.port', "60010")

# --- Spark ------------------------------------------------------------------
spark_historyserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", None)
spark_historyserver_host = _first_host(spark_historyserver_hosts)
spark_historyserver_ui_port = default("/configurations/spark-defaults/spark.history.ui.port", "18080")

# --- Solr -------------------------------------------------------------------
solr_host = default("/configurations/solr/hostname", None)
# Fixed "/configuration/" -> "/configurations/" so the lookup matches every
# other config path in this file (the old path never matched, silently
# falling back to the default).  NOTE(review): the "/solr/solr-env/" segment
# still looks unusual next to solr_host's path — confirm against the
# stack's solr-env config definition.
solr_port = default("/configurations/solr/solr-env/solr_port", "8983")

# JobHistory mapreduce
mr_historyserver_address = default("/configurations/mapred-site/mapreduce.jobhistory.webapp.address", None)

# --- Knox server configuration ----------------------------------------------
knox_conf_dir = ibm_distribution_knox_dir + '/conf'
knox_data_dir = ibm_distribution_knox_dir + '/data'
knox_logs_dir = ibm_distribution_knox_var + '/log/knox'
knox_pid_dir = status_params.knox_pid_dir
knox_user = default("/configurations/knox-env/knox_user", "knox")
knox_group = default("/configurations/knox-env/knox_group", "knox")
# File mode for generated files; 0o644 spelling is valid on Python 2.6+ and 3.
mode = 0o644
knox_pid_file = status_params.knox_pid_file
ldap_pid_file = status_params.ldap_pid_file
knox_master_secret = config['configurations']['knox-env']['knox_master_secret']
knox_master_secret_path = ibm_distribution_knox_dir + '/data/security/master'
knox_cert_store_path = ibm_distribution_knox_dir + '/data/security/keystores/gateway.jks'
knox_host_name = config['clusterHostInfo']['knox_gateway_hosts'][0]
knox_host_name_in_cluster = config['hostname']
knox_host_port = config['configurations']['gateway-site']['gateway.port']
topology_template = config['configurations']['topology']['content']
gateway_log4j = config['configurations']['gateway-log4j']['content']
ldap_log4j = config['configurations']['ldap-log4j']['content']
users_ldif = config['configurations']['users-ldif']['content']
java_home = config['hostLevelParams']['java_home']

# --- Security ---------------------------------------------------------------
security_enabled = config['configurations']['cluster-env']['security_enabled']
smokeuser = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
if security_enabled:
  knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
  _hostname_lowercase = config['hostname'].lower()
  knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST', _hostname_lowercase)
| arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/params.py | Python | apache-2.0 | 7,178 |
# -*- coding: utf-8 -*-
"""
    django-twitter
    ~~~~~~~~~~~~~~

    Package metadata for the django-twitter distribution.

    :license: MIT, see LICENSE for more details.
"""
# Distribution metadata read by packaging tools and runtime introspection.
__title__ = 'django-twitter'
__version__ = '0.1.0'
__author__ = 'Antonio Hinojo'
__license__ = 'MIT'
| ahmontero/django-twitter | twitter/__init__.py | Python | mit | 212 |
import collections
from syn.base_utils import rand_dict, get_fullname, tuple_prepend, \
get_typename, escape_for_eval
from .base import Type, serialize, hashable, rstr, estr, SER_KEYS, \
deserialize, safe_sorted, primitive_form, collect
from .numeric import Int
from .sequence import list_enumval
from .set import set_enumval
from .ne import KeyDifferences, DiffersAtKey
from itertools import islice
#-------------------------------------------------------------------------------
# Utilities
def dict_enumval(x, **kwargs):
    """Build the x-th enumerated dict value.

    Keys come from the set enumeration and values from the list
    enumeration; the two sequences are paired up, truncated to the
    shorter one so every key receives a value.
    """
    candidate_keys = list(set_enumval(x, **kwargs))
    candidate_values = list_enumval(x, **kwargs)
    limit = min(len(candidate_keys), len(candidate_values))
    return dict(zip(candidate_keys[:limit], candidate_values[:limit]))
#-------------------------------------------------------------------------------
# Mapping
class Mapping(Type):
    """Type adapter for ``collections.Mapping`` objects.

    Implements the project's ``Type`` protocol — serialization,
    enumeration, hashing, diffing and visiting — over the wrapped
    mapping stored in ``self.obj``.
    """
    # NOTE(review): ``collections.Mapping`` was removed from the
    # ``collections`` namespace in Python 3.10 (moved to
    # ``collections.abc``) — confirm the supported interpreter range.
    type = collections.Mapping
    def __init__(self, *args, **kwargs):
        super(Mapping, self).__init__(*args, **kwargs)
        # Sorted snapshot of items, built lazily on the first _visit call.
        self.visit_buffer = []
        self.visit_iter = iter(self.obj)
    def _collect(self, func, **kwargs):
        """Recursively collect over all values, then apply *func* to the dict."""
        ret = {key: collect(val, func, **kwargs)
               for key, val in self.obj.items()}
        return func(ret, **kwargs)
    @classmethod
    def deserialize(cls, dct, **kwargs):
        """Rebuild a mapping of ``cls.type`` from its serialized dict form.

        Serialization bookkeeping keys (``SER_KEYS``) are stripped from
        *dct* in place before the remaining entries are deserialized.
        """
        if dct.get(SER_KEYS.is_type, False):
            return super(Mapping, cls).deserialize(dct, **kwargs)
        for key in SER_KEYS.values():
            if key in dct:
                del dct[key]
        ret = {key: deserialize(value, **kwargs) for key, value in dct.items()}
        return cls.type(ret)
    @classmethod
    def _enumeration_value(cls, x, **kwargs):
        """Return the x-th enumerated mapping instance of ``cls.type``."""
        return cls.type(dict_enumval(x, **kwargs))
    def estr(self, **kwargs):
        """Return an eval()-able string representation of the mapping."""
        parts = ["{}: {}".format(estr(key, **kwargs), estr(value, **kwargs))
                 for key, value in self.obj.items()]
        ret = '{' + ', '.join(parts) + '}'
        ret = '{}({})'.format(get_typename(self.obj), ret)
        return escape_for_eval(ret)
    def _find_ne(self, other, func, **kwargs):
        """Explain why ``self.obj`` differs from *other*.

        Returns ``DiffersAtKey`` for the first shared key whose values
        fail *func*; otherwise ``KeyDifferences`` — also the fallback
        when every key in self.obj matches (e.g. *other* has extra keys).
        """
        for key, value in self.obj.items():
            if key not in other:
                return KeyDifferences(self.obj, other)
            oval = other[key]
            if not func(value, oval):
                return DiffersAtKey(self.obj, other, key)
        return KeyDifferences(self.obj, other)
    def _hashable(self, **kwargs):
        """Return a hashable tuple form: (fullname, ((key, value), ...))."""
        tup = tuple((hashable(key, **kwargs),
                     hashable(value, **kwargs))
                    for key, value in self.obj.items())
        return tuple_prepend(get_fullname(self.obj), tup)
    def _rstr(self, **kwargs):
        """Return a human-readable string form of the mapping."""
        # TODO: add pretty option
        parts = ["{}: {}".format(rstr(key, **kwargs), rstr(value, **kwargs))
                 for key, value in self.obj.items()]
        ret = '{' + ', '.join(parts) + '}'
        return ret
    def _serialize(self, dct, **kwargs):
        """Serialize every value into *dct* (mutated in place)."""
        for key, value in self.obj.items():
            dct[key] = serialize(value)
    def _visit(self, k, **kwargs):
        """Return the k-th (key, value) pair in safe-sorted order."""
        if not self.visit_buffer:
            self.visit_buffer = safe_sorted(list(self.obj.items()))
        return self.visit_buffer[k]
    def _visit_len(self, **kwargs):
        """Return the number of items available to visit."""
        return len(self.obj)
#-------------------------------------------------------------------------------
# Mappings
class Dict(Mapping):
    """``Mapping`` specialization for the builtin ``dict`` type."""
    type = dict
    @classmethod
    def _generate(cls, **kwargs):
        """Return a randomly generated dict (delegates to ``rand_dict``)."""
        return rand_dict(**kwargs)
# NOTE: we don't need these right now; when we do, they can be implemented
# class OrderedDict(Dict):
# type = collections.OrderedDict
# gen_type = dict
# class DefaultDict(Dict):
# type = collections.defaultdict
# gen_types = (int, dict)
#-------------------------------------------------------------------------------
# __all__
# Explicit public API of this module.
__all__ = ('Mapping',
           'Dict',)
#-------------------------------------------------------------------------------
| mbodenhamer/syn | syn/types/a/mapping.py | Python | mit | 3,893 |
"""
Support for SolarEdge Monitoring API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.solaredge/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY, CONF_MONITORED_CONDITIONS, CONF_NAME, POWER_WATT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
# PyPI client library required by this platform.
REQUIREMENTS = ['solaredge==0.0.2']
# Config for solaredge monitoring api requests.
CONF_SITE_ID = "site_id"
# Minimum delay between API calls (enforced via Throttle) and how often
# Home Assistant polls these sensors.
UPDATE_DELAY = timedelta(minutes=10)
SCAN_INTERVAL = timedelta(minutes=10)
# Supported sensor types:
# Key: ['json_key', 'name', unit, icon]
SENSOR_TYPES = {
    'lifetime_energy': ['lifeTimeData', "Lifetime energy", 'Wh',
                        'mdi:solar-power'],
    'energy_this_year': ['lastYearData', "Energy this year", 'Wh',
                         'mdi:solar-power'],
    'energy_this_month': ['lastMonthData', "Energy this month", 'Wh',
                          'mdi:solar-power'],
    'energy_today': ['lastDayData', "Energy today", 'Wh',
                     'mdi:solar-power'],
    'current_power': ['currentPower', "Current Power", POWER_WATT,
                      'mdi:solar-power']
}
# Platform configuration schema: API key and site id are mandatory.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_SITE_ID): cv.string,
    vol.Optional(CONF_NAME, default='SolarEdge'): cv.string,
    vol.Optional(CONF_MONITORED_CONDITIONS, default=['current_power']):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)])
})
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Create the SolarEdge Monitoring API sensor."""
    import solaredge
    from requests.exceptions import HTTPError, ConnectTimeout

    api_key = config[CONF_API_KEY]
    site_id = config[CONF_SITE_ID]
    platform_name = config[CONF_NAME]

    # One client instance is shared by all sensors via SolarEdgeData.
    api = solaredge.Solaredge(api_key)

    # Verify the credentials work and the site is active before adding
    # any entities; on failure log the reason and bail out.
    try:
        response = api.get_details(site_id)
        if response['details']['status'].lower() != 'active':
            _LOGGER.error("SolarEdge site is not active")
            return
        _LOGGER.debug("Credentials correct and site is active")
    except KeyError:
        _LOGGER.error("Missing details data in solaredge response")
        return
    except (ConnectTimeout, HTTPError):
        _LOGGER.error("Could not retrieve details from SolarEdge API")
        return

    # Shared data service that fetches and caches the overview payload.
    data = SolarEdgeData(hass, api, site_id)

    sensors = [SolarEdgeSensor(platform_name, key, data)
               for key in config[CONF_MONITORED_CONDITIONS]]
    add_entities(sensors, True)
class SolarEdgeSensor(Entity):
    """Sensor entity exposing one SolarEdge Monitoring API measurement."""

    def __init__(self, platform_name, sensor_key, data):
        """Initialize the sensor from its SENSOR_TYPES entry."""
        self.platform_name = platform_name
        self.sensor_key = sensor_key
        self.data = data
        self._state = None
        # Cache the JSON key and unit from the sensor-type table.
        self._json_key = SENSOR_TYPES[self.sensor_key][0]
        self._unit_of_measurement = SENSOR_TYPES[self.sensor_key][2]

    @property
    def name(self):
        """Return the entity name, e.g. ``SolarEdge (Current Power)``."""
        label = SENSOR_TYPES[self.sensor_key][1]
        return "{} ({})".format(self.platform_name, label)

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return the sensor icon."""
        return SENSOR_TYPES[self.sensor_key][3]

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    def update(self):
        """Refresh the shared data cache and read this sensor's value."""
        self.data.update()
        self._state = self.data.data[self._json_key]
class SolarEdgeData:
    """Fetches and caches overview data from the SolarEdge Monitoring API."""

    def __init__(self, hass, api, site_id):
        """Initialize the data object."""
        self.hass = hass
        self.api = api
        self.data = {}
        self.site_id = site_id

    @Throttle(UPDATE_DELAY)
    def update(self):
        """Update the data from the SolarEdge Monitoring API."""
        from requests.exceptions import HTTPError, ConnectTimeout

        try:
            overview = self.api.get_overview(self.site_id)['overview']
        except KeyError:
            _LOGGER.error("Missing overview data, skipping update")
            return
        except (ConnectTimeout, HTTPError):
            _LOGGER.error("Could not retrieve data, skipping update")
            return

        self.data = {}
        for key, value in overview.items():
            # Each overview entry carries either an 'energy' or a 'power'
            # measurement; anything else is silently dropped.
            if 'energy' in value:
                self.data[key] = value['energy']
            elif 'power' in value:
                self.data[key] = value['power']
        _LOGGER.debug("Updated SolarEdge overview data: %s", self.data)
| nugget/home-assistant | homeassistant/components/sensor/solaredge.py | Python | apache-2.0 | 5,332 |
# -*- coding: utf-8 -*-
#
# RedPipe documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 19 13:22:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import os
import sys
from os import path
# Make the project root importable so the package version can be read
# straight from the source tree being documented.
ROOTDIR = path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, ROOTDIR)
import redpipe  # noqa
# Sphinx extensions: theme, API autodoc, cross-project links, source
# viewer, and Google/NumPy-style docstring support.
extensions = [
    'alabaster',
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RedPipe'
copyright = u'2017, John Loehrer'
author = u'John Loehrer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = redpipe.__version__
# The full version, including alpha/beta/rc tags.
release = redpipe.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'logo': 'redpipe-logo.gif',
    'github_banner': True,
    'github_user': '72squared',
    'github_repo': 'redpipe',
    'travis_button': True,
    'analytics_id': 'UA-98626018-1',
}
# Sidebar widgets rendered on every page.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
    ]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RedPipedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'RedPipe.tex', u'%s Documentation' % project,
     u'John Loehrer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, project, u'%s Documentation' % project,
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, project, u'%s Documentation' % project,
     author, project, 'making redis pipelines easy in python',
     'Miscellaneous'),
]
# The GitHub banner image lives on a remote host; silence Sphinx's
# warning about non-local image URIs.
suppress_warnings = ['image.nonlocal_uri']
| 72squared/redpipe | docs/conf.py | Python | mit | 5,400 |
#
# Copyright (C) 2013 Savoir-Faire Linux Inc.
#
# This file is part of Sageo
#
# Sageo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sageo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sageo. If not, see <http://www.gnu.org/licenses/>
from .filter import Filter
from sqlalchemy import *
from sqlalchemy.orm import *
from flask.ext.wtf import RadioField
from flask.ext.babelex import gettext, ngettext
# Conventional short alias so strings can be marked for translation as _(u'…').
_ = gettext
class FilterTristate(Filter):
    """Filter offering a Yes / No / Ignore radio choice over one column."""

    def __init__(self, name, title, descr, default, column):
        """Store the default choice and build the tristate radio form field."""
        Filter.__init__(self, name, title, descr)
        self.default = default
        self.column_names = column
        choices = [('1', _(u'Yes')), ('0', _(u'No')), ('-1', _(u'Ignore'))]
        self.form_def = [RadioField(choices=choices, default=default)]

    def get_col_def(self):
        """Return the SQLAlchemy column that persists this filter's state."""
        return [Column(self.name, Enum('1', '0', '-1'), default=self.default)]
| smlacombe/sageo | app/model/filters/filter_tristate.py | Python | gpl-3.0 | 1,339 |
# coding: utf8
import asyncio
import logging
from mtypes import Document as _Document
from .utils import to_snake_case
from .fields import Field
from .hooks import (validate_columns_before_save,
log_modified_after_save)
from .errors import DocumentNotFound
class DocumentMetaClass(type):
    """Metaclass that lifts declared ``Field`` attributes into ``__mapping__``.

    For every concrete document class, ``Field`` instances are removed
    from the class namespace and recorded in ``__mapping__`` (name ->
    Field); fields flagged with ``index`` are collected into
    ``__indexes__`` as ``(name, index)`` pairs.  The base class named
    ``Document`` is created untouched.

    Fix: the original also accumulated a ``fields`` list that was never
    read anywhere — the dead local has been removed.
    """

    def __new__(cls, name, bases, attrs):
        if name == 'Document':
            # The abstract base declares no fields of its own.
            return type.__new__(cls, name, bases, attrs)
        mapping = {}
        indexes = set()
        for attr_name, attr in attrs.items():
            if isinstance(attr, Field):
                mapping[attr_name] = attr
                if attr.index:
                    indexes.add((attr_name, attr.index))
        # Strip the Field descriptors so instances store plain values.
        for attr_name in mapping:
            attrs.pop(attr_name)
        attrs['__mapping__'] = mapping
        attrs['__indexes__'] = indexes
        return type.__new__(cls, name, bases, attrs)
class Document(_Document, metaclass=DocumentMetaClass):
    """Async MongoDB document base class with before/after hook support.

    Subclasses declare ``Field`` attributes (collected by
    ``DocumentMetaClass``) and lazily share a Motor collection bound to
    the class.  Hooks registered per action ('save', 'find', 'update')
    run before the database call (awaited) and after it (scheduled on
    the configured event loop).
    """
    __logger__ = logging.getLogger(__name__)
    # Shared Motor client; set via set_connection()/set_application().
    __connection__ = None
    # Cached per-class Motor collection, created lazily in __new__.
    __collection__ = None
    __after_save_hooks__ = [(log_modified_after_save, )]
    __before_save_hooks__ = [(validate_columns_before_save, )]
    __after_find_hooks__ = []
    __before_find_hooks__ = []
    __after_update_hooks__ = []
    __before_update_hooks__ = []
    __loop__ = None
    # When None, defaults to the snake_case form of the class name.
    __collection_name__ = None
    def __new__(cls, *args, **kwargs):
        """Create an instance, lazily binding the class to its collection.

        On the first instantiation the collection is resolved from the
        connection's default database and configured (via codec options)
        to decode query results directly into instances of ``cls``.
        """
        if cls.__collection__:
            return super().__new__(cls, *args, **kwargs)
        connection = cls.__connection__
        # database_name = connection.get_default_database().name
        collection_name = cls.__collection_name__ or to_snake_case(cls.__name__)
        collection = connection \
            .get_default_database() \
            .get_collection(collection_name)
        codec_options = collection \
            .codec_options \
            ._replace(document_class=cls)
        cls.__collection__ = collection \
            .with_options(codec_options=codec_options)
        cls.__database_name__ = cls.__collection__.database.name
        cls.__collection_name__ = collection_name
        return super().__new__(cls, *args, **kwargs)
    @classmethod
    def set_event_loop(cls, loop):
        """Remember the asyncio loop used to schedule after-hooks."""
        cls.__loop__ = loop
    @classmethod
    def set_connection(cls, connection, loop=None):
        """Attach a Motor client (and optionally an event loop) to the class."""
        cls.set_event_loop(loop or asyncio.get_event_loop())
        cls.__connection__ = connection
    @classmethod
    def set_application(cls, app):
        """Wire connection setup/teardown into a Sanic-style app lifecycle."""
        cls.__app__ = app
        @app.listener('before_server_start')
        def before_server_start(app, loop):
            from motor.motor_asyncio import AsyncIOMotorClient as MongoClient
            MONGO_URI = app.config.get('MONGO')
            cls.set_connection(MongoClient(MONGO_URI, io_loop=loop), loop)
        @app.listener('before_server_stop')
        def before_server_stop(app, loop):
            # Drop all class-level references so state does not leak
            # across server restarts.
            cls.__loop__ = None
            cls.__app__ = None
            cls.__connection__ = None
    # NOTE(review): this classmethod is shadowed by the instance-level
    # find_one defined immediately below (same name in the class body),
    # so it is unreachable — confirm whether it can be removed.
    @classmethod
    async def find_one(cls, *args, **kwargs):
        m = cls()
        await m._apply_before_hooks('find', *args, **kwargs)
        result = await cls.__collection__.find_one(*args, **kwargs)
        m._apply_after_hooks('find', result, *args, **kwargs)
        return result
    async def find_one(self, *args, **kwargs): # noqa
        """Find one document, running the 'find' hooks around the query.

        With an explicit query argument the matching document is
        returned; with no arguments, ``self`` is used as the filter and
        the instance is updated in place with the result (returning
        None).  Raises DocumentNotFound when nothing matches.
        """
        query, *args = args or (None, None)
        with_filter = True
        if not query:
            query = self
            with_filter = False
        await self._apply_before_hooks('find', query, *args, **kwargs)
        result = await self.__collection__.find_one(query, *args, **kwargs)
        self._apply_after_hooks('find', result, query, *args, **kwargs)
        if not result:
            raise DocumentNotFound(query)
        if with_filter:
            return result
        self.update(result)
    # Alias: doc.query(...) behaves exactly like doc.find_one(...).
    query = find_one
    @classmethod
    async def find(cls, *args, **kwargs):
        """Return a (not awaited) cursor for all matching documents."""
        await cls._apply_before_hooks('find', *args, **kwargs)
        result = cls.__collection__.find(*args, **kwargs)
        cls._apply_after_hooks('find', result, *args, **kwargs)
        return result
    # NOTE(review): like find_one above, this classmethod is shadowed by
    # the instance-level update_one below — confirm intent.
    @classmethod
    async def update_one(cls, *args, **kwargs):
        query, *args = args or ({}, ())
        m = cls(query)
        await m._apply_before_hooks('update', m, args, kwargs)
        result = await m.__collection__.update_one(m, *args, **kwargs)
        if result.modified_count == 1 and result.upserted_id:
            doc = await m.__collection__ \
                .find_one({'_id': result.upserted_id})
        else:
            doc = await m.__collection__ \
                .find_one(m)
        m.update(doc)
        # TODO: if query is modified by upset, doc will be empty
        return m
    async def update_one(self, *args, **kwargs): # noqa
        """Update this document (``self`` as filter), running 'update' hooks."""
        await self._apply_before_hooks('update', self, args, kwargs)
        result = await self.__collection__.update_one(self, *args, **kwargs)
        self._apply_after_hooks('update', result, self)
        return result
    async def save(self):
        """Insert the document, or $set-update it when ``_id`` is present."""
        await self._apply_before_hooks('save', self)
        if not self.get('_id'):
            insert_result = await self.__collection__.insert_one(self)
            self._id = insert_result.inserted_id
        else:
            await self.__collection__.update_one(
                {'_id': self._id},
                {'$set': self}
            )
        self._apply_after_hooks('save', self)
        return self
    @classmethod
    def register_before_hook(cls, act, *funcs):
        """Append *funcs* to the before-hook list for action *act*."""
        return getattr(cls, '__before_{act}_hooks__'.format(act=act)) \
            .extend(funcs)
    @classmethod
    def register_after_hook(cls, act, *funcs):
        """Append *funcs* to the after-hook list for action *act*."""
        return getattr(cls, '__after_{act}_hooks__'.format(act=act)) \
            .extend(funcs)
    @classmethod
    def before_save_hook(cls, *funcs):
        return cls.register_before_hook('save', *funcs)
    @classmethod
    def after_save_hook(cls, *funcs):
        return cls.register_after_hook('save', *funcs)
    @classmethod
    def before_update_hook(cls, *funcs):
        return cls.register_before_hook('update', *funcs)
    @classmethod
    def after_update_hook(cls, *funcs):
        return cls.register_after_hook('update', *funcs)
    @classmethod
    def before_find_hook(cls, *funcs):
        return cls.register_before_hook('find', *funcs)
    @classmethod
    def after_find_hook(cls, *funcs):
        return cls.register_after_hook('find', *funcs)
    @classmethod
    def _apply_after_hooks(
            cls,
            act,
            result,
            *query_args,
            **query_kwargs
    ):
        """Schedule all after-hooks for *act* on the configured loop.

        Coroutine hooks are wrapped in a task; plain callables are
        dispatched via call_soon_threadsafe.
        NOTE(review): ``cls._done`` is not defined anywhere in this
        module, and coroutine hooks (unlike sync ones) are not passed
        ``result`` — verify both before relying on async after-hooks.
        """
        for hook, *hook_args in getattr(cls, '__after_{}_hooks__'.format(act)):
            if asyncio.iscoroutinefunction(hook):
                future = asyncio.ensure_future(
                    hook(hook_args, query_args, query_kwargs)
                )
                future.add_done_callback(cls._done)
            else:
                cls.__loop__ \
                    .call_soon_threadsafe(
                        hook,
                        result,
                        hook_args,
                        query_args,
                        query_kwargs
                    )
        return True
    @classmethod
    async def _apply_before_hooks(cls, act, *query_args, **query_kwargs):
        """Run all before-hooks for *act*, awaiting coroutine hooks."""
        for hook, *args in getattr(cls, '__before_{}_hooks__'.format(act)):
            if asyncio.iscoroutinefunction(hook):
                await hook(args, query_args, query_kwargs)
            else:
                hook(args, query_args, query_kwargs)
        return True
| ioimop/mMongo | mmongo/document.py | Python | gpl-3.0 | 7,617 |
"""Build an Apache Beam pipeline for keras inference to BigQuery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tempfile
import apache_beam as beam
from apache_beam.io.gcp.bigquery import BigQueryDisposition
from apache_beam.io.gcp.bigquery import WriteToBigQuery
from apache_beam.io.tfrecordio import ReadFromTFRecord
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from datathon_etl_pipelines.utils import get_setup_file
import numpy as np
import tensorflow as tf
def default_normalize(image):
  """Scale integer pixel intensities into float32 values in [0, 1]."""
  scale = 1.0 / 255.0
  return scale * tf.cast(image, tf.float32)
class ExampleWithImageBytesToInput(object):
  """Convert a TFExample into a model-ready image array.

  Args:
    image_format (Union['jpg', 'png']): the format of the images to decode
      ('jpeg' is accepted as an alias for 'jpg').
    image_process_fn (Callable[[tf.Tensor], tf.Tensor]): applied to the
      decoded image; defaults to scaling intensities into [0, 1].
    feature_name (Optional[str]): name of the feature holding the encoded
      bytes. Defaults to (jpg|png)_bytes, according to image_format.
  """

  def __init__(self,
               image_format,
               image_process_fn=default_normalize,
               feature_name=None):
    if image_format == 'jpeg':
      image_format = 'jpg'
    if image_format not in ('jpg', 'png'):
      raise ValueError('Unrecognized image format ' + image_format)
    self.image_format = image_format
    if feature_name is None:
      self.feature_name = '{}_bytes'.format(image_format)
    else:
      self.feature_name = feature_name
    self._image_process_fn = image_process_fn
    # Graph/session state built lazily by initialize().
    self._session = None
    self._input_bytes_tensor = None
    self._image = None
    self.initialized = False

  def initialize(self):
    """Build the TF1-style decode graph and session on this worker."""
    self._input_bytes_tensor = tf.placeholder(tf.string, [])
    decoders = {'jpg': tf.image.decode_jpeg, 'png': tf.image.decode_png}
    decode_fn = decoders.get(self.image_format)
    if decode_fn is None:
      raise ValueError('Unrecognized image format ' + self.image_format)
    self._image = self._image_process_fn(decode_fn(self._input_bytes_tensor))
    self._session = tf.Session()
    self.initialized = True

  def __call__(self, example):
    """Decode the example's image bytes into an array of intensities.

    Args:
      example (tf.train.Example): example whose ``feature_name`` feature
        holds the encoded image bytes.

    Returns:
      np.array: the decoded, processed image (by default HWC with pixel
      intensities in [0, 1]).
    """
    if not self.initialized:
      # Lazy one-time setup: keeps the object picklable for Beam workers.
      self.initialize()
    raw_bytes = example.features.feature[
        self.feature_name].bytes_list.value[0]
    return self._session.run(self._image,
                             {self._input_bytes_tensor: raw_bytes})
class Predict(beam.DoFn):
  """Run Keras inference on single examples.

  Args:
    keras_model_uri (str): GCS uri (gs://...) or local path of the saved
      Keras model (created with `tf.keras.models.Model.save`).
    example_to_row (Callable[[tf.train.Example], Dict[str, Any]]): maps an
      input example to identifier columns for the output row.
    example_to_input (Callable[[tf.train.Example], np.array]): maps an
      input example to the model's input array (no batch dimension).
    output_to_row (Callable[[np.array], Dict[str, Any]]): maps the model
      output (batch dimension removed) to prediction columns.
  """

  def __init__(self, keras_model_uri, example_to_row, example_to_input,
               output_to_row):
    super(Predict, self).__init__()
    self.keras_model_uri = keras_model_uri
    self._example_to_row = example_to_row
    self._example_to_input = example_to_input
    self._output_to_row = output_to_row
    self._model = None
    self.initialized = False

  def initialize(self):
    """Fetch the saved model (possibly from GCS) and load it on this worker."""
    # Copy to a local temp file; a no-op cost when the uri is already local.
    with tempfile.NamedTemporaryFile('wb') as local_model_file:
      with tf.io.gfile.GFile(self.keras_model_uri, 'rb') as gcs_model_file:
        local_model_file.write(gcs_model_file.read())
      local_model_file.flush()
      # Inference only: the model does not need to be compiled.
      self._model = tf.keras.models.load_model(
          local_model_file.name, compile=False)
    self.initialized = True

  def process(self, element):
    """Overrides beam.DoFn.process.

    Args:
      element (tf.train.Example): The instance to run inference on.

    Yields:
      Dict[str, Any]: column-name -> value dictionary combining the
      identifier columns with the prediction columns.
    """
    if not self.initialized:
      self.initialize()
    row = self._example_to_row(element)
    batch = np.expand_dims(self._example_to_input(element), axis=0)
    predictions = self._model.predict_on_batch(batch)
    # Drop the batch dimension before mapping outputs to columns.
    row.update(self._output_to_row(predictions[0, ...]))
    yield row
def build_and_run_pipeline(pipeline_options, tfrecord_pattern, predict_dofn,
                           output_bq_table, bq_table_schema):
    """Build and run a Keras batch inference pipeline to BigQuery pipeline.

    Args:
      pipeline_options (beam.options.pipeline_options import PipelineOptions):
        Commandline arguments for this pipeline.
      tfrecord_pattern (str): A file glob pattern to read TFRecords from.
      predict_dofn (beam.DoFn): A DoFn that transforms TFExamples into
        dictionaries describing BigQuery rows.
      output_bq_table (str): A string of the form `project:dataset.table_name`.
        This table will be overwritten if it already exists.
      bq_table_schema (Union[str, TableSchema]): A BigQuery schema in the format
        used by `apache_beam.io.gcp.bigquery.WriteToBigQuery`.
    """
    with beam.Pipeline(options=pipeline_options) as pipeline:
        # Read serialized tf.train.Example protos.
        examples = pipeline | ReadFromTFRecord(
            tfrecord_pattern, coder=beam.coders.ProtoCoder(tf.train.Example))
        # Run inference, producing one row dictionary per example.
        rows = examples | beam.ParDo(predict_dofn)
        # Overwrite the destination table with the labelled rows.
        _ = rows | WriteToBigQuery(
            table=output_bq_table,
            schema=bq_table_schema,
            write_disposition=BigQueryDisposition.WRITE_TRUNCATE)
def get_commandline_args(description):
    """Generate command line arguments used by inference to BigQuery scripts.

    Args:
      description (str): The description of the script, which will appear at
        the top of the --help documentation.

    Returns:
      Tuple[Namespace, PipelineOptions]: 1) The commandline options with fields
        `input_tfrecord_pattern`, `keras_model`, and `bigquery_table` 2) The
        apache beam pipeline options.
    """
    arg_parser = argparse.ArgumentParser(description=description)
    # (flag, help text) pairs for the three string-valued options.
    string_flags = [
        ('--input_tfrecord_pattern',
         'A file glob pattern that specifies the TFRecord files to read from.'
         ),
        ('--keras_model',
         'The GCS uri, beginning with gs://, or a local path. This specifies '
         'the saved Keras model to use for inference. This model is created '
         'with `tf.keras.models.Model.save`.'),
        ('--bigquery_table',
         'The table to store the labelled predictions in. This is a string '
         'of the form `project:dataset.table_name`. This table will be '
         'overwritten if it already exists.'),
    ]
    for flag, help_text in string_flags:
        arg_parser.add_argument(flag, type=str, help=help_text)
    args, pipeline_args = arg_parser.parse_known_args()
    beam_options = PipelineOptions(pipeline_args)
    # Save the main session so DoFn closures survive pickling onto workers.
    beam_options.view_as(SetupOptions).save_main_session = True
    beam_options.view_as(SetupOptions).setup_file = get_setup_file()
    return args, beam_options
| GoogleCloudPlatform/healthcare | datathon/datathon_etl_pipelines/generic_imaging/inference_to_bigquery.py | Python | apache-2.0 | 8,351 |
from django.template.defaultfilters import get_digit
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
    """Tests for the `get_digit` template filter function."""

    def test_values(self):
        # Digits are counted from the right, 1-based; 0 returns the number.
        for position, expected in ((1, 3), (2, 2), (3, 1), (4, 0), (0, 123)):
            self.assertEqual(get_digit(123, position), expected)

    def test_string(self):
        # Non-numeric input is returned unchanged.
        self.assertEqual(get_digit('xyz', 0), 'xyz')
| DONIKAN/django | tests/template_tests/filter_tests/test_get_digit.py | Python | bsd-3-clause | 477 |
class GridType:
    """Base class for grid-type tag objects.

    Subclasses set ``_type`` to the grid type's name. Instances compare equal
    to other instances of the same class and to anything whose ``str()``
    matches the type name (e.g. the plain string ``"rectilinear"``).
    """

    # Overridden by subclasses with the type name string.
    _type = None

    def __eq__(self, that):
        return isinstance(that, self.__class__) or str(self) == str(that)

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ makes instances unhashable
        # under Python 3. Hash by the type name so values that compare equal
        # (instances and their name string) hash equally.
        return hash(str(self))

    def __str__(self):
        return self._type

    def __repr__(self):
        return "%s()" % self.__class__.__name__
class GridTypeRectilinear(GridType):
    """Type tag for "rectilinear" grids."""
    _type = "rectilinear"
class GridTypeStructured(GridType):
    """Type tag for "structured" grids."""
    _type = "structured"
class GridTypeUnstructured(GridType):
    """Type tag for "unstructured" grids."""
    _type = "unstructured"
| csdms/pymt | pymt/grids/grid_type.py | Python | mit | 454 |
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2014, The Open Aggregator
# GNU General Public License, Ver. 3 (see docs/license.txt)
################################################################################
"""Probability Features File
The probability features file has the following format::
dpc1,<p-header-1>,<p-header-2>,...
<x-value-1>,g_1(y | x_1),g_2(y | x_1),...
<x-value-2>,g_1(y | x_2),g_2(y | x_2),...
...
``<p-header>`` headers can be any of the following, with the
corresponding values in their rows (``<p-value-ij>``).
* ``mean``: E y|x_i
* ``var``: E (y|x_i - E y|x_i)^2
* ``sdev``: \sqrt{E (y|x_i - E y|x_i)^2}
* ``skew``: E ((y|x_i - E y|x_i) / sqrt{E (y|x_i - E y|x_i)^2})^3
* ``mode``: \max f(y | x_i)
* numeric (0 - 1): F^{-1}(p_j|x_i)
The row headers (``<x-value>``) can be numeric, in which case a
continuous spline bridges them, or categorical strings.
Below is a sample features file::
dpc1,mean,var
treated,0,1
control,4,4
"""
__copyright__ = "Copyright 2014, The Open Aggregator"
__license__ = "GPL"
__author__ = "James Rising"
__credits__ = ["James Rising", "Solomon Hsiang", "Bob Kopp"]
__maintainer__ = "James Rising"
__email__ = "jar2234@columbia.edu"
__status__ = "Production"
__version__ = "$Revision$"
# $Source$
import csv, math, random, copy
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.stats import norm, expon
from scipy.optimize import brentq, minimize
from spline_model import SplineModel, SplineModelConditional
class FeaturesInterpreter:
@staticmethod
def init_from_feature_file(spline, file, delimiter, limits, status_callback=None):
    """Populate `spline` from a probability features file (module docstring).

    file: open file-like object with the CSV content (Python 2 API).
    delimiter: CSV delimiter character.
    limits: (low, high) bounds passed through to the conditionals.
    status_callback: optional fn(message, fraction) progress hook.
    Returns the populated spline.
    """
    reader = csv.reader(file, delimiter=delimiter)
    # Python 2 csv-reader API; on Python 3 this would be next(reader).
    header = reader.next()
    if header[0] != "dpc1":
        raise ValueError("Unknown format for %s" % (header[0]))
    spline.scaled = True
    # Drop trailing empty header cells.
    while header[-1] == '':
        header = header[:-1]
    # Convert header p-values into numerics
    for ii in range(1, len(header)):
        try:
            val = float(header[ii])
            header[ii] = val
        except ValueError:
            pass
    spline.xx = []
    spline.xx_text = []
    spline.xx_is_categorical = False
    spline.conditionals = []
    last_row = None
    last_conditional = None
    for row in reader:
        # Reuse the previous conditional when the feature values repeat.
        if last_row and last_row[1:] == row[1:]:
            conditional = last_conditional.copy()
        else:
            conditional = FeaturesInterpreter.make_conditional_respecting(header, row, limits)
        last_row = row
        last_conditional = conditional
        spline.add_conditional(row[0], conditional)
        if status_callback:
            # Fraction is a heuristic based on the line count only.
            status_callback("Parsing...", reader.line_num / (reader.line_num + 3.0))
    return spline
# limits is tuple of (low, high)
@staticmethod
def features_to_gaussian(header, row, limits):
    """Try to interpret the row's features as a gaussian-like conditional.

    Two layouts are handled: (1) three columns with a central value plus a
    var/sdev column; (2) four columns with a central value plus two
    p-value columns, either symmetric (plain gaussian) or asymmetric
    (gaussian lower tail + exponential upper tail, fitted numerically).
    Returns a SplineModelConditional, or None when the features don't fit.
    (Python 2 code: print statements, list-returning map.)
    """
    # Does this look like a mean-variance feature file?
    if len(header) == 3:
        mean = None
        if 'mean' in header:
            mean = float(row[header.index('mean')])
        if 'mode' in header:
            mean = float(row[header.index('mode')])
        if .5 in header:
            mean = float(row[header.index(.5)])
        if mean is None:
            return None
        if 'var' in header:
            var = float(row[header.index('var')])
        elif 'sdev' in header:
            var = float(row[header.index('sdev')]) * float(row[header.index('sdev')])
        else:
            return None
        # Degenerate distribution: all mass at the mean.
        if np.isnan(var) or var == 0:
            return SplineModelConditional.make_single(mean, mean, [])
        # This might be uniform
        if mean - 2*var < limits[0] or mean + 2*var > limits[1]:
            return None
        return SplineModelConditional.make_gaussian(limits[0], limits[1], mean, var)
    elif len(header) == 4:
        # Does this look like a mean and evenly spaced p-values?
        header = header[1:] # Make a copy of the list
        row = row[1:]
        mean = None
        if 'mean' in header:
            mean = float(row.pop(header.index('mean')))
            header.remove('mean')
        elif 'mode' in header:
            mean = float(row.pop(header.index('mode')))
            header.remove('mode')
        elif .5 in header:
            mean = float(row.pop(header.index(.5)))
            header.remove(.5)
        else:
            return None
        # Check that the two other values are evenly spaced p-values
        row = map(float, row[0:2])
        if np.all(np.isnan(row)):
            return SplineModelConditional.make_single(mean, mean, [])
        # Symmetric quantiles close enough to equidistant => plain gaussian.
        if header[1] == 1 - header[0] and abs(row[1] - mean - (mean - row[0])) < abs(row[1] - row[0]) / 100.0:
            print "HERE"
            lowp = min(header)
            lowv = np.array(row)[np.array(header) == lowp][0]
            if lowv == mean:
                return SplineModelConditional.make_single(mean, mean, [])
            # Bracket the sdev that reproduces the low quantile, then solve.
            lowerbound = 1e-4 * (mean - lowv)
            upperbound = np.sqrt((mean - lowv) / lowp)
            sdev = brentq(lambda sdev: norm.cdf(lowv, mean, sdev) - lowp, lowerbound, upperbound)
            if float(limits[0]) < mean - 3*sdev and float(limits[1]) > mean + 3*sdev:
                return SplineModelConditional.make_gaussian(limits[0], limits[1], mean, sdev*sdev)
            else:
                return None
        else:
            # Heuristic best curve: known tails, fit to mean
            lowp = min(header)
            lowv = np.array(row)[np.array(header) == lowp][0]
            lowerbound = 1e-4 * (mean - lowv)
            upperbound = np.log((mean - lowv) / lowp)
            low_sdev = brentq(lambda sdev: norm.cdf(lowv, mean, sdev) - lowp, lowerbound, upperbound)
            if float(limits[0]) > mean - 3*low_sdev:
                return None
            low_segment = SplineModelConditional.make_gaussian(float(limits[0]), lowv, mean, low_sdev*low_sdev)
            highp = max(header)
            highv = np.array(row)[np.array(header) == highp][0]
            lowerbound = 1e-4 * (highv - mean)
            upperbound = np.log((highv - mean) / (1 - highp))
            high_scale = brentq(lambda scale: .5 + expon.cdf(highv, mean, scale) / 2 - highp, lowerbound, upperbound)
            if float(limits[1]) < mean + 3*high_scale:
                return None
            # Construct exponential, starting at mean, with full cdf of .5
            high_segment = SplineModelConditional.make_single(highv, float(limits[1]), [np.log(1/high_scale) + np.log(.5) + mean / high_scale, -1 / high_scale])
            # Knots for the middle spline: two near the low tail, the mean,
            # and two near the high tail.
            sevenys = np.linspace(lowv, highv, 7)
            ys = np.append(sevenys[0:2], [mean, sevenys[-2], sevenys[-1]])
            lps0 = norm.logpdf(ys[0:2], mean, low_sdev)
            lps1 = expon.logpdf([ys[-2], ys[-1]], mean, high_scale) + np.log(.5)
            #bounds = [norm.logpdf(mean, mean, low_sdev), norm.logpdf(mean, mean, high_sdev)]
            # Optimize the middle knot's log-density to match mean/tails.
            result = minimize(lambda lpmean: FeaturesInterpreter.skew_gaussian_evaluate(ys, np.append(np.append(lps0, [lpmean]), lps1), low_segment, high_segment, mean, lowp, highp), .5, method='Nelder-Mead')
            print "Skew Gaussian"
            print np.append(np.append(lps0, result.x), lps1)
            return FeaturesInterpreter.skew_gaussian_construct(ys, np.append(np.append(lps0, result.x), lps1), low_segment, high_segment)
@staticmethod
def skew_gaussian_construct(ys, lps, low_segment, high_segment):
    """Stitch a gaussian lower tail, a quadratic-spline middle fitted to the
    (ys, lps) log-density knots, and an exponential upper tail into one
    conditional.

    Returns the rescaled SplineModelConditional, or None if rescaling fails.
    """
    mid_segment = SplineModelConditional.make_conditional_from_spline(InterpolatedUnivariateSpline(ys, lps, k=2), (low_segment.y1s[0], high_segment.y0s[0]))
    conditional = SplineModelConditional()
    conditional.add_segment(low_segment.y0s[0], low_segment.y1s[0], copy.copy(low_segment.coeffs[0]))
    for ii in range(mid_segment.size()):
        conditional.add_segment(mid_segment.y0s[ii], mid_segment.y1s[ii], mid_segment.coeffs[ii])
    conditional.add_segment(high_segment.y0s[0], high_segment.y1s[0], copy.copy(high_segment.coeffs[0]))
    # Fix: the original used a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt; only rescaling errors should map to None.
    try:
        conditional.rescale()
    except Exception:
        return None
    return conditional
@staticmethod
def skew_gaussian_evaluate(ys, lps, low_segment, high_segment, mean, lowp, highp):
    """Score a candidate skew-gaussian: lower is better.

    The score is a sum of squared penalties for density discontinuities at
    the two segment joins, deviation from the requested mean, and deviation
    from the requested tail probabilities.
    """
    conditional = FeaturesInterpreter.skew_gaussian_construct(ys, lps, low_segment, high_segment)
    if conditional is None:
        return SplineModel.posinf
    join_low = low_segment.y1s[0]
    join_high = high_segment.y0s[0]
    penalties = []
    # Density discontinuity at the lower join.
    penalties.append(np.square(conditional.evaluate(0, join_low) -
                               conditional.evaluate(1, join_low)))
    # Density discontinuity at the upper join.
    penalties.append(np.square(conditional.evaluate(conditional.size() - 2, join_high) -
                               conditional.evaluate(conditional.size() - 1, join_high)))
    # Deviation from the requested mean.
    penalties.append(np.square(mean - conditional.approximate_mean((low_segment.y0s[0], high_segment.y1s[0]))))
    # Deviation from the requested tail probabilities.
    penalties.append(np.square(conditional.cdf(join_low) - lowp))
    penalties.append(np.square(conditional.cdf(join_high) - highp))
    return sum(penalties)
@staticmethod
def features_to_exponential(header, row, limits):
    """Try to interpret a single 'mean' feature as a one-sided exponential.

    Returns a rescaled SplineModelConditional, or None when there is not at
    least three mean-lengths of room on the open side within `limits`.
    """
    if len(header) != 2:
        return None
    if 'mean' not in header:
        return None
    mean = float(row[header.index('mean')])
    # Is it one-sided?
    if mean > limits[0] and limits[0] + (mean - limits[0]) * 3 < limits[1]:
        # positive exponential
        return SplineModelConditional.make_single(limits[0], limits[1], [limits[0] / (mean - limits[0]), -1/(mean - limits[0])]).rescale()
    # Fix: this condition used `or`, which made the negative-exponential
    # branch fire for nearly any mean and left `return None` unreachable;
    # `and` mirrors the positive-exponential check above.
    if mean < limits[1] and limits[1] - (limits[1] - mean) * 3 > limits[0]:
        # negative exponential
        return SplineModelConditional.make_single(limits[0], limits[1], [-limits[1] / (limits[1] - mean), 1/(limits[1] - mean)]).rescale()
    return None
@staticmethod
def features_to_uniform(header, row, limits):
    """Interpret a feature file with no feature columns as uniform on `limits`.

    Returns the uniform conditional, or None when other columns are present.
    """
    if len(header) != 1:
        return None
    # Constant density over the whole range.
    density = 1/(limits[1] - limits[0])
    return SplineModelConditional.make_single(limits[0], limits[1], [density])
# Only for scaled distributions
@staticmethod
def make_conditional_respecting(header, row, limits):
    """Build a conditional, honoring explicit 0/1 quantile bound columns.

    Columns headed 0 or 1 give hard lower/upper bounds: log-density outside
    them is -inf, and the remaining columns are interpreted by
    make_conditional over the narrowed range. Only for scaled distributions.
    """
    # Work on copies so the caller's header (shared across data rows) is not
    # mutated, and so header and row stay index-aligned after removals. The
    # original did `header.remove(header.index(0))` -- removing the *value
    # equal to the index*, not the 0 column -- and never removed the matching
    # row cell, misaligning later row[header.index(...)] lookups.
    header = list(header)
    row = list(row)
    low = high = None
    if 0 in header:
        index = header.index(0)
        low = float(row[index])
        del header[index]
        del row[index]
    if 1 in header:
        index = header.index(1)
        high = float(row[index])
        del header[index]
        del row[index]
    if low is not None and high is not None:
        model = FeaturesInterpreter.make_conditional(header, row, (low, high))
        model.add_segment(limits[0], low, [SplineModel.neginf])
        model.add_segment(high, limits[1], [SplineModel.neginf])
        return model
    if low is not None:
        model = FeaturesInterpreter.make_conditional(header, row, (low, limits[1]))
        model.add_segment(limits[0], low, [SplineModel.neginf])
        return model
    if high is not None:
        model = FeaturesInterpreter.make_conditional(header, row, (limits[0], high))
        model.add_segment(high, limits[1], [SplineModel.neginf])
        return model
    return FeaturesInterpreter.make_conditional(header, row, limits)
# Only for scaled distributions
@staticmethod
def make_conditional(header, row, limits):
    """Build a conditional for one data row. Only for scaled distributions.

    Tries each special-case distribution in order of specificity, then falls
    back to a numerically fitted spline.
    """
    for recognizer in (FeaturesInterpreter.features_to_gaussian,
                       FeaturesInterpreter.features_to_exponential,
                       FeaturesInterpreter.features_to_uniform):
        conditional = recognizer(header, row, limits)
        if conditional is not None:
            return conditional
    # No special case matched: fit a spline to the requested features.
    spline = FeaturesInterpreter.best_spline(header, row, limits)
    conditional = SplineModelConditional.make_conditional_from_spline(spline, limits)
    return conditional.rescale()
@staticmethod
def best_knot(knots, newknots):
"""Find the knot furthest from existing knots"""
if len(knots) == 0:
return newknots[0]
scores = np.zeros(len(newknots))
for ii in range(len(newknots)):
scores[ii] = min(abs(np.array(knots) - newknots[0]))
return newknots[np.argmax(scores)]
@staticmethod
def best_spline(header, row, limits):
    """Fit a quadratic log-density spline matching the row's features.

    Seeds knots heuristically from the feature columns, then runs 100 rounds
    of random perturbation, keeping the spline with the lowest
    evaluate_spline error. (Python 2 code: print statements.)
    """
    print "Best Spline"
    # Do general solution
    best_ys = []
    best_lps = []
    # Seed one knot (y position, log-density guess) per feature column.
    for ii in range(1, len(header)):
        if isinstance(header[ii], float):
            best_ys.append(float(row[ii]))
            best_lps.append(-6*abs(.5 - header[ii]))
        elif header[ii] == 'mode':
            best_ys.append(float(row[ii]))
            best_lps.append(0)
        elif header[ii] == 'var':
            if 'mean' in header:
                best_ys.append(FeaturesInterpreter.best_knot(best_ys, [float(row[header.index('mean')]) + math.sqrt(float(row[ii])),
                                                                      float(row[header.index('mean')]) - math.sqrt(float(row[ii]))]))
            else:
                best_ys.append(FeaturesInterpreter.best_knot(best_ys, [math.sqrt(float(row[ii])),
                                                                      -math.sqrt(float(row[ii]))]))
            best_lps.append(-1)
        elif header[ii] == 'skew':
            if 'var' in header and 'mean' in header:
                best_ys.append(FeaturesInterpreter.best_knot(best_ys, [float(row[header.index('mean')]) +
                                                                      math.sqrt(float(row[header.index('var')])) * math.pow(float(row[ii]), 1.0/3)]))
            else:
                best_ys.append(FeaturesInterpreter.best_knot(best_ys, [math.pow(float(row[ii]), 1.0/3)]))
            best_lps.append(-1.5)
        elif header[ii] == 'mean' and 'var' not in header and 'skew' not in header:
            best_ys.append(FeaturesInterpreter.best_knot(best_ys, [float(row[ii])]))
            best_lps.append(0)
    # Sort knots by y position, keeping the log-densities paired.
    indexes = [elt[0] for elt in sorted(enumerate(best_ys), key=lambda elt: elt[1])]
    best_ys = [best_ys[index] for index in indexes]
    best_lps = [best_lps[index] for index in indexes]
    best_spline = InterpolatedUnivariateSpline(best_ys, best_lps, k=2)
    best_error = FeaturesInterpreter.evaluate_spline(header, row, best_spline, limits)
    # Search space for spline that fits criteria
    print "Searching..."
    for attempt in range(100):
        # Perturb the seed knots (numpy broadcasting over the lists).
        ys = best_ys + np.random.normal(0, max(best_ys) - min(best_ys), len(best_ys))
        lps = best_lps + np.random.normal(0, max(best_lps) - min(best_lps) + 1, len(best_lps))
        ys = ys.tolist()
        lps = lps.tolist()
        # I use 1 - random() here because random() range is [0, 1).
        if limits[0] == SplineModel.neginf:
            ys.insert(0, ys[0] - 1/(1 - random.random()))
            lps.insert(0, -7)
        elif limits[0] < ys[0]:
            ys.insert(0, limits[0])
            lps.insert(0, -6 - 1/(1 - random.random()))
        # NOTE(review): these upper-limit knots are also insert(0, ...)ed at
        # the FRONT, leaving ys unsorted for the spline -- looks like they
        # should be appended; confirm against spline_model expectations.
        if limits[1] == SplineModel.posinf:
            ys.insert(0, ys[-1] + 1/(1 - random.random()))
            lps.insert(0, -7)
        elif limits[1] > ys[-1]:
            ys.insert(0, limits[1])
            lps.insert(0, -6 - 1/(1 - random.random()))
        spline = InterpolatedUnivariateSpline(ys, lps, k=2)
        error = FeaturesInterpreter.evaluate_spline(header, row, spline, limits)
        if error < best_error:
            best_spline = spline
            best_error = error
    return best_spline
@staticmethod
def evaluate_spline(header, row, spline, limits):
    """Score how well a log-density spline reproduces the row's features.

    Numerically normalizes exp(spline) over the clipped limits, then sums
    absolute deviations between each requested feature (quantile, mean,
    mode, var, skew) and the value implied by the spline. Lower is better.
    """
    # Clip the evaluation range to the spline's support.
    limits = (max(min(spline.get_knots()), float(limits[0])), min(max(spline.get_knots()), float(limits[1])))
    ys = np.linspace(limits[0], limits[1], len(header) * SplineModel.samples)
    ps = np.exp(spline(ys)) * (limits[1] - limits[0]) / (len(header) * SplineModel.samples)
    ps = ps / sum(ps)
    cfs = np.cumsum(ps)
    if 'mean' in header or 'var' in header or 'skew' in header:
        mean = sum(ps * ys)
        if 'var' in header or 'skew' in header:
            var = sum(ps * np.square(ys - mean))
    error = 0
    for ii in range(1, len(header)):
        if isinstance(header[ii], float):
            # Numeric header = requested quantile.
            error = error + np.abs(SplineModelConditional.find_nearest(cfs, header[ii], ys) - float(row[ii]))
        elif header[ii] == 'mean':
            error = error + np.abs(mean - float(row[ii]))
        elif header[ii] == 'mode':
            mode = ys[ps.argmax()]
            error = error + np.abs(mode - float(row[ii]))
        elif header[ii] == 'var':
            error = error + np.sqrt(np.abs(var - float(row[ii])))
        elif header[ii] == 'skew':
            # Fix: `np.pow` is not a numpy function and bare `sqrt` was never
            # imported (NameError on this path); use np.power / np.sqrt.
            skew = sum(ps * np.power((ys - mean) / np.sqrt(var), 3))
            error = error + np.power(np.abs(skew - float(row[ii])), 1.0/3)
    return error
| jrising/open-estimate | models/features_interpreter.py | Python | gpl-3.0 | 17,777 |
#
#
# Copyright (C) 2008, 2009, 2010 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base classes for worker pools.
"""
import logging
import threading
import heapq
import itertools
from ganeti import compat
from ganeti import errors
_TERMINATE = object()
_DEFAULT_PRIORITY = 0
class DeferTask(Exception):
  """Special exception class to defer a task.

  This class can be raised by L{BaseWorker.RunTask} to defer the execution of a
  task. Optionally, the priority of the task can be changed.

  """
  def __init__(self, priority=None):
    """Initializes this class.

    @type priority: number
    @param priority: New task priority (None means no change)

    """
    super(DeferTask, self).__init__()
    # Priority the task should be re-queued with; None keeps the old one.
    self.priority = priority
class NoSuchTask(Exception):
  """Exception raised when a task can't be found.

  Raised by L{WorkerPool.ChangeTaskPriority} for unknown task IDs.

  """
class BaseWorker(threading.Thread, object):
"""Base worker class for worker pools.
Users of a worker pool must override RunTask in a subclass.
"""
# pylint: disable=W0212
def __init__(self, pool, worker_id):
    """Constructor for BaseWorker thread.

    @param pool: the parent worker pool
    @param worker_id: identifier for this worker; also used as the thread name

    """
    super(BaseWorker, self).__init__(name=worker_id)
    self.pool = pool
    self._worker_id = worker_id
    # Task tuple currently being processed; None while idle.
    self._current_task = None
    assert self.getName() == worker_id
def ShouldTerminate(self):
    """Returns whether this worker should terminate.

    Should only be called from within L{RunTask}.

    """
    self.pool._lock.acquire()
    try:
        # Only meaningful while a task is running, i.e. from RunTask.
        assert self._HasRunningTaskUnlocked()
        return self.pool._ShouldWorkerTerminateUnlocked(self)
    finally:
        self.pool._lock.release()
def GetCurrentPriority(self):
    """Returns the priority of the current task.

    Should only be called from within L{RunTask}.

    """
    self.pool._lock.acquire()
    try:
        assert self._HasRunningTaskUnlocked()
        # Task tuples are (priority, order ID, task ID, args).
        (priority, _, _, _) = self._current_task
        return priority
    finally:
        self.pool._lock.release()
def SetTaskName(self, taskname):
    """Sets the name of the current task.

    Should only be called from within L{RunTask}.

    @type taskname: string
    @param taskname: Task's name

    """
    # Suffix the worker id with the task name when one is given; an empty
    # or None taskname resets the thread name to the plain worker id.
    if taskname:
        new_name = "%s/%s" % (self._worker_id, taskname)
    else:
        new_name = self._worker_id
    # Set thread name
    self.setName(new_name)
def _HasRunningTaskUnlocked(self):
"""Returns whether this worker is currently running a task.
"""
return (self._current_task is not None)
def _GetCurrentOrderAndTaskId(self):
    """Returns the order and task ID of the current task.

    Should only be called from within L{RunTask}.

    """
    self.pool._lock.acquire()
    try:
        assert self._HasRunningTaskUnlocked()
        # Task tuples are (priority, order ID, task ID, args).
        (_, order_id, task_id, _) = self._current_task
        return (order_id, task_id)
    finally:
        self.pool._lock.release()
def run(self):
    """Main thread function.

    Waits for new tasks to show up in the queue, runs them via L{RunTask},
    and re-queues tasks deferred with L{DeferTask}. Exits when the pool
    hands back the _TERMINATE sentinel. (Python 2 except syntax.)

    """
    pool = self.pool
    while True:
        assert self._current_task is None
        defer = None
        try:
            # Wait on lock to be told either to terminate or to do a task
            pool._lock.acquire()
            try:
                task = pool._WaitForTaskUnlocked(self)
                if task is _TERMINATE:
                    # Told to terminate
                    break
                if task is None:
                    # Spurious notification, ignore
                    continue
                self._current_task = task
                # No longer needed, dispose of reference
                del task
                assert self._HasRunningTaskUnlocked()
            finally:
                pool._lock.release()
            # Task tuples are (priority, order ID, task ID, args).
            (priority, _, _, args) = self._current_task
            try:
                # Run the actual task
                assert defer is None
                logging.debug("Starting task %r, priority %s", args, priority)
                assert self.getName() == self._worker_id
                try:
                    self.RunTask(*args) # pylint: disable=W0142
                finally:
                    # Always reset the thread name, even on failure.
                    self.SetTaskName(None)
                logging.debug("Done with task %r, priority %s", args, priority)
            except DeferTask, err:
                # RunTask asked for the task to be re-queued.
                defer = err
                if defer.priority is None:
                    # Use same priority
                    defer.priority = priority
                logging.debug("Deferring task %r, new priority %s",
                              args, defer.priority)
                assert self._HasRunningTaskUnlocked()
            except: # pylint: disable=W0702
                # Catch-all keeps the worker thread alive after task errors.
                logging.exception("Caught unhandled exception")
                assert self._HasRunningTaskUnlocked()
        finally:
            # Notify pool
            pool._lock.acquire()
            try:
                if defer:
                    assert self._current_task
                    # Schedule again for later run
                    (_, _, task_id, args) = self._current_task
                    pool._AddTaskUnlocked(args, defer.priority, task_id)
                if self._current_task:
                    self._current_task = None
                    # Quiesce waits on this condition for running tasks.
                    pool._worker_to_pool.notifyAll()
            finally:
                pool._lock.release()
        assert not self._HasRunningTaskUnlocked()
    logging.debug("Terminates")
def RunTask(self, *args):
    """Function called to start a task.

    This needs to be implemented by child classes.

    @param args: arguments as passed to L{WorkerPool.AddTask}

    """
    raise NotImplementedError()
class WorkerPool(object):
"""Worker pool with a queue.
This class is thread-safe.
Tasks are guaranteed to be started in the order in which they're
added to the pool. Due to the nature of threading, they're not
guaranteed to finish in the same order.
@type _tasks: list of tuples
@ivar _tasks: Each tuple has the format (priority, order ID, task ID,
arguments). Priority and order ID are numeric and essentially control the
sort order. The order ID is an increasing number denoting the order in
which tasks are added to the queue. The task ID is controlled by user of
workerpool, see L{AddTask} for details. The task arguments are C{None} for
abandoned tasks, otherwise a sequence of arguments to be passed to
L{BaseWorker.RunTask}). The list must fulfill the heap property (for use by
the C{heapq} module).
@type _taskdata: dict; (task IDs as keys, tuples as values)
@ivar _taskdata: Mapping from task IDs to entries in L{_tasks}
"""
def __init__(self, name, num_workers, worker_class):
    """Constructor for worker pool.

    @param name: prefix used for the worker thread names
    @param num_workers: number of workers to be started
      (dynamic resizing is not yet implemented)
    @param worker_class: the class to be instantiated for workers;
      should derive from L{BaseWorker}

    """
    # Some of these variables are accessed by BaseWorker
    self._lock = threading.Lock()
    # Condition variables all share the pool lock; each is named after the
    # notification direction (notifier_to_waiter).
    self._pool_to_pool = threading.Condition(self._lock)
    self._pool_to_worker = threading.Condition(self._lock)
    self._worker_to_pool = threading.Condition(self._lock)
    self._worker_class = worker_class
    self._name = name
    self._last_worker_id = 0
    self._workers = []
    self._quiescing = False
    self._active = True
    # Terminating workers
    self._termworkers = []
    # Queued tasks
    self._counter = itertools.count()
    self._tasks = []
    self._taskdata = {}
    # Start workers
    self.Resize(num_workers)
# TODO: Implement dynamic resizing?
def _WaitWhileQuiescingUnlocked(self):
    """Wait until the worker pool has finished quiescing.

    Caller must hold the pool lock; L{Quiesce} notifies this condition.

    """
    while self._quiescing:
        self._pool_to_pool.wait()
def _AddTaskUnlocked(self, args, priority, task_id):
    """Adds a task to the internal queue.

    Caller must hold the pool lock.

    @type args: sequence
    @param args: Arguments passed to L{BaseWorker.RunTask}
    @type priority: number
    @param priority: Task priority
    @param task_id: Task ID

    """
    assert isinstance(args, (tuple, list)), "Arguments must be a sequence"
    assert isinstance(priority, (int, long)), "Priority must be numeric"
    assert task_id is None or isinstance(task_id, (int, long)), \
        "Task ID must be numeric or None"
    # Mutable list so ChangeTaskPriority can abandon it in place.
    task = [priority, self._counter.next(), task_id, args]
    if task_id is not None:
        assert task_id not in self._taskdata
        # Keep a reference to change priority later if necessary
        self._taskdata[task_id] = task
    # A counter is used to ensure elements are processed in their incoming
    # order. For processing they're sorted by priority and then counter.
    heapq.heappush(self._tasks, task)
    # Notify a waiting worker
    self._pool_to_worker.notify()
def AddTask(self, args, priority=_DEFAULT_PRIORITY, task_id=None):
    """Adds a task to the queue.

    @type args: sequence
    @param args: arguments passed to L{BaseWorker.RunTask}
    @type priority: number
    @param priority: Task priority
    @param task_id: Task ID

    @note: The task ID can be essentially anything that can be used as a
      dictionary key. Callers, however, must ensure a task ID is unique while a
      task is in the pool or while it might return to the pool due to deferring
      using L{DeferTask}.

    """
    self._lock.acquire()
    try:
        # New tasks are held back while the pool is quiescing.
        self._WaitWhileQuiescingUnlocked()
        self._AddTaskUnlocked(args, priority, task_id)
    finally:
        self._lock.release()
def AddManyTasks(self, tasks, priority=_DEFAULT_PRIORITY, task_id=None):
    """Add a list of tasks to the queue.

    @type tasks: list of tuples
    @param tasks: list of args passed to L{BaseWorker.RunTask}
    @type priority: number or list of numbers
    @param priority: Priority for all added tasks or a list with the priority
      for each task
    @type task_id: list
    @param task_id: List with the ID for each task
    @note: See L{AddTask} for a note on task IDs.

    """
    assert compat.all(isinstance(task, (tuple, list)) for task in tasks), \
        "Each task must be a sequence"
    assert (isinstance(priority, (int, long)) or
            compat.all(isinstance(prio, (int, long)) for prio in priority)), \
        "Priority must be numeric or be a list of numeric values"
    assert task_id is None or isinstance(task_id, (tuple, list)), \
        "Task IDs must be in a sequence"
    if isinstance(priority, (int, long)):
        # Broadcast a single priority to all tasks.
        priority = [priority] * len(tasks)
    elif len(priority) != len(tasks):
        raise errors.ProgrammerError("Number of priorities (%s) doesn't match"
                                     " number of tasks (%s)" %
                                     (len(priority), len(tasks)))
    if task_id is None:
        task_id = [None] * len(tasks)
    elif len(task_id) != len(tasks):
        raise errors.ProgrammerError("Number of task IDs (%s) doesn't match"
                                     " number of tasks (%s)" %
                                     (len(task_id), len(tasks)))
    self._lock.acquire()
    try:
        self._WaitWhileQuiescingUnlocked()
        assert compat.all(isinstance(prio, (int, long)) for prio in priority)
        assert len(tasks) == len(priority)
        assert len(tasks) == len(task_id)
        # All tasks are added under one lock acquisition, so they end up
        # adjacent in the queue (subject to priority).
        for (args, prio, tid) in zip(tasks, priority, task_id):
            self._AddTaskUnlocked(args, prio, tid)
    finally:
        self._lock.release()
def ChangeTaskPriority(self, task_id, priority):
    """Changes a task's priority.

    @param task_id: Task ID
    @type priority: number
    @param priority: New task priority
    @raise NoSuchTask: When the task referred by C{task_id} can not be found
      (it may never have existed, may have already been processed, or is
      currently running)

    """
    assert isinstance(priority, (int, long)), "Priority must be numeric"
    self._lock.acquire()
    try:
        logging.debug("About to change priority of task %s to %s",
                      task_id, priority)
        # Find old task
        oldtask = self._taskdata.get(task_id, None)
        if oldtask is None:
            msg = "Task '%s' was not found" % task_id
            logging.debug(msg)
            raise NoSuchTask(msg)
        # Prepare new task
        newtask = [priority] + oldtask[1:]
        # Mark old entry as abandoned (this doesn't change the sort order and
        # therefore doesn't invalidate the heap property of L{self._tasks}).
        # See also <http://docs.python.org/library/heapq.html#priority-queue-
        # implementation-notes>.
        oldtask[-1] = None
        # Change reference to new task entry and forget the old one
        assert task_id is not None
        self._taskdata[task_id] = newtask
        # Add a new task with the old number and arguments
        heapq.heappush(self._tasks, newtask)
        # Notify a waiting worker
        self._pool_to_worker.notify()
    finally:
        self._lock.release()
def SetActive(self, active):
    """Enable/disable processing of tasks.

    This is different from L{Quiesce} in the sense that this function just
    changes an internal flag and doesn't wait for the queue to be empty. Tasks
    already being processed continue normally, but no new tasks will be
    started. New tasks can still be added.

    @type active: bool
    @param active: Whether tasks should be processed

    """
    with self._lock:
        self._active = active
        if active:
            # Wake every worker so they re-check for pending tasks.
            self._pool_to_worker.notifyAll()
def _WaitForTaskUnlocked(self, worker):
    """Waits for a task for a worker.

    Caller must hold the pool lock. Returns the task tuple, the _TERMINATE
    sentinel when the worker should exit, or loops on spurious wakeups.

    @type worker: L{BaseWorker}
    @param worker: Worker thread

    """
    while True:
        if self._ShouldWorkerTerminateUnlocked(worker):
            return _TERMINATE
        # If there's a pending task, return it immediately
        if self._active and self._tasks:
            # Get task from queue and tell pool about it
            try:
                task = heapq.heappop(self._tasks)
            finally:
                # Quiesce waits on this condition for queue changes.
                self._worker_to_pool.notifyAll()
            (_, _, task_id, args) = task
            # If the priority was changed, "args" is None
            if args is None:
                # Try again
                logging.debug("Found abandoned task (%r)", task)
                continue
            # Delete reference
            if task_id is not None:
                del self._taskdata[task_id]
            return task
        logging.debug("Waiting for tasks")
        # wait() releases the lock and sleeps until notified
        self._pool_to_worker.wait()
        logging.debug("Notified while waiting")
def _ShouldWorkerTerminateUnlocked(self, worker):
"""Returns whether a worker should terminate.
"""
return (worker in self._termworkers)
def _HasRunningTasksUnlocked(self):
"""Checks whether there's a task running in a worker.
"""
for worker in self._workers + self._termworkers:
if worker._HasRunningTaskUnlocked(): # pylint: disable=W0212
return True
return False
def HasRunningTasks(self):
    """Checks whether there's at least one task running.

    @rtype: bool

    """
    self._lock.acquire()
    try:
        return self._HasRunningTasksUnlocked()
    finally:
        self._lock.release()
def Quiesce(self):
    """Waits until the task queue is empty.

    While quiescing, AddTask/AddManyTasks block; workers notify
    _worker_to_pool as tasks are picked up and finished.

    """
    self._lock.acquire()
    try:
        self._quiescing = True
        # Wait while there are tasks pending or running
        while self._tasks or self._HasRunningTasksUnlocked():
            self._worker_to_pool.wait()
    finally:
        self._quiescing = False
        # Make sure AddTasks continues in case it was waiting
        self._pool_to_pool.notifyAll()
        self._lock.release()
def _NewWorkerIdUnlocked(self):
"""Return an identifier for a new worker.
"""
self._last_worker_id += 1
return "%s%d" % (self._name, self._last_worker_id)
    def _ResizeUnlocked(self, num_workers):
        """Changes the number of workers.

        Must be called with the pool lock held. Shrinking is only
        implemented down to zero: all workers move to the terminating
        list, are woken up and joined (the lock is released while
        joining). Growing starts new worker threads.

        """
        assert num_workers >= 0, "num_workers must be >= 0"

        logging.debug("Resizing to %s workers", num_workers)

        current_count = len(self._workers)

        if current_count == num_workers:
            # Nothing to do
            pass

        elif current_count > num_workers:
            if num_workers == 0:
                # Create copy of list to iterate over while lock isn't held.
                termworkers = self._workers[:]
                del self._workers[:]
            else:
                # TODO: Implement partial downsizing
                raise NotImplementedError()
                #termworkers = ...

            self._termworkers += termworkers

            # Notify workers that something has changed
            self._pool_to_worker.notifyAll()

            # Join all terminating workers
            self._lock.release()
            try:
                for worker in termworkers:
                    logging.debug("Waiting for thread %s", worker.getName())
                    worker.join()
            finally:
                self._lock.acquire()

            # Remove terminated threads. This could be done in a more efficient way
            # (del self._termworkers[:]), but checking worker.isAlive() makes sure we
            # don't leave zombie threads around.
            for worker in termworkers:
                assert worker in self._termworkers, ("Worker not in list of"
                                                     " terminating workers")
                if not worker.isAlive():
                    self._termworkers.remove(worker)

            assert not self._termworkers, "Zombie worker detected"

        elif current_count < num_workers:
            # Create (num_workers - current_count) new workers
            for _ in range(num_workers - current_count):
                worker = self._worker_class(self, self._NewWorkerIdUnlocked())
                self._workers.append(worker)
                worker.start()
    def Resize(self, num_workers):
        """Changes the number of workers in the pool.

        Thread-safe wrapper around L{_ResizeUnlocked}.

        @param num_workers: the new number of workers

        """
        self._lock.acquire()
        try:
            return self._ResizeUnlocked(num_workers)
        finally:
            self._lock.release()
    def TerminateWorkers(self):
        """Terminate all worker threads.

        Unstarted tasks will be ignored.

        """
        logging.debug("Terminating all workers")

        self._lock.acquire()
        try:
            self._ResizeUnlocked(0)

            if self._tasks:
                logging.debug("There are %s tasks left", len(self._tasks))
        finally:
            self._lock.release()

        logging.debug("All workers terminated")
| apyrgio/snf-ganeti | lib/workerpool.py | Python | bsd-2-clause | 19,287 |
#
# Copyright (C) 2018 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, traceback, time
from genericworker import *
class SpecificWorker(GenericWorker):
    def __init__(self, proxy_map):
        """Set up the worker: IMU data holder plus the periodic compute timer."""
        super(SpecificWorker, self).__init__(proxy_map)
        self.timer.timeout.connect(self.compute)
        # Latest IMU reading, published on every compute() tick
        self.imu = DataImu()
        self.Period = 100  # timer period in milliseconds
        self.timer.start(self.Period)
        print("Start with period: ", self.Period)
    def setParams(self, params):
        """Open the serial device named by params["device"]; exit on failure."""
        try:
            # NOTE(review): opened in text mode -- assumes the IMU streams
            # newline-terminated ASCII "<yaw> <roll> <pitch>" lines; confirm
            # against compute()
            self.puerto = open(params["device"], "r")
            print ("Device opened:",)
        except IOError:
            print("Error opening serial port:", params["device"], "check device is connected")
            sys.exit(-1)
        return True
    @QtCore.Slot()
    def compute(self):
        """Read one line from the device, parse yaw/roll/pitch, publish DataImu."""
        print ('SpecificWorker.compute...')
        try:
            line = self.puerto.readline()
            # Expected line format: three space-separated floats
            values = line.strip().split(' ')
            self.imu.rot.Yaw = float(values[0])
            self.imu.rot.Roll = float(values[1])
            self.imu.rot.Pitch = float(values[2])
            print ("Data(y,r,p):", self.imu.rot.Yaw, self.imu.rot.Roll, self.imu.rot.Pitch)
            # Publish the fresh reading to all IMU subscribers
            self.imupub_proxy.publish(self.imu)
        except Ice.Exception as e:
            traceback.print_exc()
            print(e)
        return True
    # IMU implementation
    # resetImu
    #
    def resetImu(self):
        """Reset the IMU to its initial state (not implemented)."""
        print("ERROR: not implemented yet")
#
# getAngularVel
#
def getAngularVel(self):
ret = Gyroscope()
return ret
#
# getOrientation
#
def getOrientation(self):
ret = Orientation()
return ret
#
# getDataImu
#
def getDataImu(self):
return DataImu()
#
# getMagneticFields
#
def getMagneticFields(self):
ret = Magnetic()
return ret
#
# getAcceleration
#
def getAcceleration(self):
ret = Acceleration()
| robocomp/robocomp-robolab | components/hardware/imu/pyimu/src/specificworker.py | Python | gpl-3.0 | 2,340 |
# Copyright (c) 2015 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class CousteauGenericError(Exception):
    """Custom Exception class for cousteau general errors."""
    pass
class APIResponseError(Exception):
    """Custom Exception class raised for errors in ATLAS API responses."""
| danielquinn/ripe-atlas-cousteau | ripe/atlas/cousteau/exceptions.py | Python | gpl-3.0 | 892 |
from __future__ import print_function
from BigStash.base import BigStashAPIBase
from BigStash.decorators import json_response, no_content_response
from BigStash.error import BigStashError, ResourceNotModified
from cached_property import cached_property
from BigStash import models
from BigStash.serialize import model_to_json
from BigStash.sign import HTTPSignatureAuth
from itertools import chain
import logging
log = logging.getLogger('bigstash.api')
class BigStashAPI(BigStashAPIBase):
    """HTTP-signature-authenticated client for the BigStash REST API.

    Provides typed helpers for the user, archive, upload, notification
    and token resources, with lazy pagination over list endpoints.
    """

    USER_DETAIL = "user"
    UPLOAD_LIST = "uploads"
    UPLOAD_DETAIL = "uploads/{id}"
    ARCHIVE_LIST = "archives"
    ARCHIVE_DETAIL = "archives/{id}"
    ARCHIVE_FILES = "archives/{id}/files/"
    ARCHIVE_UPLOAD = "archives/{id}/upload"
    TOKEN_DETAIL = "tokens/{id}"
    NOTIFICATION_LIST = "notifications"

    def __init__(self, key=None, secret=None, *args, **kwargs):
        """Initialize a :class:`BigStashAPI <BigStashAPI>` object.

        :param key: API key
        :param secret: API secret
        :param settings: optional :class:`BigStashAPISettings` instance to use.

        :raises TypeError: if either `key` or `secret` is missing.

        Usage::

          >>> from BigStash import BigStashAPI
          >>> api = BigStashAPI('AHBFEXAMPLE', '12039898FADEXAMPLE')
          >>> archives = api.GetArchives()
          [{ }]

        """
        headers = kwargs.setdefault('headers', {})
        self.key = key
        self.secret = secret
        if key is None or secret is None:
            raise TypeError("Must provide API key and secret.")
        # setup auth: every request carries the API key header and an
        # HMAC signature over the listed headers
        headers['X-Deepfreeze-Api-Key'] = self.key
        signature_headers = ['(request-target)', 'date', 'host']
        auth = HTTPSignatureAuth(key_id=self.key, secret=self.secret,
                                 algorithm='hmac-sha256',
                                 headers=signature_headers)
        super(BigStashAPI, self).__init__(auth=auth, *args, **kwargs)

    @cached_property
    @json_response
    def _root_resource(self):
        # Fetched once per instance (cached_property); the API root maps
        # top level resource names to their URLs.
        return self.get('')

    @property
    def _root(self):
        return self._root_resource[0]  # we only care about the body

    def _top_resource_url(self, resource):
        """Return the URL of a top level resource as listed by the API root."""
        msg = "invalid resource '{}'".format(resource)
        try:
            return self._root[resource]
        except BigStashError:
            log.debug("error getting top resource url", exc_info=True)
            raise
        except Exception:
            log.error("error getting top resource url", exc_info=True)
            raise BigStashError(msg)

    @json_response
    def _get_page(self, url):
        """Fetch one page of a paginated listing."""
        return self.get(url)

    def _get_top_list(self, model):
        """Yield all instances of *model*, following pagination links."""
        name = model.__name__.lower() + 's'
        body = {'next': self._top_resource_url(name)}
        while body['next'] is not None:
            body, headers = self._get_page(body['next'])
            for r in body['results']:
                yield model(**r)

    def _list_next(self, olist):
        """Lazily fetch and append the remaining pages of an ObjectList."""
        while olist.next is not None:
            body, headers = self._get_page(olist.next)
            olist.next = body['next']
            for r in body['results']:
                obj = olist.klass(**r)
                olist.objects.append(obj)
                yield obj

    def get_all_objects(self, olist):
        """Chain the already-fetched objects with the not-yet-fetched pages."""
        return chain(olist, self._list_next(olist))

    def _refresh_resource(self, obj, **kwargs):
        """Re-fetch *obj*, honouring If-Modified-Since to skip stale work."""
        lm = obj.get_meta('last-modified')
        if lm is not None:
            hdrs = kwargs.setdefault('headers', {})
            hdrs['If-Modified-Since'] = lm
        try:
            r, h = json_response(self.get)(obj.url, **kwargs)
            return obj.__class__(meta=h, **r)
        except ResourceNotModified:
            # 304: the cached object is still current
            return obj

    def _add_pagination_param(self, params=None, page=None):
        """
        Add the proper query parameters for pagination.

        Fix: the previous signature used a mutable default (``params={}``)
        which ``update()`` mutated in place, leaking the 'page' key between
        unrelated calls; a fresh dict is now created per call.
        """
        if params is None:
            params = {}
        if page:
            params.update({'page': page})
        return params

    def GetNotifications(self):
        """
        Get all notifications
        """
        return self._get_top_list(models.Notification)

    def GetUploads(self):
        """
        Get all uploads. Returns an ObjectList.
        """
        return self._get_top_list(models.Upload)

    def GetArchives(self):
        """
        Get a list of archives. Returns an ObjectList
        """
        return self._get_top_list(models.Archive)

    @json_response
    def _get_user(self):
        return self.get(self.USER_DETAIL)

    def GetUser(self):
        """Get the user resource"""
        body, headers = self._get_user()
        return models.User(meta=headers, **body)

    def GetArchive(self, archive_id):
        """ Get details for an archive

        :param archive_id: the archive id
        """
        body, headers = json_response(self.get)(
            self.ARCHIVE_DETAIL.format(id=archive_id))
        return models.Archive(meta=headers, **body)

    @json_response
    def GetArchiveFiles(self, archive_id):
        """ Get archive files

        :param archive_id: the archive id
        """
        return self.get(self.ARCHIVE_FILES.format(id=archive_id))

    @json_response
    def GetUpload(self, upload_id):
        """ Get details for an upload

        :param upload_id: the upload id
        """
        return self.get(self.UPLOAD_DETAIL.format(id=upload_id))

    def CreateArchive(self, title=None, size=None, **kwargs):
        """ Create a new archive. Returns an Archive instance.

        :param title: the archive title
        :param size: the archive size in bytes
        """
        body, headers = json_response(self.post)(
            self._top_resource_url('archives'),
            json={'title': title, 'size': size}, **kwargs)
        return models.Archive(meta=headers, **body)

    def RefreshUploadStatus(self, upload):
        """Return a fresh copy of *upload* (no-op if not modified)."""
        log.debug("Refreshing upload {}".format(upload))
        upload = self._refresh_resource(upload)
        log.debug("Refreshed upload {}".format(upload))
        return upload

    def CreateUpload(self, archive=None, manifest=None, **kwargs):
        """ Create a new upload for an archive

        :param archive: the archive model instance
        :param manifest: the upload manifest
        """
        if archive is not None:
            url = archive.upload
        else:
            url = self._top_resource_url('uploads')
        kwargs['data'] = model_to_json(manifest)
        body, headers = json_response(self.post)(url, **kwargs)
        return models.Upload(meta=headers, **body)

    @json_response
    def UpdateUploadFiles(self, upload, files=None):
        # TODO: not implemented yet
        pass

    def UpdateUploadStatus(self, upload, status):
        """ Update an upload's status

        :param upload_id: the upload id
        :param status: the new upload status
        """
        patch = {"status": status}
        log.debug("Updating {} with status='{}'".format(upload, status))
        body, headers = json_response(self.patch)(
            upload.url, json=patch)
        # Mirror the server-side change on the local object
        upload.update(patch, headers)

    @no_content_response
    def CancelUpload(self, upload_id):
        """ Cancel an upload

        :param upload_id: the upload id
        """
        return self.delete(self.UPLOAD_DETAIL.format(id=upload_id))

    @no_content_response
    def DestroyAPIKey(self, token_id):
        """ Delete an API key

        :param token_id: the token id
        """
        return self.delete(self.TOKEN_DETAIL.format(id=token_id))
if __name__ == "__main__":
    # Ad-hoc CLI: "python api.py <MethodName> [args...]" invokes the named
    # API method; with no arguments it drops into an IPython shell with
    # `api` bound to an authenticated client.
    import sys
    from BigStash.models import ObjectList
    from BigStash.conf import BigStashAPISettings
    from BigStash.auth import get_api_credentials
    settings = BigStashAPISettings.load_settings()
    k, s = get_api_credentials(settings)
    api = BigStashAPI(key=k, secret=s, settings=settings)
    logging.basicConfig()
    if len(sys.argv) > 1:
        method = sys.argv[1]
        args = sys.argv[2:]
        if not hasattr(api, method):
            print("No such method {}".format(method))
        try:
            r = getattr(api, method)(*args)
        except BigStashError as e:
            print("There was an error: {}".format(e))
            sys.exit(1)
        # Normalize single results to a list so printing is uniform
        if not isinstance(r, ObjectList):
            r = [r]
        for obj in r:
            print(obj)
    else:
        import IPython
        IPython.embed(user_ns={
            'api': api
        })
| longaccess/bigstash-python | BigStash/api.py | Python | apache-2.0 | 8,337 |
from decimal import Decimal
# Conversion factor between a native-currency amount and its integer wire
# representation (from_json divides by it, to_json multiplies)
_SCALE = 1000000
def _clean_up(value):
    """Normalize a numeric value to its shortest string form.

    Trailing zeros after a decimal point are removed, as is a bare
    trailing '.' (e.g. "1.500" -> "1.5", "2.000" -> "2").
    """
    text = str(value)
    if '.' not in text:
        return text
    text = text.rstrip('0')
    if text.endswith('.'):
        text = text[:-1]
    return text
def create_amount(native):
    """Build an ``Amount`` class bound to *native* as its native currency.

    Native amounts serialize to/from plain integer strings scaled by
    ``_SCALE``; issued amounts serialize to/from a dict with ``value``,
    ``currency`` and ``issuer`` keys.
    """
    class Amount(object):
        def __init__(self, value, currency=native, issuer=None):
            self.value = _clean_up(value)
            self.currency = currency
            self.issuer = issuer

        @staticmethod
        def from_json(amount):
            """Parse the JSON wire form into an Amount."""
            # isinstance instead of a strict type() comparison so dict
            # subclasses are treated as issued amounts as well
            if not isinstance(amount, dict):
                # Native amounts arrive as scaled integer strings
                return Amount(str(Decimal(amount) / _SCALE))
            else:
                assert 'value' in amount
                assert 'currency' in amount
                assert 'issuer' in amount
                return Amount(**amount)

        def to_json(self):
            """Serialize back to the JSON wire form."""
            if self.currency == native:
                return str(int(Decimal(self.value) * _SCALE))
            else:
                return {
                    'value': self.value,
                    'currency': self.currency,
                    'issuer': self.issuer
                }

    return Amount
| johansten/rtxp-py | rtxp/core/amount.py | Python | bsd-3-clause | 845 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import sickbeard
import generic
from sickbeard import logger, exceptions, helpers
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
import xml.etree.cElementTree as etree
class WDTVMetadata(generic.GenericMetadata):
    """
    Metadata generation class for WDTV

    The following file structure is used:

    show_root/folder.jpg                    (poster)
    show_root/Season ##/folder.jpg          (season thumb)
    show_root/Season ##/filename.ext        (*)
    show_root/Season ##/filename.metathumb  (episode thumb)
    show_root/Season ##/filename.xml        (episode metadata)
    """

    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):

        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)

        self.name = 'WDTV'

        self._ep_nfo_extension = 'xml'

        self.poster_name = "folder.jpg"

        # web-ui metadata template
        self.eg_show_metadata = "<i>not supported</i>"
        self.eg_episode_metadata = "Season##\\<i>filename</i>.xml"
        self.eg_fanart = "<i>not supported</i>"
        self.eg_poster = "folder.jpg"
        self.eg_banner = "<i>not supported</i>"
        self.eg_episode_thumbnails = "Season##\\<i>filename</i>.metathumb"
        self.eg_season_posters = "Season##\\folder.jpg"
        self.eg_season_banners = "<i>not supported</i>"
        self.eg_season_all_poster = "<i>not supported</i>"
        self.eg_season_all_banner = "<i>not supported</i>"

    # Override with empty methods for unsupported features
    def retrieveShowMetadata(self, folder):
        # no show metadata generated, we abort this lookup function
        return (None, None, None)

    def create_show_metadata(self, show_obj, force=False):
        # show-level metadata is not supported by WDTV
        pass

    def update_show_indexer_metadata(self, show_obj):
        pass

    def get_show_file_path(self, show_obj):
        pass

    def create_fanart(self, show_obj):
        pass

    def create_banner(self, show_obj):
        pass

    def create_season_banners(self, show_obj):
        pass

    def create_season_all_poster(self, show_obj):
        pass

    def create_season_all_banner(self, show_obj):
        pass

    def get_episode_thumb_path(self, ep_obj):
        """
        Returns the path where the episode thumbnail should be stored. Defaults to
        the same path as the episode file but with a .metathumb extension.

        ep_obj: a TVEpisode instance for which to create the thumbnail
        """
        if ek.ek(os.path.isfile, ep_obj.location):
            tbn_filename = helpers.replaceExtension(ep_obj.location, 'metathumb')
        else:
            return None

        return tbn_filename

    def get_season_poster_path(self, show_obj, season):
        """
        Season thumbs for WDTV go in Show Dir/Season X/folder.jpg

        If no season folder exists, None is returned
        """

        # Only consider subdirectories of the show directory
        dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if
                    ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))]

        season_dir_regex = '^Season\s+(\d+)$'

        season_dir = None

        for cur_dir in dir_list:
            # Season 0 lives in "Specials" rather than "Season 0"
            if season == 0 and cur_dir == "Specials":
                season_dir = cur_dir
                break

            match = re.match(season_dir_regex, cur_dir, re.I)
            if not match:
                continue

            cur_season = int(match.group(1))

            if cur_season == season:
                season_dir = cur_dir
                break

        if not season_dir:
            logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG)
            return None

        logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG)

        return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg')

    def _ep_data(self, ep_obj):
        """
        Creates an elementTree XML structure for a WDTV style episode.xml
        and returns the resulting data object.

        ep_obj: a TVShow instance to create the NFO for
        """

        eps_to_write = [ep_obj] + ep_obj.relatedEps

        indexer_lang = ep_obj.show.lang

        try:
            lINDEXER_API_PARMS = sickbeard.indexerApi(ep_obj.show.indexer).api_params.copy()

            lINDEXER_API_PARMS['actors'] = True

            if indexer_lang and not indexer_lang == 'en':
                lINDEXER_API_PARMS['language'] = indexer_lang

            if ep_obj.show.dvdorder != 0:
                lINDEXER_API_PARMS['dvdorder'] = True

            t = sickbeard.indexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
            myShow = t[ep_obj.show.indexerid]
        except sickbeard.indexer_shownotfound as e:
            raise exceptions.ShowNotFoundException(e.message)
        except sickbeard.indexer_error as e:
            logger.log(u"Unable to connect to " + sickbeard.indexerApi(
                ep_obj.show.indexer).name + " while creating meta files - skipping - " + ex(e), logger.ERROR)
            return False

        rootNode = etree.Element("details")

        # write an WDTV XML containing info for all matching episodes
        for curEpToWrite in eps_to_write:

            try:
                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
            except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
                logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(
                    curEpToWrite.episode) + " on " + sickbeard.indexerApi(
                    ep_obj.show.indexer).name + "... has it been removed? Should I delete from db?")
                return None

            # Specials without an air date get a placeholder date
            if getattr(myEp, 'firstaired', None) is None and ep_obj.season == 0:
                myEp["firstaired"] = str(datetime.date.fromordinal(1))

            if getattr(myEp, 'episodename', None) is None or getattr(myEp, 'firstaired', None) is None:
                return None

            # Multi-episode files nest one <details> element per episode
            if len(eps_to_write) > 1:
                episode = etree.SubElement(rootNode, "details")
            else:
                episode = rootNode

            # TODO: get right EpisodeID
            episodeID = etree.SubElement(episode, "id")
            episodeID.text = str(curEpToWrite.indexerid)

            title = etree.SubElement(episode, "title")
            title.text = ep_obj.prettyName()

            seriesName = etree.SubElement(episode, "series_name")
            if getattr(myShow, 'seriesname', None) is not None:
                seriesName.text = myShow["seriesname"]

            episodeName = etree.SubElement(episode, "episode_name")
            if curEpToWrite.name != None:
                episodeName.text = curEpToWrite.name

            seasonNumber = etree.SubElement(episode, "season_number")
            seasonNumber.text = str(curEpToWrite.season)

            episodeNum = etree.SubElement(episode, "episode_number")
            episodeNum.text = str(curEpToWrite.episode)

            firstAired = etree.SubElement(episode, "firstaired")

            if curEpToWrite.airdate != datetime.date.fromordinal(1):
                firstAired.text = str(curEpToWrite.airdate)

            year = etree.SubElement(episode, "year")
            if getattr(myShow, 'firstaired', None) is not None:
                try:
                    year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year)
                    if year_text:
                        year.text = year_text
                except:
                    pass

            runtime = etree.SubElement(episode, "runtime")
            if curEpToWrite.season != 0:
                if getattr(myShow, 'runtime', None) is not None:
                    runtime.text = myShow["runtime"]

            genre = etree.SubElement(episode, "genre")
            if getattr(myShow, 'genre', None) is not None:
                genre.text = " / ".join([x for x in myShow["genre"].split('|') if x])

            director = etree.SubElement(episode, "director")
            director_text = getattr(myEp, 'director', None)
            if director_text is not None:
                director.text = director_text

            if getattr(myShow, '_actors', None) is not None:
                for actor in myShow['_actors']:
                    cur_actor = etree.SubElement(episode, "actor")

                    cur_actor_name = etree.SubElement(cur_actor, "name")
                    cur_actor_name.text = actor['name']

                    cur_actor_role = etree.SubElement(cur_actor, "role")
                    cur_actor_role_text = actor['role']
                    if cur_actor_role_text != None:
                        cur_actor_role.text = cur_actor_role_text

            overview = etree.SubElement(episode, "overview")
            if curEpToWrite.description != None:
                overview.text = curEpToWrite.description

        # Make it purdy
        helpers.indentXML(rootNode)
        data = etree.ElementTree(rootNode)

        return data
# present a standard "interface" from the module: the plugin loader looks
# up `metadata_class` in every metadata module
metadata_class = WDTVMetadata
| adam111316/SickGear | sickbeard/metadata/wdtv.py | Python | gpl-3.0 | 10,786 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 07:19
from __future__ import unicode_literals
from django.db import migrations, models
def forwards_func(apps, schema_editor):
    """Delete every existing 正規化sheet表 row before the schema change."""
    # NOTE(review): presumably old rows are unusable once the credential
    # columns are dropped later in this migration -- confirm before reuse.
    正規化sheet表 = apps.get_model("臺灣言語平臺", "正規化sheet表")
    正規化sheet表.objects.all().delete()
class Migration(migrations.Migration):
    """Replace per-row Google API credentials with a key-file reference."""

    dependencies = [
        ('臺灣言語平臺', '0006_remove_使用者表_維護團隊佮分數'),
    ]

    operations = [
        # Clear existing rows first; their credential columns go away below
        migrations.RunPython(forwards_func),
        migrations.RemoveField(
            model_name='正規化sheet表',
            name='client_email',
        ),
        migrations.RemoveField(
            model_name='正規化sheet表',
            name='private_key',
        ),
        migrations.AddField(
            model_name='正規化sheet表',
            name='key_file_name',
            field=models.CharField(default='', max_length=200),
        ),
    ]
| sih4sing5hong5/tai5-uan5_gian5-gi2_phing5-tai5 | 臺灣言語平臺/migrations/0007_正規化sheet表直接使用key_file_name建立登入憑證.py | Python | mit | 965 |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from treemap.lib.object_caches import role_permissions
from django.contrib.gis.db.models import Field
from treemap.models import InstanceUser, Role, Plot, MapFeature
"""
Tools to assist in resolving permissions, specifically when the type of
the thing you are checking against can vary.
When it is simple enough to add a model.can_do_thing method to
models.py, you should do so. However, when the same permission logic
is being executed on varying types, like User, InstanceUser, Role, use
this module as a dispatching service.
It seems likely and beneficial that all permission checking move to
this module over time.
CAVEATS:
Perm checking is complicated by several factors. Helper methods live
on several models, and auxilary functions live in template_tags,
object_caches, and probably views in the multiple packages.
"""
# Permission level identifiers returned by helpers such as udf_write_level
WRITE = 'write'
READ = 'read'
# Attribute names looked up on FieldPermission objects by _allows_perm
ALLOWS_WRITES = 'allows_writes'
ALLOWS_READS = 'allows_reads'
# Fields a role must be able to write in order to handle photo objects
PHOTO_PERM_FIELDS = frozenset({'image', 'thumbnail', 'id', 'map_feature'})
def _allows_perm(role_related_obj, model_name,
                 predicate, perm_attr,
                 field=None,
                 fields=None,
                 feature_name=None):
    """
    The main permission testing function. This function should
    be called from exported (non-underscore) helper functions.

    role_related_obj can be a Role or InstanceUser (Add more types as needed)

    model_name is a ProperCamelCase string name of the model to test.

    predicate is the function used to set the minimum requirement for
    present permissions to pass the current test. Should be any, all,
    or a custom value.

    perm_attr is the minimum permission value necessary to consider
    the perm present in this context. Should correspond to an attr
    on the FieldPermission class.

    field/fields is the fields to use in conjunction with predicate and
    perm_attr.

    Together they form a truth statement like:
    "all of {'image', 'thumbnail'} have ALLOWS_WRITES"
    "any of {'id', 'mapfeature'} have ALLOWS_READS"

    feature_name checks if this feature is enabled for this instance. While
    not exactly connected to permissions, it's convenient to check this here
    as well.
    """
    role = _get_role_from_related_object(role_related_obj)
    if role is None:
        return False

    # A disabled instance feature denies regardless of field permissions
    if feature_name and not role.instance.feature_enabled(feature_name):
        return False

    perms = {perm for perm in
             role_permissions(role, role.instance, model_name)}

    # process args
    if field and fields:
        raise ValueError("Cannot provide non-None values "
                         "to both 'field' and 'fields'. Pick One.")
    elif field and not fields:
        fields = {field}
    elif not fields:
        fields = set()

    # forcibly convert fields to a set of names (strings)
    # if they were passed in as objects.
    fields = {field.name if isinstance(field, Field) else field
              for field in fields}

    if fields:
        # Restrict the permission set to the requested fields only
        perms = {perm for perm in perms if perm.field_name in fields}

    perm_attrs = {getattr(perm, perm_attr) for perm in perms}

    # TODO: find a better way to support 'all'
    # this is a hack around a quirk, that all([]) == True.
    # Since all is such a common case, it's still nice to
    # support it out of the box.
    if predicate == all and not perm_attrs:
        return False
    else:
        return predicate(perm_attrs)
def _get_role_from_related_object(role_related_obj):
    """Resolve a Role from an InstanceUser, a Role, or None.

    Returns None when no usable role can be determined.
    """
    if isinstance(role_related_obj, InstanceUser):
        if _invalid_instanceuser(role_related_obj):
            # TODO: in udf_write_level below, we do
            # this same check, but instead of returning
            # None, we go forward by assigning role
            # to be the default role for the given instance.
            # here, we won't always have instance in scope,
            # but we should consider factoring udf_write_level
            # into this method and optionally taking an instance
            # so that one can go forward with the default role.
            return None
        else:
            return role_related_obj.role
    elif isinstance(role_related_obj, type(None)):
        # almost certainly this is being called with
        # last_effective_instance_user without checking
        # first if it is None. Since we haven't received
        # the instance along with it, we can't resolve
        # the default role, so perms must be blocked entirely.
        # this is not so bad, because this block is mostly
        # to prevent 500 errors.
        return None
    elif isinstance(role_related_obj, Role):
        return role_related_obj
    else:
        raise NotImplementedError("Please provide a condition for '%s'"
                                  % type(role_related_obj))
def _invalid_instanceuser(instanceuser):
    """True when *instanceuser* cannot be used for permission checks."""
    if instanceuser is None or instanceuser == '':
        return True
    return instanceuser.user_id is None
def is_read_or_write(perm_string):
    """True when *perm_string* is one of the READ/WRITE level constants."""
    return perm_string in (READ, WRITE)
def is_deletable(instanceuser, obj):
    """True when *instanceuser* may delete *obj*.

    Anonymous/invalid instance users are always denied.
    """
    if _invalid_instanceuser(instanceuser):
        return False
    else:
        # TODO: factor this off user and roll it into
        # this module
        return obj.user_can_delete(instanceuser.user)
def udf_write_level(instanceuser, udf):
    """Return WRITE, READ, or None describing access to *udf*.

    Falls back to the instance's default role for invalid/anonymous users.
    """
    # required in case non-existent udf
    # is passed to this tag
    if udf is None:
        return None

    if _invalid_instanceuser(instanceuser):
        role = udf.instance.default_role
    else:
        role = instanceuser.role

    kwargs = {
        'role_related_obj': role,
        'model_name': udf.model_type,
        'predicate': any,
        'field': 'udf:' + udf.name
    }

    # Check write first; read is only reported when write is denied
    if _allows_perm(perm_attr=ALLOWS_WRITES, **kwargs):
        level = WRITE
    elif _allows_perm(perm_attr=ALLOWS_READS, **kwargs):
        level = READ
    else:
        level = None

    return level
def plot_is_creatable(role_related_obj):
    """True when the role may create a Plot (all required fields writable)."""
    required_fields = Plot()._fields_required_for_create()
    return _allows_perm(role_related_obj, 'Plot',
                        predicate=all,
                        perm_attr=ALLOWS_WRITES,
                        fields=required_fields,
                        feature_name='add_plot')
def map_feature_is_creatable(role_related_obj, Model):
    """True when the role may create an instance of *Model*."""
    required_fields = Model()._fields_required_for_create()
    return _allows_perm(role_related_obj, Model.__name__,
                        predicate=all,
                        perm_attr=ALLOWS_WRITES,
                        fields=required_fields)
def map_feature_is_writable(role_related_obj, model_obj, field=None):
    """True when the role may write *field* (or any field) of *model_obj*."""
    model_name = model_obj.__class__.__name__
    return _allows_perm(role_related_obj, model_name,
                        predicate=any,
                        perm_attr=ALLOWS_WRITES,
                        field=field)
def map_feature_is_deletable(role_related_obj, model_obj):
    """True when the role has write access to every field of *model_obj*."""
    model_name = model_obj.__class__.__name__
    return _allows_perm(role_related_obj, model_name,
                        predicate=all,
                        perm_attr=ALLOWS_WRITES)
def plot_is_writable(role_related_obj, field=None):
    """True when the role may write *field* (or any field) of a Plot."""
    return _allows_perm(role_related_obj, 'Plot',
                        predicate=any,
                        perm_attr=ALLOWS_WRITES,
                        field=field)
def any_resource_is_creatable(role_related_obj):
    """True when the role may create at least one non-Plot map feature."""
    role = _get_role_from_related_object(role_related_obj)

    if role is None:
        return False

    # "Resources" are all the instance's map feature types except Plot
    map_features = {MapFeature.get_subclass(m)
                    for m in role.instance.map_feature_types}
    resources = map_features - {Plot}

    return any(map_feature_is_creatable(role, Model) for Model in resources)
def geom_is_writable(instanceuser, model_name):
    """True when the user's role may write the 'geom' field of *model_name*."""
    return _allows_perm(instanceuser, model_name,
                        predicate=any,
                        perm_attr=ALLOWS_WRITES,
                        field='geom')
def treephoto_is_writable(role_related_obj):
    """True when the role may upload tree photos (feature must be enabled)."""
    required_fields = PHOTO_PERM_FIELDS | {'tree'}
    return _allows_perm(role_related_obj, 'TreePhoto',
                        predicate=all,
                        perm_attr=ALLOWS_WRITES,
                        fields=required_fields,
                        feature_name='tree_image_upload')
def mapfeaturephoto_is_writable(role_related_obj):
    """True when the role may upload map feature photos (feature enabled)."""
    return _allows_perm(role_related_obj, 'MapFeaturePhoto',
                        predicate=all,
                        perm_attr=ALLOWS_WRITES,
                        fields=PHOTO_PERM_FIELDS,
                        feature_name='tree_image_upload')
| kdeloach/otm-core | opentreemap/treemap/lib/perms.py | Python | gpl-3.0 | 8,533 |
import matplotlib.pyplot as plt
import MySQLdb
import datetime
# Scoreboard database connection (credentials redacted in this copy)
db = MySQLdb.connect(host="<censored>", user="<censored>", passwd="<censored>", db="<censored>")
class Attempt:
    """One flag submission, parsed from a row of the `attempts` table."""

    def __init__(self, row):
        # Row layout: (id, teamID, taskID, flag, result, submitted_at)
        self.id = int(row[0])
        self.teamID = int(row[1])
        self.taskID = int(row[2])
        self.flag = row[3]
        self.result = row[4] == 1
        # Seconds elapsed since contest start (module-level startTime)
        self.passed = (row[5] - startTime).total_seconds()

    def __repr__(self):
        # Elapsed time rendered as HH:MM:SS
        return str((self.id, self.teamID, self.taskID, self.flag, self.result, "%02d:%02d:%02d" % (self.passed / 3600, self.passed % 3600 / 60, self.passed % 60)))
class Task:
    """One challenge, parsed from a row of the `tasks` table."""

    def __init__(self, row):
        task_id, point, category = row[0], row[1], row[2]
        self.id = task_id
        self.point = point
        self.cat = category
        # Number of teams that have solved this task so far
        self.nsolvers = 0
class Team:
    """One team and its accumulated score history."""

    def __init__(self, row):
        self.id = row[0]
        # Python 2 `unicode`; replaces undecodable bytes in the team name
        self.name = unicode(row[1], errors='replace')
        self.point = 0
        # Index of the last whole hour already folded into `points`
        self.last = 0
        # Cumulative score sampled at each hour mark (starts at 0)
        self.points = [0]
        # IDs of the tasks this team has solved
        self.solved = []
# Load teams keyed by id
teams = {}
cur = db.cursor()
cur.execute("SELECT `id`, `name` FROM `teams` ORDER BY `id`")
for row in cur.fetchall():
    teams[int(row[0])] = Team(row)
cur.close()

# Load tasks keyed by id
tasks = {}
cur = db.cursor()
cur.execute("SELECT `id`, `point`, `cat` FROM `tasks` ORDER BY `id`")
for row in cur.fetchall():
    tasks[int(row[0])] = Task(row)
cur.close()

# Contest start time, as a datetime
startTime = 0
cur = db.cursor()
cur.execute("SELECT `startTime` FROM `contest`")
startTime = cur.fetchone()[0]
# NOTE(review): the stored epoch is shifted back 7 hours -- presumably a
# timezone correction; confirm against the server's timezone.
startTime = datetime.datetime.fromtimestamp(startTime - 7 * 60 * 60)
cur.close()

# Load all submissions in chronological (id) order
attempts = []
cur = db.cursor()
cur.execute("SELECT * FROM `attempts` ORDER BY `id`")
for row in cur.fetchall():
    attempts.append(Attempt(row))
cur.close()

# Hour marks for the score timeline (0..30)
t = range(0, 31)
for attempt in attempts:
    # Ignore submissions made before the contest started
    if attempt.passed < 0:
        continue
    # Record a score sample for every whole hour that has elapsed
    while attempt.passed / 3600 > teams[attempt.teamID].last + 1:
        teams[attempt.teamID].points.append(teams[attempt.teamID].point)
        teams[attempt.teamID].last += 1
    # Only the first correct submission per task counts
    if attempt.result and attempt.taskID not in teams[attempt.teamID].solved:
        # Bonus: +2 points per remaining slot among the first 3 solvers
        teams[attempt.teamID].point += tasks[attempt.taskID].point + 2 * max(0, 3 - tasks[attempt.taskID].nsolvers)
        tasks[attempt.taskID].nsolvers += 1
        teams[attempt.teamID].solved.append(attempt.taskID)
# Pad every team's series to the full timeline length
for key in teams:
    while len(teams[key].points) < len(t):
        teams[key].points.append(teams[key].point)

# Final scoreboard, sorted by score history
board = []
for key in teams:
    board.append((teams[key].points, teams[key].name))
board.sort()
for b in board:
    print b

# Build a palette of 64 hex color strings (4 levels per RGB channel)
colors = []
for i in range(4):
    for j in range(4):
        for k in range(4):
            colors.append((i * 256 / 4, j * 256 / 4, k * 256 / 4))
            colors[-1] = "".join([hex(colors[-1][i])[2:] if colors[-1][i] > 15 else '0' + hex(colors[-1][i])[2:] for i in range(3)])

# Plot one score-over-time line per team
i = 0
for key in teams:
    team = teams[key]
    lines = plt.plot(t, team.points, marker='o', color='#' + colors[i], label=team.name)
    i += 1
plt.legend(bbox_to_anchor=(1.01, 1.11), loc=2, borderaxespad=0.)
plt.show()
| kadircet/HackMETU-15 | stats.py | Python | gpl-2.0 | 2,658 |
"""
Convenience functions for the construction of spatial weights based on
contiguity and distance criteria.
"""
__author__ = "Sergio J. Rey <srey@asu.edu> "
import pysal
from Contiguity import buildContiguity
from Distance import knnW, Kernel, DistanceBand
from util import get_ids, get_points_array_from_shapefile, min_threshold_distance
import numpy as np
__all__ = ['queen_from_shapefile', 'rook_from_shapefile', 'knnW_from_array',
'knnW_from_shapefile', 'threshold_binaryW_from_array',
'threshold_binaryW_from_shapefile', 'threshold_continuousW_from_array',
'threshold_continuousW_from_shapefile', 'kernelW', 'kernelW_from_shapefile',
'adaptive_kernelW', 'adaptive_kernelW_from_shapefile',
'min_threshold_dist_from_shapefile', 'build_lattice_shapefile']
def queen_from_shapefile(shapefile, idVariable=None, sparse=False):
    """Build queen contiguity weights from a polygon shapefile.

    Polygons sharing at least one vertex are treated as neighbors.

    Parameters
    ----------
    shapefile : string
        polygon shapefile name, including the .shp suffix.
    idVariable : string
        optional DBF column used to label observations.
    sparse : boolean
        when True return a pysal.weights.WSP, otherwise a W.

    Returns
    -------
    w : W or WSP
        spatial weights instance.

    Examples
    --------
    >>> wq=queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> "%.3f"%wq.pct_nonzero
    '9.829'

    See Also
    --------
    :class:`pysal.weights.W`
    """
    shp = pysal.open(shapefile)
    w = buildContiguity(shp, criterion='queen')
    ids = None
    if idVariable:
        ids = get_ids(shapefile, idVariable)
        w.remap_ids(ids)
    shp.close()
    w.set_shapefile(shapefile, idVariable)
    if not sparse:
        return w
    return pysal.weights.WSP(w.sparse, id_order=ids)
def rook_from_shapefile(shapefile, idVariable=None, sparse=False):
    """Build rook contiguity weights from a polygon shapefile.

    Polygons sharing a common edge are treated as neighbors.

    Parameters
    ----------
    shapefile : string
        polygon shapefile name, including the .shp suffix.
    idVariable : string
        optional DBF column used to label observations.
    sparse : boolean
        when True return a pysal.weights.WSP, otherwise a W.

    Returns
    -------
    w : W or WSP
        spatial weights instance.

    Examples
    --------
    >>> wr=rook_from_shapefile(pysal.examples.get_path("columbus.shp"), "POLYID")
    >>> "%.3f"%wr.pct_nonzero
    '8.330'

    See Also
    --------
    :class:`pysal.weights.W`
    """
    shp = pysal.open(shapefile)
    w = buildContiguity(shp, criterion='rook')
    ids = None
    if idVariable:
        ids = get_ids(shapefile, idVariable)
        w.remap_ids(ids)
    shp.close()
    w.set_shapefile(shapefile, idVariable)
    if not sparse:
        return w
    return pysal.weights.WSP(w.sparse, id_order=ids)
def spw_from_gal(galfile):
    """Read a gal file into a sparse weights object.

    Parameters
    ----------
    galfile : string
        name of the gal file, suffix included.

    Returns
    -------
    spw : sparse_matrix
        scipy sparse matrix in CSR format; row/col identifiers are
        carried on the returned object.

    Examples
    --------
    >>> spw = pysal.weights.user.spw_from_gal(pysal.examples.get_path("sids2.gal"))
    >>> spw.sparse.nnz
    462
    """
    gal = pysal.open(galfile, 'r')
    return gal.read(sparse=True)
# Distance based weights
def knnW_from_array(array, k=2, p=2, ids=None, radius=None):
    """Binary k-nearest-neighbor weights from a coordinate array.

    Parameters
    ----------
    array : array
        (n, m) -- n observations on m attributes.
    k : int
        number of nearest neighbors.
    p : float
        Minkowski p-norm parameter (1<=p<=infinity); 2 is Euclidean,
        1 is Manhattan.  Ignored when ``radius`` is given.
    ids : list
        optional identifiers to attach to each observation.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used instead of the Minkowski metric.

    Returns
    -------
    w : W
        Weights object with binary weights.

    Examples
    --------
    >>> import numpy as np
    >>> x,y=np.indices((5,5))
    >>> x.shape=(25,1)
    >>> y.shape=(25,1)
    >>> data=np.hstack([x,y])
    >>> wnn2=knnW_from_array(data,k=2)
    >>> set([1, 5]) == set(wnn2.neighbors[0])
    True

    Notes
    -----
    Ties between equally distant neighbors are broken arbitrarily.

    See Also
    --------
    :class:`pysal.weights.W`
    """
    if radius is None:
        tree = pysal.cg.KDTree(array)
    else:
        tree = pysal.cg.KDTree(array, distance_metric='Arc', radius=radius)
    return knnW(tree, k=k, p=p, ids=ids)
def knnW_from_shapefile(shapefile, k=2, p=2, idVariable=None, radius=None):
    """Binary k-nearest-neighbor weights from a shapefile.

    Parameters
    ----------
    shapefile : string
        shapefile name with shp suffix; point or polygon (polygon
        distances use centroids).
    k : int
        number of nearest neighbors.
    p : float
        Minkowski p-norm parameter (1<=p<=infinity); ignored when
        ``radius`` is given.
    idVariable : string
        optional DBF column used to label observations.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used instead of the Minkowski metric.

    Returns
    -------
    w : W
        Weights object with binary weights.

    Examples
    --------
    >>> wc=knnW_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> "%.4f"%wc.pct_nonzero
    '4.0816'

    Notes
    -----
    Coordinates are assumed projected unless a radius is supplied.
    Ties between equally distant neighbors are broken arbitrarily.

    See Also
    --------
    :class:`pysal.weights.W`
    """
    pts = get_points_array_from_shapefile(shapefile)
    if radius is None:
        tree = pysal.cg.KDTree(pts)
    else:
        tree = pysal.cg.KDTree(pts, distance_metric='Arc', radius=radius)
    ids = get_ids(shapefile, idVariable) if idVariable else None
    return knnW(tree, k=k, p=p, ids=ids)
def threshold_binaryW_from_array(array, threshold, p=2, radius=None):
    """Binary distance-band weights from a coordinate array.

    Pairs of observations separated by less than ``threshold`` become
    neighbors with weight 1.

    Parameters
    ----------
    array : array
        (n, m) -- n observations on m attributes.
    threshold : float
        distance band.
    p : float
        Minkowski p-norm parameter (1<=p<=infinity); ignored when
        ``radius`` is given.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used instead of the Minkowski metric.

    Returns
    -------
    w : W
        Weights object with binary weights.

    Examples
    --------
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> w=threshold_binaryW_from_array(points,threshold=11.2)
    WARNING: there is one disconnected observation (no neighbors)
    Island id: [2]
    """
    data = array
    if radius is not None:
        data = pysal.cg.KDTree(data, distance_metric='Arc', radius=radius)
    return DistanceBand(data, threshold=threshold, p=p)
def threshold_binaryW_from_shapefile(shapefile, threshold, p=2, idVariable=None, radius=None):
    """Binary distance-band weights from a shapefile.

    Parameters
    ----------
    shapefile : string
        shapefile name with shp suffix; point or polygon (polygon
        distances use centroids).
    threshold : float
        distance band.
    p : float
        Minkowski p-norm parameter (1<=p<=infinity); ignored when
        ``radius`` is given.
    idVariable : string
        optional DBF column used to label observations.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used instead of the Minkowski metric.

    Returns
    -------
    w : W
        Weights object with binary weights.

    Examples
    --------
    >>> w = threshold_binaryW_from_shapefile(pysal.examples.get_path("columbus.shp"),0.62,idVariable="POLYID")
    >>> w.weights[1]
    [1, 1]

    Notes
    -----
    Coordinates are assumed projected unless a radius is supplied.
    """
    pts = get_points_array_from_shapefile(shapefile)
    if radius is not None:
        pts = pysal.cg.KDTree(pts, distance_metric='Arc', radius=radius)
    if not idVariable:
        return threshold_binaryW_from_array(pts, threshold, p=p)
    w = DistanceBand(pts, threshold=threshold, p=p)
    w.remap_ids(get_ids(shapefile, idVariable))
    return w
def threshold_continuousW_from_array(array, threshold, p=2,
                                     alpha=-1, radius=None):
    """Continuous distance-band weights from a coordinate array.

    Neighbors within ``threshold`` receive a distance-decayed weight.

    Parameters
    ----------
    array : array
        (n, m) -- n observations on m attributes.
    threshold : float
        distance band.
    p : float
        Minkowski p-norm parameter (1<=p<=infinity); ignored when
        ``radius`` is given.
    alpha : float
        distance decay parameter (default -1.0); positive values mean
        weights do not decline with distance.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used instead of the Minkowski metric.

    Returns
    -------
    w : W
        Weights object with continuous weights.

    Examples
    --------
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> wid=threshold_continuousW_from_array(points,11.2)
    WARNING: there is one disconnected observation (no neighbors)
    Island id: [2]
    >>> wid.weights[0]
    [0.10000000000000001, 0.089442719099991588]
    """
    data = array
    if radius is not None:
        data = pysal.cg.KDTree(data, distance_metric='Arc', radius=radius)
    return DistanceBand(data, threshold=threshold, p=p, alpha=alpha,
                        binary=False)
def threshold_continuousW_from_shapefile(shapefile, threshold, p=2,
                                         alpha=-1, idVariable=None, radius=None):
    """Continuous distance-band weights from a shapefile.

    Parameters
    ----------
    shapefile : string
        shapefile name with shp suffix; point or polygon (polygon
        distances use centroids).
    threshold : float
        distance band.
    p : float
        Minkowski p-norm parameter (1<=p<=infinity); ignored when
        ``radius`` is given.
    alpha : float
        distance decay parameter (default -1.0); positive values mean
        weights do not decline with distance.
    idVariable : string
        optional DBF column used to label observations.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used instead of the Minkowski metric.

    Returns
    -------
    w : W
        Weights object with continuous weights.

    Examples
    --------
    >>> w = threshold_continuousW_from_shapefile(pysal.examples.get_path("columbus.shp"),0.62,idVariable="POLYID")
    >>> w.weights[1]
    [1.6702346893743334, 1.7250729841938093]

    Notes
    -----
    Coordinates are assumed projected unless a radius is supplied.
    """
    pts = get_points_array_from_shapefile(shapefile)
    if radius is not None:
        pts = pysal.cg.KDTree(pts, distance_metric='Arc', radius=radius)
    if idVariable:
        w = DistanceBand(pts, threshold=threshold, p=p, alpha=alpha,
                         binary=False)
        w.remap_ids(get_ids(shapefile, idVariable))
    else:
        w = threshold_continuousW_from_array(pts, threshold, p=p, alpha=alpha)
    w.set_shapefile(shapefile, idVariable)
    return w
# Kernel Weights
def kernelW(points, k=2, function='triangular', fixed=True,
            radius=None, diagonal=False):
    """Kernel-based weights from a coordinate array.

    The bandwidth is derived from the largest k-nearest-neighbor
    distance; with ``fixed`` True every observation shares that single
    bandwidth.

    Parameters
    ----------
    points : array
        (n, m) observations used to measure distances.
    k : int
        number of nearest neighbors used to set the bandwidth.
    function : string
        kernel name: 'triangular', 'uniform', 'quadratic',
        'epanechnikov', 'quartic', 'bisquare' or 'gaussian'.
    fixed : boolean
        True for a single global bandwidth, False for adaptive
        bandwidths.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used.
    diagonal : boolean
        when True, self-weights are forced to 1.0 instead of the
        kernel value at distance zero.

    Returns
    -------
    w : W
        instance of spatial weights.

    Examples
    --------
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw=kernelW(points)
    >>> kw.neighbors[0]
    [0, 1, 3]
    """
    data = points
    if radius is not None:
        data = pysal.cg.KDTree(data, distance_metric='Arc', radius=radius)
    return Kernel(data, function=function, k=k, fixed=fixed,
                  diagonal=diagonal)
def kernelW_from_shapefile(shapefile, k=2, function='triangular',
                           idVariable=None, fixed=True, radius=None, diagonal=False):
    """Kernel-based weights from a shapefile.

    Parameters
    ----------
    shapefile : string
        shapefile name with shp suffix; point or polygon (polygon
        distances use centroids).
    k : int
        number of nearest neighbors used to set the bandwidth.
    function : string
        kernel name: 'triangular', 'uniform', 'quadratic',
        'epanechnikov', 'quartic', 'bisquare' or 'gaussian'.
    idVariable : string
        optional DBF column used to label observations.
    fixed : binary
        True for a single global bandwidth, False for adaptive
        bandwidths.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used.
    diagonal : boolean
        when True, self-weights are forced to 1.0 instead of the
        kernel value at distance zero.

    Returns
    -------
    w : W
        instance of spatial weights.

    Examples
    --------
    >>> kw = pysal.kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"),idVariable='POLYID', function = 'gaussian')
    >>> set(kw.neighbors[1]) == set([4, 2, 3, 1])
    True

    Notes
    -----
    Coordinates are assumed projected unless a radius is supplied.
    """
    pts = get_points_array_from_shapefile(shapefile)
    if radius is not None:
        pts = pysal.cg.KDTree(pts, distance_metric='Arc', radius=radius)
    if not idVariable:
        return kernelW(pts, k=k, function=function, fixed=fixed,
                       diagonal=diagonal)
    ids = get_ids(shapefile, idVariable)
    return Kernel(pts, function=function, k=k, ids=ids, fixed=fixed,
                  diagonal=diagonal)
def adaptive_kernelW(points, bandwidths=None, k=2, function='triangular',
                     radius=None, diagonal=False):
    """Kernel weights with adaptive (per-observation) bandwidths.

    When ``bandwidths`` is omitted, each observation's bandwidth is its
    own k-nearest-neighbor distance.

    Parameters
    ----------
    points : array
        (n, m) observations used to measure distances.
    bandwidths : float or array-like
        optional explicit bandwidth(s) :math:`h_i`; when None the
        adaptive bandwidth is derived from ``k``.
    k : int
        number of nearest neighbors used for the adaptive bandwidth.
    function : string
        kernel name: 'triangular', 'uniform', 'quadratic', 'quartic'
        or 'gaussian'.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used.
    diagonal : boolean
        when True, self-weights are forced to 1.0 instead of the
        kernel value at distance zero.

    Returns
    -------
    w : W
        instance of spatial weights.

    Examples
    --------
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kwea=adaptive_kernelW(points)
    >>> kwea.neighbors[0]
    [0, 1, 3]
    """
    data = points
    if radius is not None:
        data = pysal.cg.KDTree(data, distance_metric='Arc', radius=radius)
    return Kernel(data, bandwidth=bandwidths, fixed=False, k=k,
                  function=function, diagonal=diagonal)
def adaptive_kernelW_from_shapefile(shapefile, bandwidths=None, k=2, function='triangular',
                                    idVariable=None, radius=None,
                                    diagonal=False):
    """Kernel weights with adaptive bandwidths from a shapefile.

    Parameters
    ----------
    shapefile : string
        shapefile name with shp suffix; point or polygon (polygon
        distances use centroids).
    bandwidths : float or array-like
        optional explicit bandwidth(s) :math:`h_i`; when None the
        adaptive bandwidth is derived from ``k``.
    k : int
        number of nearest neighbors used for the adaptive bandwidth.
    function : string
        kernel name: 'triangular', 'uniform', 'quadratic', 'quartic'
        or 'gaussian'.
    idVariable : string
        optional DBF column used to label observations.
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used.
    diagonal : boolean
        when True, self-weights are forced to 1.0 instead of the
        kernel value at distance zero.

    Returns
    -------
    w : W
        instance of spatial weights.

    Examples
    --------
    >>> kwa = pysal.adaptive_kernelW_from_shapefile(pysal.examples.get_path("columbus.shp"), function='gaussian')
    >>> kwa.neighbors[0]
    [0, 2, 1]

    Notes
    -----
    Coordinates are assumed projected unless a radius is supplied.
    """
    pts = get_points_array_from_shapefile(shapefile)
    if radius is not None:
        pts = pysal.cg.KDTree(pts, distance_metric='Arc', radius=radius)
    if not idVariable:
        return adaptive_kernelW(pts, bandwidths=bandwidths, k=k,
                                function=function, diagonal=diagonal)
    ids = get_ids(shapefile, idVariable)
    return Kernel(pts, bandwidth=bandwidths, fixed=False, k=k,
                  function=function, ids=ids, diagonal=diagonal)
def min_threshold_dist_from_shapefile(shapefile, radius=None, p=2):
    """Smallest distance band that leaves no observation neighborless.

    Returns the maximum nearest-neighbor distance over all
    observations; any distance-band threshold at or above this value
    gives every observation at least one neighbor.

    Parameters
    ----------
    shapefile : string
        shapefile name with shp suffix; point or polygon (polygon
        distances use centroids).
    radius : float
        when supplied, arc distances on a sphere of this radius are
        used and ``p`` is ignored.
    p : float
        Minkowski p-norm parameter (1<=p<=infinity).

    Returns
    -------
    d : float
        Maximum nearest neighbor distance between the n observations.

    Examples
    --------
    >>> md = min_threshold_dist_from_shapefile(pysal.examples.get_path("columbus.shp"))
    >>> md
    0.61886415807685413

    Notes
    -----
    Coordinates are assumed projected unless a radius is supplied.
    """
    points = get_points_array_from_shapefile(shapefile)
    if radius is None:
        return min_threshold_distance(points, p)
    # Arc distances: for each point query its two closest points (the
    # closest is itself), then take the largest of the second-closest
    # distances.
    kdt = pysal.cg.kdtree.Arc_KDTree(points, radius=radius)
    distances = kdt.query(kdt.data, k=2)
    return distances[0].max(axis=0)[1]
def build_lattice_shapefile(nrows, ncols, outFileName):
    """Write a regular lattice of unit-square polygons to a shapefile.

    Creates ``nrows * ncols`` cells, each a closed square polygon, plus
    a companion DBF with a single integer 'ID' column numbered in
    row-major order.

    Parameters
    ----------
    nrows : int
        Number of rows.
    ncols : int
        Number of cols.
    outFileName : str
        shapefile name with shp suffix.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        if ``outFileName`` does not end with '.shp'.
    """
    if not outFileName.endswith('.shp'):
        raise ValueError("outFileName must end with .shp")
    o = pysal.open(outFileName, 'w')
    # Swap the known '.shp' suffix for '.dbf'.  (The previous
    # split(".")[0] approach truncated paths containing dots anywhere,
    # e.g. 'out.v2/grid.shp' -> 'out.dbf'.)
    dbf_name = outFileName[:-4] + ".dbf"
    d = pysal.open(dbf_name, 'w')
    d.header = ['ID']
    d.field_spec = [('N', 8, 0)]
    c = 0
    for i in xrange(nrows):
        for j in xrange(ncols):
            # Corner order: lower-left, upper-left, upper-right,
            # lower-right, then back to lower-left to close the ring.
            ll = i, j
            ul = i, j + 1
            ur = i + 1, j + 1
            lr = i + 1, j
            o.write(pysal.cg.Polygon([ll, ul, ur, lr, ll]))
            d.write([c])
            c += 1
    d.close()
    o.close()
def _test():
import doctest
# the following line could be used to define an alternative to the '<BLANKLINE>' flag
#doctest.BLANKLINE_MARKER = 'something better than <BLANKLINE>'
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
| jlaura/pysal | pysal/weights/user.py | Python | bsd-3-clause | 34,513 |
from lettuce import before, after, world
from selenium import webdriver
@before.all
def set_browser():
    """Launch one Firefox instance, shared by every scenario via lettuce's global ``world``."""
    world.browser = webdriver.Firefox()
@after.all
def shutdown_browser(results):
    """Quit the shared browser once the whole run finishes.

    ``results`` is supplied by lettuce's after.all hook and is unused here.
    """
    world.browser.quit()
| claudiob/neverfails | neverfails/terrain.py | Python | mit | 212 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Package initializer for the ``lexicon`` package.

Only authorship metadata and Python 2/3 compatibility imports live
here; no runtime behavior is defined.
"""
from __future__ import unicode_literals, print_function, division
# Package authorship metadata.
__author__ = "Serge Kilimoff-Goriatchkine"
__email__ = "serge.kilimoff@gmail.com"
| serge-kilimoff/Sublime4Space | lexicon/__init__.py | Python | mit | 209 |
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2019 Preferred Infrastructure, Inc.
# Copyright (c) 2019 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
# \changed to roi_align by Elaine Bao
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
from __future__ import division
import numbers
import numpy as np
import six
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
from chainercv.functions.ps_roi_average_align_2d \
import _GET_BILINEAR_INTERP_KERNEL
from chainercv.functions.ps_roi_average_align_2d \
import _get_bilinear_interp_params
from chainercv.functions.ps_roi_average_align_2d import _get_bounds
from chainercv.functions.ps_roi_average_align_2d import _pair
from chainercv.functions.ps_roi_average_pooling_2d import _outsize
class PSROIMaxAlign2D(function.Function):
    def __init__(
            self, outsize, spatial_scale,
            group_size, sampling_ratio=None
    ):
        """Validate and store the PS-RoI max-align hyperparameters.

        Args:
            outsize: target output size, unpacked by :func:`_outsize`
                into ``(out_c, out_h, out_w)``; ``out_c`` may be
                ``None``, in which case it is derived from the input
                channel count at forward time.
            spatial_scale: positive scale mapping RoI coordinates to
                feature-map coordinates (integral values are promoted
                to float).
            group_size: positive integer count of position-sensitive
                groups along each spatial axis.
            sampling_ratio: sampling points per bin along each axis --
                an int, a pair, or ``None`` (adaptive, derived from the
                RoI size at forward time); normalized to a pair here.

        Raises:
            TypeError: if any argument fails validation.
        """
        out_c, out_h, out_w = _outsize(outsize)
        # out_c may legitimately be None (channel count inferred at
        # forward time); otherwise it must be a positive integer.
        if out_c is not None and \
                not (isinstance(out_c, numbers.Integral) and out_c > 0):
            raise TypeError(
                'outsize[0] must be positive integer: {}, {}'
                .format(type(out_c), out_c))
        if not (isinstance(out_h, numbers.Integral) and out_h > 0):
            raise TypeError(
                'outsize[1] must be positive integer: {}, {}'
                .format(type(out_h), out_h))
        if not (isinstance(out_w, numbers.Integral) and out_w > 0):
            raise TypeError(
                'outsize[2] must be positive integer: {}, {}'
                .format(type(out_w), out_w))
        # Accept integral scales by promoting them to float first.
        if isinstance(spatial_scale, numbers.Integral):
            spatial_scale = float(spatial_scale)
        if not (isinstance(spatial_scale, numbers.Real)
                and spatial_scale > 0):
            raise TypeError(
                'spatial_scale must be a positive float number: {}, {}'
                .format(type(spatial_scale), spatial_scale))
        if not (isinstance(group_size, numbers.Integral)
                and group_size > 0):
            raise TypeError(
                'group_size must be positive integer: {}, {}'
                .format(type(group_size), group_size))
        # Normalize to a (ratio_h, ratio_w) pair; each entry is either
        # None (adaptive) or an integer >= 1.
        sampling_ratio = _pair(sampling_ratio)
        if not all((isinstance(s, numbers.Integral) and s >= 1) or s is None
                   for s in sampling_ratio):
            raise TypeError(
                'sampling_ratio must be integer >= 1 or a pair of it: {}'
                .format(sampling_ratio))
        self.out_c, self.out_h, self.out_w = out_c, out_h, out_w
        self.spatial_scale = spatial_scale
        self.group_size = group_size
        self.sampling_ratio = sampling_ratio
    def check_type_forward(self, in_types):
        """Type-check the three forward inputs.

        Expects ``(x, rois, roi_indices)``: a float32 4-d feature map,
        float32 RoIs of shape (R, 4), and int32 batch indices of shape
        (R,), with matching first dimensions.
        """
        type_check.expect(in_types.size() == 3)
        x_type, roi_type, roi_index_type = in_types
        type_check.expect(
            x_type.dtype == np.float32,
            x_type.ndim == 4,
            roi_type.dtype == np.float32,
            roi_type.ndim == 2,
            roi_type.shape[1] == 4,
            roi_index_type.dtype == np.int32,
            roi_index_type.ndim == 1,
            roi_type.shape[0] == roi_index_type.shape[0]
        )
    def forward_cpu(self, inputs):
        """CPU implementation of position-sensitive RoI max align.

        Args:
            inputs: 3-tuple ``(bottom_data, bottom_rois,
                bottom_roi_indices)`` -- float32 feature map of shape
                (B, C, H, W), float32 RoIs of shape (R, 4) ordered as
                (y_min, x_min, y_max, x_max) in input-image
                coordinates, and int32 batch indices of shape (R,).

        Returns:
            1-tuple with the pooled float32 array of shape
            (R, out_c, out_h, out_w).  Also stores
            ``self.argmax_data`` (flat index of the winning sampling
            point per output cell, or -1 if no sample was valid) for
            use in backward.
        """
        # RoIs and indices are needed by backward; the feature map
        # itself is not (only its shape, saved below).
        self.retain_inputs((1, 2))
        self._bottom_data_shape = inputs[0].shape
        bottom_data, bottom_rois, bottom_roi_indices = inputs
        channel, height, width = bottom_data.shape[1:]
        # Derive (or validate) the output channel count: the input must
        # pack out_c * group_size**2 position-sensitive score maps.
        if self.out_c is None:
            if channel % (self.group_size * self.group_size) != 0:
                raise ValueError(
                    'input channel must be divided by group_size * group_size:'
                    '{} % {} != 0'
                    .format(channel, self.group_size * self.group_size))
            out_c = channel // (self.group_size * self.group_size)
        else:
            if channel != self.out_c * self.group_size * self.group_size:
                raise ValueError(
                    'input channel must be equal to '
                    'outsize[0] * group_size * group_size: {} != {}'
                    .format(channel,
                            self.out_c * self.group_size * self.group_size))
            out_c = self.out_c
        n_roi = bottom_rois.shape[0]
        top_data = np.empty(
            (n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
        # Per output cell, the flat sampling-grid index of the maximum
        # (-1 when every sample fell outside the feature map).
        self.argmax_data = np.empty(top_data.shape, dtype=np.int32)
        group_size = self.group_size
        pooled_width, pooled_height \
            = self.out_w, self.out_h
        spatial_scale = self.spatial_scale
        # One iteration per output element (n, ctop, ph, pw).
        for i in six.moves.range(top_data.size):
            n, ctop, ph, pw = np.unravel_index(i, top_data.shape)
            roi_batch_ind = bottom_roi_indices[n]
            # RoI corners scaled into feature-map coordinates.
            roi_start_h = bottom_rois[n, 0] * spatial_scale
            roi_start_w = bottom_rois[n, 1] * spatial_scale
            roi_end_h = bottom_rois[n, 2] * spatial_scale
            roi_end_w = bottom_rois[n, 3] * spatial_scale
            # Clamp RoI extent away from zero to avoid degenerate bins.
            roi_height = max(roi_end_h - roi_start_h, 0.1)
            roi_width = max(roi_end_w - roi_start_w, 0.1)
            bin_size_h = roi_height / pooled_height
            bin_size_w = roi_width / pooled_width
            # Select the position-sensitive group (gh, gw) for this
            # output cell and the corresponding input channel c.
            gh = int(np.floor(ph * group_size / pooled_height))
            gw = int(np.floor(pw * group_size / pooled_width))
            gh = min(max(gh, 0), group_size - 1)
            gw = min(max(gw, 0), group_size - 1)
            c = (ctop * group_size + gh) * group_size + gw
            # Sampling grid density: explicit ratio when configured,
            # otherwise adaptive (ceil of the bin size).
            if self.sampling_ratio[0] is None:
                roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
            else:
                roi_bin_grid_h = self.sampling_ratio[0]
            if self.sampling_ratio[1] is None:
                roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
            else:
                roi_bin_grid_w = self.sampling_ratio[1]
            # Max over all bilinear samples inside the bin.
            maxval = - np.inf
            maxidx = -1
            for iy in six.moves.range(roi_bin_grid_h):
                # Sample row center within the (ph, pw) bin.
                y = roi_start_h + ph * bin_size_h + \
                    (iy + .5) * bin_size_h / roi_bin_grid_h
                y, y_low, y_high = _get_bounds(y, height)
                if y is None or y_low is None or y_high is None:
                    # Row falls outside the feature map; skip it.
                    continue
                for ix in six.moves.range(roi_bin_grid_w):
                    x = roi_start_w + pw * bin_size_w + \
                        (ix + .5) * bin_size_w / roi_bin_grid_w
                    x, x_low, x_high = _get_bounds(x, width)
                    if x is None or x_low is None or x_high is None:
                        continue
                    # bilinear interpolation {{
                    w1, w2, w3, w4 = _get_bilinear_interp_params(
                        y, x, y_low, x_low, y_high, x_high)
                    tmpval = 0.0
                    isvalid = False
                    # Flat index of this sample in the bin's grid,
                    # recorded for backward when it wins the max.
                    bottom_index = iy * roi_bin_grid_w + ix
                    if w1 > 0 and y_low >= 0 and x_low >= 0:
                        v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
                        tmpval += w1 * v1
                        isvalid = True
                    if w2 > 0 and y_low >= 0 and x_high <= width - 1:
                        v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
                        tmpval += w2 * v2
                        isvalid = True
                    if w3 > 0 and y_high <= height - 1 and x_low >= 0:
                        v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
                        tmpval += w3 * v3
                        isvalid = True
                    if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
                        v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
                        tmpval += w4 * v4
                        isvalid = True
                    if isvalid and tmpval > maxval:
                        maxval = tmpval
                        maxidx = bottom_index
                    # }}
            top_data[n, ctop, ph, pw] = maxval
            self.argmax_data[n, ctop, ph, pw] = maxidx
        return top_data,
    def forward_gpu(self, inputs):
        """Compute the GPU forward pass.

        Launches one CUDA thread per output element; each thread scans its
        sampling grid, takes the bilinearly interpolated maximum over the
        grid points, and records the winning grid index in
        ``self.argmax_data`` for use by the backward pass.

        Args:
            inputs: ``(bottom_data, bottom_rois, bottom_roi_indices)``
                arrays on the GPU.

        Returns:
            One-element tuple holding the pooled output array of shape
            ``(n_roi, out_c, out_h, out_w)``.

        Raises:
            ValueError: If the input channel count is not compatible with
                ``group_size`` (and ``out_c`` when it was given).
        """
        # ROIs and their batch indices are needed again in backward; the
        # feature map itself is not (only its shape is recorded).
        self.retain_inputs((1, 2))
        self._bottom_data_shape = inputs[0].shape
        bottom_data, bottom_rois, bottom_roi_indices = inputs
        channel, height, width = bottom_data.shape[1:]
        # Derive/validate the output channel count: position sensitive
        # pooling requires out_c * group_size**2 input channels.
        if self.out_c is None:
            if channel % (self.group_size * self.group_size) != 0:
                raise ValueError(
                    'input channel must be divided by group_size * group_size:'
                    '{} % {} != 0'
                    .format(channel, self.group_size * self.group_size))
            out_c = channel // (self.group_size * self.group_size)
        else:
            if channel != self.out_c * self.group_size * self.group_size:
                raise ValueError(
                    'input channel must be equal to '
                    'outsize[0] * group_size * group_size: {} != {}'
                    .format(channel,
                            self.out_c * self.group_size * self.group_size))
            out_c = self.out_c
        n_roi = bottom_rois.shape[0]
        top_data = cuda.cupy.empty(
            (n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
        self.argmax_data = cuda.cupy.empty(top_data.shape, np.int32)
        # The kernel uses 0 as the sentinel for "auto" sampling ratio
        # (it then derives the grid size from the ROI size).
        if self.sampling_ratio[0] is None:
            sampling_ratio_h = 0
        else:
            sampling_ratio_h = self.sampling_ratio[0]
        if self.sampling_ratio[1] is None:
            sampling_ratio_w = 0
        else:
            sampling_ratio_w = self.sampling_ratio[1]
        cuda.elementwise(
            '''
            raw T bottom_data, raw T bottom_rois,
            raw int32 bottom_roi_indices,
            T spatial_scale, int32 channel,
            int32 height, int32 width,
            int32 pooled_dim, int32 pooled_height, int32 pooled_width,
            int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
            ''',
            'T top_data, int32 argmax_data',
            '''
            // pos in output filter
            int ph = (i / pooled_width) % pooled_height;
            int pw = i % pooled_width;
            int ctop = (i / pooled_width / pooled_height) % pooled_dim;
            int n = i / pooled_width / pooled_height / pooled_dim;
            int roi_batch_ind = bottom_roi_indices[n];
            T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
            T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
            T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
            T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
            // Force too small ROIs to be 1x1
            T roi_height = max(roi_end_h - roi_start_h, 0.1);
            T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
            // Compute w and h at bottom
            T bin_size_h = roi_height / static_cast<T>(pooled_height);
            T bin_size_w = roi_width / static_cast<T>(pooled_width);
            // Compute c at bottom
            int gh = floor(
                static_cast<T>(ph) * group_size / pooled_height);
            int gw = floor(
                static_cast<T>(pw) * group_size / pooled_width);
            gh = min(max(gh, 0), group_size - 1);
            gw = min(max(gw, 0), group_size - 1);
            int c = (ctop * group_size + gh) * group_size + gw;
            int bottom_data_offset =
                (roi_batch_ind * channel + c) * height * width;
            // We use roi_bin_grid to sample the grid and mimic integral
            int roi_bin_grid_h = (sampling_ratio_h > 0)
                ? sampling_ratio_h
                : ceil(roi_height / pooled_height); // e.g. = 2
            int roi_bin_grid_w = (sampling_ratio_w > 0)
                ? sampling_ratio_w
                : ceil(roi_width / pooled_width);
            T maxval = - (T) (1.0 / 0.0);
            int maxidx = -1;
            for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
            {
                T y = roi_start_h + ph * bin_size_h +
                    static_cast<T>(iy + .5f) * bin_size_h /
                        static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
                int y_low, y_high;
                bool y_ret = get_bounds(y, height, y_low, y_high);
                if (!y_ret) continue;
                for (int ix = 0; ix < roi_bin_grid_w; ix++) {
                    T x = roi_start_w + pw * bin_size_w +
                        static_cast<T>(ix + .5f) * bin_size_w /
                            static_cast<T>(roi_bin_grid_w);
                    int x_low, x_high;
                    bool x_ret = get_bounds(x, width, x_low, x_high);
                    if (!x_ret) continue;
                    // bilinear_interpolation {{
                    T w1, w2, w3, w4;
                    get_bilinear_interp_params(
                        y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
                    T tmpval = 0.;
                    bool isvalid = false;
                    int bottom_index = iy * roi_bin_grid_w + ix;
                    if (w1 > 0 && y_low >= 0 && x_low >= 0) {
                        T v1 = bottom_data[
                            bottom_data_offset + y_low * width + x_low];
                        tmpval += w1 * v1;
                        isvalid = true;
                    }
                    if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
                        T v2 = bottom_data[
                            bottom_data_offset + y_low * width + x_high];
                        tmpval += w2 * v2;
                        isvalid = true;
                    }
                    if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
                        T v3 = bottom_data[
                            bottom_data_offset + y_high * width + x_low];
                        tmpval += w3 * v3;
                        isvalid = true;
                    }
                    if (w4 > 0 && y_high <= height - 1 &&
                        x_high <= width - 1) {
                        T v4 = bottom_data[
                            bottom_data_offset + y_high * width + x_high];
                        tmpval += w4 * v4;
                        isvalid = true;
                    }
                    // }}
                    if (isvalid && tmpval > maxval) {
                        maxval = tmpval;
                        maxidx = bottom_index;
                    }
                }
            }
            top_data = maxval;
            argmax_data = maxidx;
            ''',
            'ps_roi_max_align_2d_fwd',
            preamble=_GET_BILINEAR_INTERP_KERNEL,
        )(bottom_data, bottom_rois, bottom_roi_indices,
          self.spatial_scale, channel, height, width,
          out_c, self.out_h, self.out_w,
          self.group_size, sampling_ratio_h, sampling_ratio_w,
          top_data, self.argmax_data)
        return top_data,
def backward_cpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
height, width = self._bottom_data_shape[2:]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
n, ctop, ph, pw = np.unravel_index(i, top_diff.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(float(ph) * group_size / pooled_height))
gw = int(np.floor(float(pw) * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
top_diff_this_bin = top_diff[n, ctop, ph, pw]
maxidx = self.argmax_data[n, ctop, ph, pw]
if maxidx != -1:
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
iy = int(maxidx / roi_bin_grid_w)
ix = maxidx % roi_bin_grid_w
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
    def backward_gpu(self, inputs, gy):
        """Compute the GPU backward pass.

        One CUDA thread per output element reads the argmax recorded by
        the forward pass and scatters the gradient onto the four feature
        map pixels of that sampling point via atomicAdd (several output
        elements may touch the same pixel).

        Args:
            inputs: ``(bottom_data, bottom_rois, bottom_roi_indices)``;
                only the ROIs and their batch indices are used here.
            gy: One-element tuple holding the gradient w.r.t. the output.

        Returns:
            Tuple of gradients w.r.t. the three inputs; the ROI inputs are
            not differentiable, so their gradients are ``None``.
        """
        _, bottom_rois, bottom_roi_indices = inputs
        channel, height, width = self._bottom_data_shape[1:]
        out_c, out_h, out_w = gy[0].shape[1:]
        bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
        # 0 is the kernel's sentinel for "auto" sampling ratio, as in
        # forward_gpu.
        if self.sampling_ratio[0] is None:
            sampling_ratio_h = 0
        else:
            sampling_ratio_h = self.sampling_ratio[0]
        if self.sampling_ratio[1] is None:
            sampling_ratio_w = 0
        else:
            sampling_ratio_w = self.sampling_ratio[1]
        cuda.elementwise(
            '''
            raw T top_diff, raw int32 argmax_data,
            raw T bottom_rois, raw int32 bottom_roi_indices,
            T spatial_scale, int32 channel, int32 height, int32 width,
            int32 pooled_dim, int32 pooled_height, int32 pooled_width,
            int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
            ''',
            'raw T bottom_diff',
            '''
            // (n, c, h, w) coords in bottom data
            int pw = i % pooled_width;
            int ph = (i / pooled_width) % pooled_height;
            int ctop = (i / pooled_width / pooled_height) % pooled_dim;
            int n = i / pooled_width / pooled_height / pooled_dim;
            // Do not using rounding; this implementation detail is critical
            int roi_batch_ind = bottom_roi_indices[n];
            T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
            T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
            T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
            T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
            // Force too small ROIs to be 1x1
            T roi_height = max(roi_end_h - roi_start_h, 0.1);
            T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
            // Compute w and h at bottom
            T bin_size_h = roi_height / static_cast<T>(pooled_height);
            T bin_size_w = roi_width / static_cast<T>(pooled_width);
            // Compute c at bottom
            int gh = floor(
                static_cast<T>(ph) * group_size / pooled_height);
            int gw = floor(
                static_cast<T>(pw) * group_size / pooled_width);
            gh = min(max(gh, 0), group_size - 1);
            gw = min(max(gw, 0), group_size - 1);
            int c = (ctop * group_size + gh) * group_size + gw;
            int bottom_diff_offset =
                (roi_batch_ind * channel + c) * height * width;
            int top_offset =
                (n * pooled_dim + ctop) * pooled_height * pooled_width;
            T top_diff_this_bin =
                top_diff[top_offset + ph * pooled_width + pw];
            int maxidx = argmax_data[top_offset + ph * pooled_width + pw];
            if (maxidx != -1) {
                // We use roi_bin_grid to sample the grid and mimic integral
                int roi_bin_grid_h = (sampling_ratio_h > 0)
                    ? sampling_ratio_h
                    : ceil(roi_height / pooled_height); // e.g. = 2
                int roi_bin_grid_w = (sampling_ratio_w > 0)
                    ? sampling_ratio_w
                    : ceil(roi_width / pooled_width);
                int iy = maxidx / roi_bin_grid_w;
                int ix = maxidx % roi_bin_grid_w;
                T y = roi_start_h + ph * bin_size_h +
                    static_cast<T>(iy + .5f) * bin_size_h /
                        static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
                T x = roi_start_w + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w /
                        static_cast<T>(roi_bin_grid_w);
                int y_low, y_high;
                bool y_ret = get_bounds(y, height, y_low, y_high);
                if (!y_ret) continue;
                int x_low, x_high;
                bool x_ret = get_bounds(x, width, x_low, x_high);
                if (!x_ret) continue;
                // bilinear_interpolation_gradient {{
                T w1, w2, w3, w4;
                get_bilinear_interp_params(
                    y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
                if (w1 > 0 && y_low >= 0 && x_low >= 0) {
                    T g1 = top_diff_this_bin * w1;
                    atomicAdd(&bottom_diff[
                        bottom_diff_offset + y_low * width + x_low], g1);
                }
                if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
                    T g2 = top_diff_this_bin * w2;
                    atomicAdd(&bottom_diff[
                        bottom_diff_offset + y_low * width + x_high], g2);
                }
                if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
                    T g3 = top_diff_this_bin * w3;
                    atomicAdd(&bottom_diff[
                        bottom_diff_offset + y_high * width + x_low], g3);
                }
                if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {
                    T g4 = top_diff_this_bin * w4;
                    atomicAdd(&bottom_diff[
                        bottom_diff_offset + y_high * width + x_high], g4);
                }
                // }}
            }
            ''',
            'ps_roi_max_align_2d_bwd',
            preamble=_GET_BILINEAR_INTERP_KERNEL,
        )(gy[0], self.argmax_data, bottom_rois, bottom_roi_indices,
          self.spatial_scale, channel, height, width,
          out_c, out_h, out_w, self.group_size,
          sampling_ratio_h, sampling_ratio_w, bottom_diff,
          size=gy[0].size)
        return bottom_diff, None, None
def ps_roi_max_align_2d(
        x, rois, roi_indices, outsize,
        spatial_scale, group_size, sampling_ratio=None
):
    r"""Position Sensitive Region of Interest (ROI) Max align function.

    This function computes position sensitive max value of input spatial patch
    with the given region of interests. Each ROI is split into
    :math:`(group\_size, group\_size)` regions, and the position sensitive
    value in each region is computed.

    Args:
        x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
        rois (array): Input roi. The shape is expected to
            be :math:`(R, 4)`, and each datum is set as below:
            (y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
        roi_indices (array): Input roi indices. The shape is expected to
            be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
        outsize ((int, int, int) or (int, int) or int): Expected output size
            after pooled: (channel, height, width) or (height, width)
            or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
            Channel parameter is used to assert the input shape.
        spatial_scale (float): Scale of the roi is resized.
        group_size (int): Position sensitive group size.
        sampling_ratio ((int, int) or int): Sampling step for the alignment.
            It must be an integer over :math:`1` or :obj:`None`, and the value
            is automatically decided when :obj:`None` is passed. Use of
            different ratio in height and width axis is also supported by
            passing tuple of int as ``(sampling_ratio_h, sampling_ratio_w)``.
            ``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.

    Returns:
        ~chainer.Variable: Output variable.

    See the original paper proposing PSROIPooling:
    `R-FCN <https://arxiv.org/abs/1605.06409>`_.
    See the original paper proposing ROIAlign:
    `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
    """
    # NOTE: the docstring is a raw string; the previous plain string made
    # every ``\_`` an invalid escape sequence (DeprecationWarning, and a
    # SyntaxWarning on modern Pythons).
    return PSROIMaxAlign2D(
        outsize, spatial_scale,
        group_size, sampling_ratio)(x, rois, roi_indices)
| chainer/chainercv | chainercv/functions/ps_roi_max_align_2d.py | Python | mit | 26,851 |
# Testing the line trace facility.
from test import support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
def basic():
    return 1
# Expected events: offsets are relative to the function's first line
# (the "def" line is offset 0).
basic.events = [(0, 'call'),
                (1, 'line'),
                (1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
    x = 1
    del x
    while 0:
        pass
    x = 1
# No events for lines 3-4: the "while 0:" block is optimized away.
arigo_example.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (5, 'line'),
                        (5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
    x = 1
    del x
    x = 1
# Every single-instruction line must produce its own 'line' event.
one_instr_line.events = [(0, 'call'),
                         (1, 'line'),
                         (2, 'line'),
                         (3, 'line'),
                         (3, 'return')]
def no_pop_tops():      # 0
    x = 1               # 1
    for a in range(2):  # 2
        if a:           # 3
            x = 1       # 4
        else:           # 5
            x = 1       # 6
# Both branches of the if are taken across the two loop iterations;
# the final (2, 'line') is the loop's exhausted-iterator check.
no_pop_tops.events = [(0, 'call'),
                      (1, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (6, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (4, 'line'),
                      (2, 'line'),
                      (2, 'return')]
def no_pop_blocks():
    y = 1
    while not y:
        bla
    x = 1
# The while body never runs (condition is immediately false), so line 3
# produces no event.
no_pop_blocks.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (4, 'line'),
                        (4, 'return')]
# "call" invokes "called", defined 3 lines above it; the negative
# offsets in call.events index into "called" relative to call's def line,
# so the two functions must stay exactly this many lines apart.
def called(): # line -3
    x = 1
def call():   # line 0
    called()
call.events = [(0, 'call'),
               (1, 'line'),
               (-3, 'call'),
               (-2, 'line'),
               (-2, 'return'),
               (1, 'return')]
# "raises" sits 3 lines above "test_raise"; the negative offsets in
# test_raise.events refer into it, so the spacing must be preserved.
def raises():
    raise Exception
def test_raise():
    try:
        raises()
    except Exception as exc:
        x = 1
test_raise.events = [(0, 'call'),
                     (1, 'line'),
                     (2, 'line'),
                     (-3, 'call'),
                     (-2, 'line'),
                     (-2, 'exception'),
                     (-2, 'return'),
                     (2, 'exception'),
                     (3, 'line'),
                     (4, 'line'),
                     (4, 'return')]
def _settrace_and_return(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
    _settrace_and_return(tracefunc)
# Tracing is installed mid-call, so the only event seen for the outer
# frame is its final 'return'.
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc
    raise RuntimeError
def settrace_and_raise(tracefunc):
    try:
        _settrace_and_raise(tracefunc)
    except RuntimeError as exc:
        pass
# Tracing starts mid-call, so only the exception propagation and the
# handler in the outer frame are recorded.
settrace_and_raise.events = [(2, 'exception'),
                             (3, 'line'),
                             (4, 'line'),
                             (4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generated for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
    a = 5
    b = 5
    if a == b:
        b = a+1
    else:
        pass
# The implicit "return None" is attributed to line 6 (the pass), hence
# the trailing (6, 'line') / (6, 'return') pair.
ireturn_example.events = [(0, 'call'),
                          (1, 'line'),
                          (2, 'line'),
                          (3, 'line'),
                          (4, 'line'),
                          (6, 'line'),
                          (6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
    items = range(0, 3)
    try:
        i = 0
        while 1:
            b = items[i]; i+=1
    except IndexError:
        pass
# Line 5 fires once per iteration until items[3] raises IndexError.
tightloop_example.events = [(0, 'call'),
                            (1, 'line'),
                            (2, 'line'),
                            (3, 'line'),
                            (4, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'exception'),
                            (6, 'line'),
                            (7, 'line'),
                            (7, 'return')]
def tighterloop_example():
    items = range(1, 4)
    try:
        i = 0
        while 1: i = items[i]
    except IndexError:
        pass
# The whole loop lives on line 4; i walks 0 -> 1 -> 2 -> 3, and
# items[3] raises IndexError.
tighterloop_example.events = [(0, 'call'),
                              (1, 'line'),
                              (2, 'line'),
                              (3, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'exception'),
                              (5, 'line'),
                              (6, 'line'),
                              (6, 'return')]
# generator_function sits 6 lines above generator_example; the negative
# offsets in generator_example.events refer into it, so the spacing
# between the two defs must be preserved.
def generator_function():
    try:
        yield True
        "continued"
    finally:
        "finally"
def generator_example():
    # any() will leave the generator before its end
    x = any(generator_function())
    # the following lines were not traced
    for x in range(10):
        y = x
generator_example.events = ([(0, 'call'),
                             (2, 'line'),
                             (-6, 'call'),
                             (-5, 'line'),
                             (-4, 'line'),
                             (-4, 'return'),
                             (-4, 'call'),
                             (-4, 'exception'),
                             (-1, 'line'),
                             (-1, 'return')] +
                            [(5, 'line'), (6, 'line')] * 10 +
                            [(5, 'line'), (5, 'return')])
class Tracer:
    """Trace function that records (lineno, event) pairs of traced frames."""
    def __init__(self):
        self.events = []
    def trace(self, frame, event, arg):
        """Standard trace callback: log the event and keep tracing."""
        self.events.append((frame.f_lineno, event))
        return self.trace
    def traceWithGenexp(self, frame, event, arg):
        """Like trace(), but first creates (without exhausting) a generator
        expression, to exercise issue1265."""
        (o for o in [1])
        self.events.append((frame.f_lineno, event))
        return self.trace
class TraceTestCase(unittest.TestCase):
# Disable gc collection when tracing, otherwise the
# deallocators may be traced as well.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.__code__.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.__code__.co_firstlineno,
tracer.events, func.events)
def set_and_retrieve_none(self):
sys.settrace(None)
assert sys.gettrace() is None
def set_and_retrieve_func(self):
def fn(*args):
pass
sys.settrace(fn)
try:
assert sys.gettrace() is fn
finally:
sys.settrace(None)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
def test_13_genexp(self):
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
# and if the traced function contains another generator
# that is not completely exhausted, the trace stopped.
# Worse: the 'finally' clause was not invoked.
tracer = Tracer()
sys.settrace(tracer.traceWithGenexp)
generator_example()
sys.settrace(None)
self.compare_events(generator_example.__code__.co_firstlineno,
tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
namespace = {}
exec("def f():\n" + "\n" * 256 + " pass", namespace)
self.run_and_compare(
namespace["f"],
[(0, 'call'),
(257, 'line'),
(257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
    """Checks that an exception raised *inside* a trace function is handled
    correctly (tracing disabled, recursion counter reset, no crash)."""
    def trace(self, frame, event, arg):
        """A trace function that raises an exception in response to a
        specific trace event."""
        if event == self.raiseOnEvent:
            raise ValueError # just something that isn't RuntimeError
        else:
            return self.trace
    def f(self):
        """The function to trace; raises an exception if that's the case
        we're testing, so that the 'exception' trace event fires."""
        if self.raiseOnEvent == 'exception':
            x = 0
            y = 1/x
        else:
            return 1
    def run_test_for_event(self, event):
        """Tests that an exception raised in response to the given event is
        handled OK."""
        self.raiseOnEvent = event
        try:
            # Iterating more times than the recursion limit proves the
            # interpreter's recursion counter is reset after each failure.
            for i in range(sys.getrecursionlimit() + 1):
                sys.settrace(self.trace)
                try:
                    self.f()
                except ValueError:
                    pass
                else:
                    self.fail("exception not thrown!")
        except RuntimeError:
            self.fail("recursion counter not reset")
    # Test the handling of exceptions raised by each kind of trace event.
    def test_call(self):
        self.run_test_for_event('call')
    def test_line(self):
        self.run_test_for_event('line')
    def test_return(self):
        self.run_test_for_event('return')
    def test_exception(self):
        self.run_test_for_event('exception')
    def test_trash_stack(self):
        # Regression test: raising from a 'line' event deep in a loop must
        # not corrupt (trash) the value stack of the traced frame.
        # NOTE: g() keys on f's firstlineno + 2, so no lines may be
        # inserted inside f's body.
        def f():
            for i in range(5):
                print(i)  # line tracing will raise an exception at this line
        def g(frame, why, extra):
            if (why == 'line' and
                frame.f_lineno == f.__code__.co_firstlineno + 2):
                raise RuntimeError("i am crashing")
            return g
        sys.settrace(g)
        try:
            f()
        except RuntimeError:
            # the test is really that this doesn't segfault:
            import gc  # redundant (gc imported at module level) but harmless
            gc.collect()
        else:
            self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
    """Trace function that performs a single jump.

    The first time execution reaches the source line named by the
    function-under-test's ``jump`` attribute (an offset from its def
    line), the tracer rewrites ``frame.f_lineno`` to the destination
    offset, then goes passive for the rest of the run.
    """
    def __init__(self, function):
        self.function = function
        self.jumpFrom, self.jumpTo = function.jump
        self.done = False
    def trace(self, frame, event, arg):
        # Only act once, and only inside the function under test.
        if self.done or frame.f_code != self.function.__code__:
            return self.trace
        firstLine = frame.f_code.co_firstlineno
        if frame.f_lineno == firstLine + self.jumpFrom:
            # self.jumpTo may deliberately be a non-integer (see
            # no_jump_to_non_integers); assign it raw in that case so the
            # frame raises the error the test expects.
            try:
                frame.f_lineno = firstLine + self.jumpTo
            except TypeError:
                frame.f_lineno = self.jumpTo
            self.done = True
        return self.trace
# The first set of 'jump' tests are for things that are allowed:
# Forward jump over lines 1-2: only line 3 runs.
def jump_simple_forwards(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
# Backward jump from line 2 to line 1, so line 1 executes twice.
def jump_simple_backwards(output):
    output.append(1)
    output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
# Jumping forwards out of a loop body (and over a nested loop) is allowed.
def jump_out_of_block_forwards(output):
    for i in 1, 2:
        output.append(2)
        for j in [3]: # Also tests jumping over a block
            output.append(4)
    output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
# Jumping backwards out of nested loops back to line 1 is allowed.
def jump_out_of_block_backwards(output):
    output.append(1)
    for i in [1]:
        output.append(3)
        for j in [2]: # Also tests jumping over a block
            output.append(5)
            output.append(6)
    output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
# A jump to a line that holds no code lands on the next real line.
def jump_to_codeless_line(output):
    output.append(1)
    # Jumping to this line should skip to the next one.
    output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
# A no-op jump: source and destination are the same line.
def jump_to_same_line(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
    try:
        output.append(2)
    finally:
        output.append(4)
        try:
            output.append(6)
        finally:
            output.append(8)
        output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
# Destination line 6 is past the end of the function; expect a
# ValueError whose message mentions "after".
def no_jump_too_far_forwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError as e:
        output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
# Destination line -1 is before the function; expect a ValueError
# whose message mentions "before".
def no_jump_too_far_backwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError as e:
        output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
    try:
        output.append(2)
    except:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
    try:
        output.append(2)
    except ValueError:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
    try:
        output.append(2)
    except ValueError as e:
        output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
    try:
        output.append(2)
    except (ValueError, RuntimeError) as e:
        output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
# Jumping forwards into the middle of a block must fail ("into").
def no_jump_forwards_into_block(output):
    try:
        output.append(2)
        for i in 1, 2:
            output.append(4)
    except ValueError as e:
        output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
# Jumping backwards into a block that was already exited must fail too.
def no_jump_backwards_into_block(output):
    try:
        for i in 1, 2:
            output.append(3)
        output.append(4)
    except ValueError as e:
        output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
# Jumping into a 'finally' block must fail ("finally").
def no_jump_into_finally_block(output):
    try:
        try:
            output.append(3)
            x = 1
        finally:
            output.append(6)
    except ValueError as e:
        output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
# Jumping out of a 'finally' block must fail as well.
def no_jump_out_of_finally_block(output):
    try:
        try:
            output.append(3)
        finally:
            output.append(5)
            output.append(6)
    except ValueError as e:
        output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
    try:
        output.append(2)
    except ValueError as e:
        output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
    """Verify f_lineno cannot be assigned outside a trace function.

    The assignment must raise a ValueError whose message mentions trace
    functions; anything else is a test failure.
    """
    caller = sys._getframe().f_back
    try:
        caller.f_lineno = caller.f_lineno
    except ValueError as e:
        # This is the exception we wanted; make sure the error message
        # talks about trace functions.
        if 'trace' not in str(e):
            raise
        return
    # Something's wrong - the expected exception wasn't raised.
    raise RuntimeError("Trace-function-less jump failed to fail")
class JumpTestCase(unittest.TestCase):
    """Runs each jump fixture under a JumpTracer and compares the values
    actually appended to ``output`` with the fixture's expected list."""
    def compare_jump_output(self, expected, received):
        """Fail with both lists shown if they differ."""
        if received != expected:
            self.fail( "Outputs don't match:\n" +
                       "Expected: " + repr(expected) + "\n" +
                       "Received: " + repr(received))
    def run_test(self, func):
        """Trace func(output) with a JumpTracer built from func.jump and
        check output against func.output."""
        tracer = JumpTracer(func)
        sys.settrace(tracer.trace)
        output = []
        func(output)
        sys.settrace(None)
        self.compare_jump_output(func.output, output)
    def test_01_jump_simple_forwards(self):
        self.run_test(jump_simple_forwards)
    def test_02_jump_simple_backwards(self):
        self.run_test(jump_simple_backwards)
    def test_03_jump_out_of_block_forwards(self):
        self.run_test(jump_out_of_block_forwards)
    def test_04_jump_out_of_block_backwards(self):
        self.run_test(jump_out_of_block_backwards)
    def test_05_jump_to_codeless_line(self):
        self.run_test(jump_to_codeless_line)
    def test_06_jump_to_same_line(self):
        self.run_test(jump_to_same_line)
    def test_07_jump_in_nested_finally(self):
        self.run_test(jump_in_nested_finally)
    def test_08_no_jump_too_far_forwards(self):
        self.run_test(no_jump_too_far_forwards)
    def test_09_no_jump_too_far_backwards(self):
        self.run_test(no_jump_too_far_backwards)
    def test_10_no_jump_to_except_1(self):
        self.run_test(no_jump_to_except_1)
    def test_11_no_jump_to_except_2(self):
        self.run_test(no_jump_to_except_2)
    def test_12_no_jump_to_except_3(self):
        self.run_test(no_jump_to_except_3)
    def test_13_no_jump_to_except_4(self):
        self.run_test(no_jump_to_except_4)
    def test_14_no_jump_forwards_into_block(self):
        self.run_test(no_jump_forwards_into_block)
    def test_15_no_jump_backwards_into_block(self):
        self.run_test(no_jump_backwards_into_block)
    def test_16_no_jump_into_finally_block(self):
        self.run_test(no_jump_into_finally_block)
    def test_17_no_jump_out_of_finally_block(self):
        self.run_test(no_jump_out_of_finally_block)
    def test_18_no_jump_to_non_integers(self):
        self.run_test(no_jump_to_non_integers)
    def test_19_no_jump_without_trace_function(self):
        no_jump_without_trace_function()
def test_main():
    """Run all of this module's test case classes."""
    cases = (
        TraceTestCase,
        RaisingTraceFuncTestCase,
        JumpTestCase,
    )
    support.run_unittest(*cases)
if __name__ == "__main__":
    test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/test/test_trace.py | Python | mit | 22,358 |
"""
Forms to support third-party to first-party OAuth 2.0 access token exchange
"""
from django.contrib.auth.models import User
from django.forms import CharField
from oauth2_provider.constants import SCOPE_NAMES
import provider.constants
from provider.forms import OAuthForm, OAuthValidationError
from provider.oauth2.forms import ScopeChoiceField, ScopeMixin
from provider.oauth2.models import Client
from requests import HTTPError
from social.backends import oauth as social_oauth
from social.exceptions import AuthException
from third_party_auth import pipeline
class AccessTokenExchangeForm(ScopeMixin, OAuthForm):
    """Form for access token exchange endpoint"""
    # All fields are declared required=False so that missing values can be
    # reported as structured OAuth errors (via _require_oauth_field) rather
    # than as plain form errors.
    access_token = CharField(required=False)
    scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
    client_id = CharField(required=False)
    def __init__(self, request, *args, **kwargs):
        # The request is kept so clean() can reach the social-auth backend
        # and the session attached to it.
        super(AccessTokenExchangeForm, self).__init__(*args, **kwargs)
        self.request = request
    def _require_oauth_field(self, field_name):
        """
        Raise an appropriate OAuthValidationError error if the field is missing
        """
        field_val = self.cleaned_data.get(field_name)
        if not field_val:
            raise OAuthValidationError(
                {
                    "error": "invalid_request",
                    "error_description": "{} is required".format(field_name),
                }
            )
        return field_val
    def clean_access_token(self):
        """
        Validates and returns the "access_token" field.
        """
        return self._require_oauth_field("access_token")
    def clean_client_id(self):
        """
        Validates and returns the "client_id" field.
        """
        return self._require_oauth_field("client_id")
    def clean(self):
        """Cross-field validation: check the backend, the client, and
        finally exchange the third-party access token for a user."""
        # Field-level validation already failed; skip the expensive checks.
        if self._errors:
            return {}
        backend = self.request.backend
        if not isinstance(backend, social_oauth.BaseOAuth2):
            raise OAuthValidationError(
                {
                    "error": "invalid_request",
                    "error_description": "{} is not a supported provider".format(backend.name),
                }
            )
        # NOTE(review): presumably marks this session as an API login so the
        # third-party-auth pipeline behaves accordingly -- confirm against
        # the pipeline module.
        self.request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
        client_id = self.cleaned_data["client_id"]
        try:
            client = Client.objects.get(client_id=client_id)
        except Client.DoesNotExist:
            raise OAuthValidationError(
                {
                    "error": "invalid_client",
                    "error_description": "{} is not a valid client_id".format(client_id),
                }
            )
        if client.client_type != provider.constants.PUBLIC:
            raise OAuthValidationError(
                {
                    # invalid_client isn't really the right code, but this mirrors
                    # https://github.com/edx/django-oauth2-provider/blob/edx/provider/oauth2/forms.py#L331
                    "error": "invalid_client",
                    "error_description": "{} is not a public client".format(client_id),
                }
            )
        self.cleaned_data["client"] = client
        user = None
        # A failed or invalid token exchange deliberately leaves user=None;
        # both HTTP failures and auth rejections map to invalid_grant below.
        try:
            user = backend.do_auth(self.cleaned_data.get("access_token"), allow_inactive_user=True)
        except (HTTPError, AuthException):
            pass
        if user and isinstance(user, User):
            self.cleaned_data["user"] = user
        else:
            # Ensure user does not re-enter the pipeline
            self.request.social_strategy.clean_partial_pipeline()
            raise OAuthValidationError(
                {
                    "error": "invalid_grant",
                    "error_description": "access_token is not valid",
                }
            )
        return self.cleaned_data
| Semi-global/edx-platform | common/djangoapps/auth_exchange/forms.py | Python | agpl-3.0 | 3,848 |
# -*- coding: utf-8 -*-
'''
Created on Apr 13, 2014
@copyright 2014, Milton C Mobley
Select strings based on caller components: prefixes, suffixes and substrings.
Regular expression matching is also supported.
Note that some patch and kernel files have utf-8 chars with code > 127. Some of
these codes are not legal utf-8 start byte codes. See functions.py for the file
read, write handling.
'''
import re
from inspect import isfunction
from patchtools.lib.ptobject import PTObject
from patchtools.lib.exceptions import PatchToolsError, PT_ParameterError
#++
class Matcher(PTObject):
    """ Implement filter selection of strings
    """
    #--

    #++
    def __init__(self, params):
        """ Constructor

        Args:
            params (dict): parameters
                match  (list, optional): string match pattern(s)
                prefix (list, optional): string start pattern(s)
                suffix (list, optional): string end pattern(s)
                substr (list, optional): substring pattern(s)
                regexp (list, optional): regular expression pattern(s)
                funcs  (list, optional): callback function(s)

        Raises:
            PT_ParameterError on invalid parameters

        Notes:
            At least one option must be specified for the filter to have an effect.
            Regular expression pattern strings should be coded using the r"..." string form.
        """
        #--
        self.name = 'Matcher'
        if (not isinstance(params, dict)):
            raise PT_ParameterError(self.name, 'params')
        self.prefix_patterns = self._check_optional_param(params, 'prefix', list, None)
        self.suffix_patterns = self._check_optional_param(params, 'suffix', list, None)
        self.substr_patterns = self._check_optional_param(params, 'substr', list, None)
        self.match_patterns = self._check_optional_param(params, 'match', list, None)
        regexp = self._check_optional_param(params, 'regexp', list, None)
        if (isinstance(regexp, list)):
            try:
                self.regex_patterns = [re.compile(s) for s in regexp]
            except Exception as e:
                # Surface the underlying re.error text to the caller
                raise PT_ParameterError(self.name, str(e))
        else:
            self.regex_patterns = None
        if ('funcs' in params):
            cbs = params['funcs']
            for cb in cbs:
                if (not isfunction(cb)):
                    raise PatchToolsError(self.name, 'callback must be a function')
            self.callbacks = cbs
        else:
            self.callbacks = None

    #++
    def __call__(self, string):
        """ Try to match string to stored filter

        Args:
            string (string): string to match

        Returns:
            text of the matching pattern, or None

        Notes:
            Pattern groups are tried in order: exact match, prefix, suffix,
            substring, regexp, callbacks; the first hit wins.
        """
        #--
        # BUG FIX: removed a leftover hard-coded debugging hook that tested
        # for the substring 'compatible = "ti,am3359-tscadc"' and did nothing.
        if (self.match_patterns is not None):
            for pattern in self.match_patterns:
                if (string == pattern):
                    return pattern
        if (self.prefix_patterns is not None):
            for pattern in self.prefix_patterns:
                if (string.startswith(pattern)):
                    return pattern
        if (self.suffix_patterns is not None):
            for pattern in self.suffix_patterns:
                if (string.endswith(pattern)):
                    return pattern
        if (self.substr_patterns is not None):
            for pattern in self.substr_patterns:
                if (pattern in string):
                    return pattern
        if (self.regex_patterns is not None):
            for pattern in self.regex_patterns:
                ret = pattern.match(string)
                if (ret is not None):
                    return str(pattern)
        if (self.callbacks is not None):
            for callback in self.callbacks:
                if callback(string):
                    return str(callback)
        return None
'''
Pixie: FreeBSD virtualization guest configuration client
Copyright (C) 2011 The Hotel Communication Network inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import threading, Queue as queue, time, subprocess, shlex, datetime
import urllib, tarfile, os, shutil, tempfile, pwd
import cherrypy
from cherrypy.process import wspbus, plugins
from pixie.lib.jails import EzJail
from pixie.lib.interfaces import NetInterfaces
class SetupTask(object):
    """Base class for one VM-configuration step.

    Subclasses implement ``run`` and report progress through ``log``,
    which writes to the cherrypy log and also pushes a timestamped,
    tab-separated entry onto the shared status queue.
    """

    def __init__(self, puck, queue):
        self.queue = queue
        self._puck = puck
        self.vm = puck.getVM()

    def run(self):
        """Execute the task; subclasses must override."""
        raise NotImplementedError("`run` must be defined.")

    def log(self, msg):
        """Log *msg* to cherrypy and enqueue `date<TAB>class<TAB>msg`."""
        now = datetime.datetime.now()
        task_name = self.__class__.__name__
        cherrypy.log("%s %s" % (task_name, msg))
        stamp = now.strftime("%Y-%m-%d %H:%M:%S")
        self.queue.put("%s\t%s\t%s" % (stamp, task_name, msg))
class RcReader(object):
    """Mixin with helpers for inspecting /etc/rc.conf."""

    def _has_line(self, lines, line_start):
        """Return True when any entry of *lines* begins with *line_start*."""
        return any(entry.startswith(line_start) for entry in lines)

    def _get_rc_content(self):
        """Return /etc/rc.conf as a list of lines.

        Raises RuntimeError when the file is missing, unreadable or empty.
        """
        content = None
        try:
            with open('/etc/rc.conf', 'r') as rc_file:
                content = rc_file.readlines()
        except IOError:
            # Fall through: an unreadable file is reported below as empty.
            pass
        if not content:
            raise RuntimeError("File `/etc/rc.conf` is empty!")
        return content
class EZJailTask(SetupTask, RcReader):
    '''
    Setups ezjail in the virtual machine.
    '''
    def run(self):
        """Enable ezjail in /etc/rc.conf, then install it from the
        configured FTP mirror. Returns True on success, False on
        IOError/OSError from either step."""
        try:
            self.log("Enabling EZJail.")
            self._enable_ezjail()
            self.log("Installing EZJail")
            # EzJail().install pulls the base system from the mirror
            # configured under `setup_plugin.ftp_mirror`.
            EzJail().install(cherrypy.config.get('setup_plugin.ftp_mirror'))
        except (IOError, OSError) as e:
            self.log("Error while installing ezjail: %s" % e)
            return False
        return True

    def _enable_ezjail(self):
        """Append `ezjail_enable="YES"` to /etc/rc.conf unless already set."""
        rc = self._get_rc_content()
        if self._has_line(rc, 'ezjail_enable'):
            self.log("EZJail is already enabled.")
            return
        self.log("Adding to rc: `%s`" % 'ezjail_enable="YES"')
        '''if we get here, it means ezjail_enable is not in rc.conf'''
        with open('/etc/rc.conf', 'a') as f:
            f.write("ezjail_enable=\"YES\"\n")
class SSHTask(SetupTask):
    '''Create the base user `puck` and add the authorized ssh keys'''
    def run(self):
        self._setup_ssh()
        return True

    def _setup_ssh(self):
        """Create the `puck` user if missing and install the VM's
        authorized SSH keys with owner-only permissions."""
        if not self.vm.keys:
            self.log("No keys to install.");
            return True
        #@TODO Could be moved to config values instead of hardcoded.
        user = 'puck'
        # Only create the account when getpwnam reports it does not exist.
        try:
            pwd.getpwnam(user)
        except KeyError as e:
            cmd = 'pw user add %s -m -G wheel' % user
            self.log("Adding user. Executing `%s`" % cmd)
            subprocess.Popen(shlex.split(str(cmd))).wait()
        user_pwd = pwd.getpwnam(user)
        path = '/home/%s/.ssh' % user
        authorized_file = "%s/authorized_keys" % path
        if not os.path.exists(path):
            os.mkdir(path)
        os.chown(path, user_pwd.pw_uid, user_pwd.pw_gid)
        # self.vm.keys maps a key name to a dict holding the public key
        # under 'key'; every key is appended to authorized_keys.
        with open(authorized_file, 'a') as f:
            for key in self.vm.keys:
                self.log("Writing key `%s`" % key)
                f.write('%s\n' % self.vm.keys[key]['key'])
        # Lock down permissions: read-only key file, owner-only directories.
        os.chmod(authorized_file, 0400)
        os.chown(authorized_file, user_pwd.pw_uid, user_pwd.pw_gid)
        os.chmod(path, 0700)
        os.chmod('/home/%s' % user, 0700)
class FirewallSetupTask(SetupTask, RcReader):
    """Write the VM's pf firewall rules, persist pf in rc.conf and (re)start pf."""
    def run(self):
        # TODO Move this to a congfiguration value from puck. Not high priority
        pf_conf = '/etc/pf.rules.conf'
        rc_conf = '/etc/rc.conf'
        self.setup_rc(rc_conf, pf_conf)
        self.setup_pf_conf(pf_conf)
        self.launch_pf()
        return True

    def launch_pf(self):
        """Restart pf so the freshly written rules take effect."""
        # Stop it in case it
        commands = ['/etc/rc.d/pf stop', '/etc/rc.d/pf start']
        for command in commands:
            self.log("Executing: `%s`" % command)
            subprocess.Popen(shlex.split(str(command))).wait()

    def setup_pf_conf(self, pf_conf):
        """Write the VM's firewall rules to *pf_conf*, normalized to LF
        line endings. Returns False (without writing) when the VM has
        no firewall rules."""
        rules = self.vm.firewall
        if not rules:
            self.log("No firewall to write.")
            return False
        self.log("Writing firewall rules at `%s`." % pf_conf)
        with open(pf_conf, 'w') as f:
            # Normalize CRLF and lone CR to Unix newlines.
            f.write(rules.replace('\r\n', '\n').replace('\r', '\n'))
            f.flush()

    def setup_rc(self, rc_conf, pf_conf):
        """Append the pf-related rc.conf entries that are not yet present."""
        #TODO Move this to a configuration value. Not high priority.
        rc_items = {
            'pf_enable' : 'YES',
            'pf_rules' : pf_conf,
            'pflog_enable' : 'YES',
            'gateway_enable' : 'YES'
        }
        rc_present = []
        rc = self._get_rc_content()
        # Collect the keys that already appear in rc.conf.
        for line in rc:
            for k in rc_items:
                if line.startswith(k):
                    rc_present.append(k)
                    break
        missing = set(rc_items.keys()) - set(rc_present)
        tpl = 'Adding to rc: `%s="%s"`'
        [self.log(tpl % (k, rc_items[k])) for k in missing]
        template = '%s="%s"\n'
        with open(rc_conf, 'a') as f:
            [f.write(template % (k,rc_items[k])) for k in missing]
            f.flush()
class InterfacesSetupTask(SetupTask, RcReader):
    '''Configures network interfaces for the jails.'''
    def run(self):
        """Register every jail IP as an interface alias, both live
        (ifconfig) and persistently (/etc/rc.conf)."""
        (netaddrs, missing) = self._get_missing_netaddrs()
        self._add_missing_netaddrs(missing)
        self._add_missing_rc(netaddrs)
        return True

    def _add_missing_rc(self, netaddrs):
        """Append `ifconfig_<iface>_aliasN` lines to /etc/rc.conf for the
        addresses it does not already mention."""
        rc_addresses = []
        rc = self._get_rc_content()
        # Counts existing alias lines so new ones continue the numbering;
        # side effect: fills rc_addresses with the existing alias lines.
        alias_count = self._calculate_alias_count(rc_addresses, rc)
        with open('/etc/rc.conf', 'a') as f:
            for netaddr in netaddrs:
                if self._add_rc_ip(rc_addresses, f, alias_count, netaddr):
                    alias_count += 1

    def _add_missing_netaddrs(self, netaddrs):
        """Bring up (via ifconfig) an alias for each address in *netaddrs*."""
        for netaddr in netaddrs:
            self.log("Registering new ip address `%s`" % netaddr['ip'])
            self._add_addr(netaddr['ip'], netaddr['netmask'])

    def _get_missing_netaddrs(self):
        """Return (all jail netaddrs, the subset not present as interfaces).

        Each netaddr is a dict with 'ip' and 'netmask' keys."""
        interfaces = NetInterfaces.getInterfaces()
        missing = []
        netaddrs = []
        for jail in self.vm.jails:
            netaddr = {'ip': jail.ip, 'netmask': jail.netmask}
            netaddrs.append(netaddr)
            if not interfaces.has_key(jail.ip):
                missing.append(netaddr)
        return (netaddrs, missing)

    def _calculate_alias_count(self, addresses, rc):
        """Count `ifconfig_<iface>_alias` lines in *rc*, appending each
        matching line to *addresses* as a side effect."""
        alias_count = 0
        for line in rc:
            if line.startswith('ifconfig_%s_alias' % self.vm.interface):
                alias_count += 1
                addresses.append(line)
        return alias_count

    def _add_addr(self, ip, netmask):
        """Immediately add *ip*/*netmask* as an alias with ifconfig."""
        cmd = "ifconfig %s alias %s netmask %s"
        command = cmd % (self.vm.interface, ip, netmask)
        self.log('executing: `%s`' % command)
        subprocess.Popen(shlex.split(str(command))).wait()

    def _add_rc_ip(self, rc_addresses, file, alias_count, netaddr):
        """Write one alias line to *file* unless the IP already appears in
        one of *rc_addresses*. Returns True when a line was written."""
        for item in rc_addresses:
            if item.find(netaddr['ip']) > 0:
                self.log("rc already knows about ip `%s`" % netaddr['ip'])
                return False
        self.log("Registering new rc value for ip `%s`" % netaddr['ip'])
        template = 'ifconfig_%s_alias%s="inet %s netmask %s"'
        line = "%s\n" % template
        values = (
            self.vm.interface, alias_count, netaddr['ip'], netaddr['netmask']
        )
        file.write(line % values)
        file.flush()
        return True
class HypervisorSetupTask(SetupTask, RcReader):
    '''
    Setups a few hypervisor settings such as Shared Memory/IPC
    '''
    def run(self):
        self._add_rc_settings()
        self._add_sysctl_settings()
        self._set_hostname()
        return True

    def _set_hostname(self):
        """Rewrite the hostname line of /etc/rc.conf to the VM name and
        apply it immediately with the hostname(1) command."""
        self.log("Replacing hostname in /etc/rc.conf")
        (fh, abspath) = tempfile.mkstemp()
        tmp = open(abspath, 'w')
        # Copy rc.conf to the temp file, substituting the hostname line.
        with open('/etc/rc.conf', 'r') as f:
            for line in f:
                if not line.startswith('hostname'):
                    tmp.write(line)
                    continue
                tmp.write('hostname="%s"\n' % self.vm.name)
        tmp.close()
        os.close(fh)
        # Swap the rewritten copy into place.
        os.remove('/etc/rc.conf')
        shutil.move(abspath, '/etc/rc.conf')
        os.chmod('/etc/rc.conf', 0644)
        cmd = str('hostname %s' % self.vm.name)
        self.log('Executing: `%s`' % cmd)
        subprocess.Popen(shlex.split(cmd)).wait()

    def _add_sysctl_settings(self):
        """When `hypervisor.jail_sysvipc_allow` is configured, allow SysV
        IPC in jails both live (sysctl) and persistently (sysctl.conf)."""
        sysvipc = cherrypy.config.get('hypervisor.jail_sysvipc_allow')
        ipc_setting = 'security.jail.sysvipc_allowed'
        self.log("Configuring sysctl")
        with open('/etc/sysctl.conf', 'r') as f:
            sysctl = f.readlines()
        if sysvipc:
            cmd = str("sysctl %s=1" % ipc_setting)
            self.log('Executing: `%s`' % cmd)
            subprocess.Popen(shlex.split(cmd)).wait()
            if self._has_line(sysctl, ipc_setting):
                self.log('SysV IPC already configured in sysctl.conf')
                return
            template = '%s=%s\n'
            data = template % (ipc_setting, 1)
            self.log('Adding to sysctl.conf: `%s`' % data)
            with open('/etc/sysctl.conf', 'a') as f:
                f.write(data)

    def _add_rc_settings(self):
        """Append cherrypy-configured hypervisor settings to /etc/rc.conf
        when they are set and not already present in the file."""
        items = [
            'jail_sysvipc_allow',
            'syslogd_flags'
        ]
        rc = self._get_rc_content()
        # settings will contain items to be added to rc
        settings = {}
        for i in items:
            value = cherrypy.config.get('hypervisor.%s' % i)
            if not value:
                continue
            if self._has_line(rc, i):
                continue
            self.log('Adding to rc: `%s="%s"`' % (i, value))
            settings[i] = value
        # settings now contains items to be added
        template = '%s="%s"\n'
        with open('/etc/rc.conf', 'a') as f:
            [f.write(template % (k, settings[k])) for k in settings]
            f.flush()
class EZJailSetupTask(SetupTask):
    '''
    Setups ezjail in the virtual machine
    '''
    def run(self):
        """Download every jail flavour tarball and extract it under the
        flavours directory. Returns True on success, False when the
        directory cannot be created, nothing was downloaded, or a
        downloaded file is not a tarball."""
        base_dir = cherrypy.config.get('setup_plugin.jail_dir')
        dst_dir = '%s/flavours' % base_dir
        if not os.path.isdir(dst_dir):
            try:
                self.log("Creating folder `%s`." % dst_dir)
                os.makedirs(dst_dir)
            except OSError as e:
                self.log('Could not create folder `%s`' % dst_dir)
                return False
        # Holds the temporary file list
        tmpfiles = self._retrieveFlavours()
        if not tmpfiles:
            self.log('No flavours downloaded.')
            return False
        # Verify and extract the flavour tarball
        for file in tmpfiles:
            # Verify
            if not tarfile.is_tarfile(file['tmp_file']):
                msg = "File `%s` is not a tarfile."
                self.log(msg % file['tmp_file'])
                return False
            self.log('Extracting `%s`' % file['tmp_file'])
            # Extraction; failures are logged but do not abort the loop.
            try:
                with tarfile.open(file['tmp_file'], mode='r:*') as t:
                    '''Will raise KeyError if file does not exists.'''
                    if not t.getmember(file['type']).isdir():
                        msg = "Tar member `%s` is not a folder."
                        raise tarfile.ExtractError(msg % file['type'])
                    t.extractall("%s/" % dst_dir)
            except (IOError, KeyError, tarfile.ExtractError) as e:
                msg = "File `%s` could not be extracted. Reason: %s"
                self.log(msg % (file['tmp_file'], e))
            # Remove the temporary tarball
            try:
                os.unlink(file['tmp_file'])
            except OSError as e:
                # BUG FIX: this previously caught `OSerror` (a typo), which
                # raised NameError whenever os.unlink actually failed.
                msg = "Error while removing file `%s`: %s"
                self.log(msg % (file['tmp_file'], e))
        return True

    def _retrieveFlavours(self):
        '''Retrieve the tarball for each flavour.

        Returns a list of {'type': jail_type, 'tmp_file': path} dicts, or
        False as soon as one download fails.
        '''
        tmpfiles = []
        jail_dir = cherrypy.config.get('setup_plugin.jail_dir')
        for jail in self.vm.jails:
            (handle, tmpname) = tempfile.mkstemp(dir=jail_dir)
            self.log("Fetching flavour `%s` at `%s`" % (jail.name, jail.url))
            try:
                (filename, headers) = urllib.urlretrieve(jail.url, tmpname)
            except (urllib.ContentTooShortError, IOError) as e:
                msg = "Error while retrieving jail `%s`: %s"
                self.log(msg % (jail.name, e))
                return False
            tmpfiles.append({'type': jail.jail_type, 'tmp_file': filename})
            self.log("Jail `%s` downloaded at `%s`" % (jail.name, filename))
        return tmpfiles
class JailConfigTask(SetupTask):
    '''
    Handles jails configuration
    '''
    def run(self):
        """For every jail: prepare its flavour directory (ssh keys,
        resolv.conf, hostname, yum repository) and create the jail.
        Returns False on the first failure, True when all succeed."""
        jail_dir = cherrypy.config.get('setup_plugin.jail_dir')
        flavour_dir = "%s/flavours" % jail_dir
        for jail in self.vm.jails:
            self.log("Configuring jail `%s`." % jail.jail_type)
            path = "%s/%s" % (flavour_dir, jail.jail_type)
            authorized_key_file = "%s/installdata/authorized_keys" % path
            resolv_file = "%s/etc/resolv.conf" % path
            yum_file = "%s/installdata/yum_repo" % path
            rc_file = "%s/etc/rc.conf" % path
            host_file = "%s/etc/hosts" % path
            # Create /installdata and /etc folder.
            # NOTE(review): these mkdir calls run *before* the
            # flavour-existence check below; os.mkdir raises OSError if
            # `path` itself is missing -- confirm intended ordering.
            for p in ['%s/installdata', '%s/etc']:
                if not os.path.exists(p % path):
                    os.mkdir(p % path)
            # Verify the flavours exists.
            exists = os.path.exists(path)
            is_dir = os.path.isdir(path)
            if not exists or not is_dir:
                msg = "Flavour `%s` directory is missing in `%s."
                self.log(msg % (jail.jail_type, flavour_dir))
                return False
            msg = "Retrieving yum repository for environment `%s`."
            self.log(msg % self.vm.environment)
            yum_repo = self._puck.getYumRepo(self.vm.environment)
            self.log("Writing ssh keys.")
            if not self._writeKeys(jail, authorized_key_file):
                return False
            self.log("Copying resolv.conf.")
            if not self._writeResolvConf(jail, resolv_file):
                return False
            self.log("Updating jail hostname to `%s-%s`" % (self.vm.name, jail.jail_type))
            if not self._update_hostname(jail, rc_file, host_file):
                return False
            self.log("Writing yum repository.")
            if not self._writeYumRepoConf(yum_repo, yum_file):
                return False
            self.log("Creating jail.")
            if not self._createJail(jail):
                return False
        return True

    def _writeKeys(self, jail, authorized_key_file):
        '''Write authorized keys'''
        try:
            with open(authorized_key_file, 'w') as f:
                for key in self.vm.keys.values():
                    f.write("%s\n" % key['key'])
        except IOError as e:
            msg = "Error while writing authorized keys to jail `%s`: %s"
            self.log(msg % (jail.jail_type, e))
            return False
        return True

    def _update_hostname(self, jail, rc_file, host_file):
        """Set `<vm-name>-<jail_type>` as the jail's hostname in its
        rc.conf (adding the line if absent) and append it to the
        127.0.0.1 entry of its hosts file."""
        hostname = "%s-%s" % (self.vm.name, jail.jail_type)
        self.log("Replacing hostname in %s" % rc_file)
        (fh, abspath) = tempfile.mkstemp()
        has_hostname = False
        tmp = open(abspath, 'w')
        # Rewrite rc.conf into the temp file, replacing the hostname line.
        with open(rc_file, 'r') as f:
            for line in f:
                if not line.startswith('hostname'):
                    tmp.write(line)
                    continue
                tmp.write('hostname="%s"\n' % hostname)
                has_hostname = True
        # No hostname line existed: append one.
        if not has_hostname:
            tmp.write('hostname="%s"\n' % hostname)
        tmp.close()
        os.close(fh)
        os.remove(rc_file)
        shutil.move(abspath, rc_file)
        os.chmod(rc_file, 0644)
        self.log("Adding new hostname in %s" % host_file)
        (fh, abspath) = tempfile.mkstemp()
        has_hostname = False
        tmp = open(abspath, 'w')
        # Append the hostname to the loopback entry of /etc/hosts.
        with open(host_file, 'r') as f:
            for line in f:
                if not line.startswith('127.0.0.1'):
                    tmp.write(line)
                    continue
                tmp.write('%s %s\n' % (line.replace('\n', ''), hostname))
        tmp.close()
        os.close(fh)
        os.remove(host_file)
        shutil.move(abspath, host_file)
        os.chmod(host_file, 0644)
        return True

    def _writeResolvConf(self, jail, resolv_file):
        '''Copy resolv.conf'''
        try:
            shutil.copyfile('/etc/resolv.conf', resolv_file)
        except IOError as e:
            self.log("Error while copying host resolv file: %s" % e)
            return False
        return True

    def _writeYumRepoConf(self, yum_repo, yum_file):
        '''Setup yum repo.d file ezjail will use.'''
        try:
            with open(yum_file, 'w') as f:
                f.write(yum_repo['data'])
        except (KeyError, IOError) as e:
            self.log("Error while writing YUM repo data: %s" % e)
            return False
        return True

    def _createJail(self, jail):
        '''Create the jail'''
        try:
            jail.create()
        except OSError as e:
            msg = "Error while installing jail `%s`: %s"
            self.log(msg % (jail.jail_type, e))
            return False
        return True
class JailStartupTask(SetupTask):
    '''
    Handles starting each jail.
    '''
    def run(self):
        """Start every jail and verify it is actually running.
        Returns False on the first jail that fails to start."""
        # Start each jail
        for jail in self.vm.jails:
            self.log("Starting jail `%s`" % jail.jail_type)
            try:
                status = jail.start()
            except OSError as e:
                self.log("Could not start jail `%s`: %s" % (jail.jail_type, e))
                return False
            self.log("Jail status: %s" % status)
            self.log("Jail `%s` started" % jail.jail_type)
            # start() returning is not proof of life; double-check status().
            if not jail.status():
                self.log("Jail `%s` is not running!" % jail.jail_type)
                return False
        return True
class SetupWorkerThread(threading.Thread):
    """
    Thread class with a stop() method. The thread itself has to check
    regularly for the stopped() condition.

    Task classes are pulled from `queue` and instantiated with
    (puck, outqueue); a task whose run() returns falsy aborts the whole
    run, drains the queue and marks the VM `setup_failed`. When the
    queue stays empty for 10 seconds the run completes successfully.
    """
    def __init__(self, bus, queue, outqueue, puck):
        # BUG FIX: was `super(self.__class__, self)`, which recurses
        # infinitely if this class is ever subclassed.
        super(SetupWorkerThread, self).__init__()
        self._stop = threading.Event()
        self.running = threading.Event()
        self.successful = False
        self.completed = False
        self._queue = queue
        self._bus = bus
        self._outqueue = outqueue
        self._puck = puck

    def stop(self):
        """Ask the worker to stop before the next task."""
        self._stop.set()

    def stopped(self):
        """Return True when stop() has been requested."""
        return self._stop.is_set()

    def _step(self):
        '''
        Run a task

        @raise RuntimeError when the task failed to complete
        @raise queue.Empty when no task arrives within 10 seconds
        '''
        # This will probably need to be wrapped in a try/catch.
        task = self._queue.get(True, 10)(self._puck, self._outqueue)
        loginfo = (self.__class__.__name__, task.__class__.__name__)
        task.log('Starting')
        if not task.run():
            raise RuntimeError("%s error while running task `%s`" % loginfo)
        task.log('Completed')
        self._queue.task_done()

    def run(self):
        """Process tasks until stopped, a task fails, or the queue dries up.

        Returns False when the worker was already run/running or on task
        failure; on the normal path it falls off the end (returns None).
        """
        if self.completed:
            self._bus.log("%s had already been run." % self.__class__.__name__)
            return False
        if self.running.is_set():
            self._bus.log("%s is already running." % self.__class__.__name__)
            return False
        self.running.set()
        self._bus.log("%s started." % self.__class__.__name__)
        try:
            while not self.stopped():
                self._step()
        except RuntimeError as err:
            self._bus.log(str(err))
            self._empty_queue()
            self._puck.getVM().status = 'setup_failed'
            self._puck.updateStatus()
            # BUG FIX: was `self.succesful` (typo) -- the real `successful`
            # attribute set in __init__ was never updated.
            self.successful = False
            self.completed = True
            return False
        except queue.Empty:
            # Queue drained: normal completion.
            pass
        self.completed = True
        # BUG FIX: was `self.sucessful` (typo).
        self.successful = True
        self._puck.getVM().status = 'setup_complete'
        self._puck.updateStatus()
        self._outqueue.put("%s finished." % self.__class__.__name__)

    def _empty_queue(self):
        """Discard all pending tasks without blocking."""
        while not self._queue.empty():
            try:
                self._queue.get(False)
            except queue.Empty:
                return
class SetupPlugin(plugins.SimplePlugin):
    '''
    Handles tasks related to virtual machine setup.

    The plugin launches a separate thread to asynchronously execute the tasks.
    '''
    def __init__(self, puck, bus, freq=30.0):
        plugins.SimplePlugin.__init__(self, bus)
        self.freq = freq
        self._puck = puck
        self._queue = queue.Queue()
        self._workerQueue = queue.Queue()
        self.worker = None
        self.statuses = []

    def start(self):
        """Bus start hook: expose the switchboard on the `setup` channel."""
        self.bus.log('Starting up setup tasks')
        self.bus.subscribe('setup', self.switch)
    # cherrypy bus priority for the start listener; assigned at class-body
    # level so it attaches to the function object.
    start.priority = 70

    def stop(self):
        """Bus stop hook: ask the worker thread to stop."""
        self.bus.log('Stopping down setup task.')
        self._setup_stop();

    def switch(self, *args, **kwargs):
        '''
        This is the task switchboard. Depending on the parameters received,
        it will execute the appropriate action.
        '''
        if not 'action' in kwargs:
            # NOTE(review): SimplePlugin does not obviously provide `log`;
            # this looks like it should be `self.bus.log` -- confirm.
            self.log("Parameter `action` is missing.")
            return
        # Default task
        def default(**kwargs):
            return
        return {
            'start': self._setup_start,
            'stop': self._setup_stop,
            'status': self._setup_status,
            'clear': self._clear_status
        }.get(kwargs['action'], default)()

    def _clear_status(self, **kwargs):
        '''Clear the status list'''
        del(self.statuses[:])

    def _setup_stop(self, **kwargs):
        """Signal the worker thread to stop, if one is alive."""
        self.bus.log("Received stop request.")
        if self.worker and self.worker.isAlive():
            self.worker.stop()

    def _start_worker(self):
        """Create and start a fresh SetupWorkerThread."""
        self.worker = SetupWorkerThread(
            bus=self.bus, queue = self._queue,
            outqueue = self._workerQueue, puck = self._puck
        )
        self.worker.start()

    def _setup_start(self, **kwargs):
        """Ensure a worker is running, then enqueue the configured tasks."""
        self.bus.log("Received start request.")
        # Start the worker if it is not running.
        if not self.worker:
            self._start_worker()
        # Restart a dead worker only when its previous run did not succeed.
        if not self.worker.is_alive() and not self.worker.successful:
            self._start_worker()
        # @TODO: Persistence of the list when failure occurs.
        # or a state machine instead of a queue.
        for task in cherrypy.config.get('setup_plugin.tasks'):
            self._queue.put(task)

    def _setup_status(self, **kwargs):
        '''
        Returns the current log queue and if the setup is running or not.
        '''
        # Drain the worker's output queue into the status list.
        status = self._readQueue(self._workerQueue)
        while status:
            self.statuses.append(status)
            status = self._readQueue(self._workerQueue)
        if not self.worker or not self.worker.isAlive():
            return (self.statuses, False)
        return (self.statuses, True)

    def _readQueue(self, q, blocking = True, timeout = 0.2):
        '''
        Wraps code to read from a queue, including exception handling.
        '''
        try:
            item = q.get(blocking, timeout)
        except queue.Empty:
            return None
        return item
| masom/Puck | client/pixie/lib/setup_plugin.py | Python | lgpl-3.0 | 24,567 |
#!/usr/bin/python
##########################################################################
#
# MTraceCheck
# Copyright 2017 The Regents of the University of Michigan
# Doowon Lee and Valeria Bertacco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
import os
import sys
############################################################
## Parse the history log file
############################################################
""" Format
### Execution 0
T0: 0-FFFF001E,D,FFFF000D 1-10006 2-FFFF000B,FFFF0009,0 3-10006,38,52 4-3B 5-A,0,FFFF0004,52 6-FFFF0005,19 7-52,0 8-6,24,29,C,29 9-FFFF0019 10-F,10036 11-10036,29 12- 13-20,10010,10043 14-FFFF0005,10024,19,10010,10045,10043 15-1D,29,5F
T1: 0-FFFF0017,3,19,1E,10006,10051,29 1-1,A,10012,1E,10006,10050 2-10010,C 3-FFFF000D,10024,20,10006,FFFF000D 4-FFFF001A,10050 5-9,1001D,14,1002D 6-10017,38 7-FFFF0003,FFFF0007,24,FFFF000D,38,53,1002D 8-10005,10057 9-FFFF0003,9,2B,29,10036 10-FFFF001D 11-FFFF0008,10010,F,10010,0,2B,10036,10006 12-FFFF0002,FFFF000D,10033 13-FFFF001E,1001C,C 14-24,10045 15-14,20,3B
### Execution 1
T0: 0-FFFF001E,D,FFFF000D 1-10006 2-FFFF000B,10010,0 3-10006,38,52 4-3B 5-A,0,FFFF0004,52 6-FFFF0005,19 7-52,0 8-6,24,29,C,29 9-FFFF0019 10-F,10036 11-10036,29 12- 13-20,10010,1003C 14-FFFF0005,13,19,10010,10045,10043 15-1D,29,5F
T1: 0-FFFF0017,29,19,3A,10006,10051,29 1-1,A,10012,1E,10006,10050 2-10010,C 3-FFFF000D,2B,20,10006,46 4-FFFF001A,10050 5-9,1001D,14,1002D 6-20,38 7-FFFF0003,FFFF0007,24,FFFF000D,38,53,1002D 8-10005,10057 9-FFFF0003,9,2B,29,10036 10-FFFF001D 11-FFFF0008,10010,F,10010,0,2B,10036,10006 12-FFFF0002,FFFF000D,10033 13-19,1001C,C 14-24,10045 15-14,20,4C
...
"""
def parseHistoryFile(filenames, verbosity):
    """Parse MTraceCheck execution-history log files.

    Args:
        filenames: iterable of history-file paths.
        verbosity: when > 0, print the name of each file being processed.

    Returns:
        hist[executionIndex][threadIndex][registerIndex] -> list of int
        load values (tokens parsed as hexadecimal). Registers whose value
        list is empty (e.g. "12-") are omitted.
    """
    hist = dict()
    for histFileName in filenames:
        # `with` guarantees the handle is closed even when a malformed
        # line raises below (the original leaked it on that path).
        with open(histFileName, "r") as histFP:
            if (verbosity > 0):
                print("Processing %s" % (histFileName))
            executionIndex = None
            for line in histFP:
                if (line.startswith("### Execution ")):  # 14 characters
                    executionIndex = int(line[14:])
                    assert(executionIndex not in hist)
                    hist[executionIndex] = dict()
                else:
                    # Thread lines look like "T0: 0-A,B 1- 2-10 ..."
                    assert(executionIndex is not None)
                    assert(line.startswith("T"))
                    tempIndex = line.find(":")
                    threadIndex = int(line[1:tempIndex])
                    assert(threadIndex not in hist[executionIndex])
                    hist[executionIndex][threadIndex] = dict()
                    line = line[tempIndex+1:]
                    for registerToken in line.split():
                        tokens = registerToken.split("-")
                        registerIndex = int(tokens[0])
                        assert(registerIndex not in hist[executionIndex][threadIndex])
                        loadValueTokens = tokens[1].split(",")
                        if (len(loadValueTokens) == 1 and loadValueTokens[0] == ""):
                            # Register never loaded during this execution
                            continue
                        hist[executionIndex][threadIndex][registerIndex] = \
                            [int(v, 16) for v in loadValueTokens]
    return hist
| leedoowon/MTraceCheck | src_main/parse_hist.py | Python | apache-2.0 | 4,024 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from djanban.utils.week import get_iso_week_of_year
# Daily spent time by member
class DailySpentTime(models.Model):
    class Meta:
        verbose_name = u"Spent time"
        verbose_name_plural = u"Spent times"
        # Composite indices covering the common report queries:
        # per-date, per-date-per-board and board-first aggregations
        # of spent_time.
        index_together = (
            ("date", "week_of_year", "spent_time"),
            ("date", "week_of_year", "board", "spent_time"),
            ("board", "date", "week_of_year", "spent_time")
        )
uuid = models.CharField(max_length=128, verbose_name=u"External id of the comment", unique=False, null=True)
board = models.ForeignKey("boards.Board", verbose_name=u"Board", related_name="daily_spent_times")
card = models.ForeignKey("boards.Card", verbose_name=u"Card", related_name="daily_spent_times", null=True)
comment = models.OneToOneField("boards.CardComment", verbose_name=u"Comment", related_name="daily_spent_time", null=True)
member = models.ForeignKey("members.Member", verbose_name=u"Member", related_name="daily_spent_times")
description = models.TextField(verbose_name="Description of the task")
date = models.DateField(verbose_name="Date of the time measurement")
day_of_year = models.CharField(verbose_name="Day number of the time measurement", max_length=16)
week_of_year = models.CharField(verbose_name="Week number of the time measurement", max_length=16)
weekday = models.CharField(verbose_name="Week day of the time measurement", max_length=16)
adjusted_spent_time = models.DecimalField(verbose_name=u"Adjusted spent time for this day",
decimal_places=4, max_digits=12, default=None, null=True)
spent_time = models.DecimalField(verbose_name=u"Spent time for this day", decimal_places=4, max_digits=12,
default=None, null=True)
rate_amount = models.DecimalField(verbose_name=u"Rate amount for this spent time", decimal_places=4, max_digits=12,
default=None, null=True)
estimated_time = models.DecimalField(verbose_name=u"Estimated time for this day", decimal_places=4, max_digits=12,
default=None, null=True)
diff_time = models.DecimalField(verbose_name=u"Difference between the estimated time and the spent time",
decimal_places=4, max_digits=12,
default=None, null=True)
    # --- Convenience date accessors ---------------------------------------
    @property
    def day(self):
        """Day of month of this measurement's date."""
        return self.date.day

    @property
    def month(self):
        """Month of this measurement's date."""
        return self.date.month

    @property
    def year(self):
        """Year of this measurement's date."""
        return self.date.year

    @property
    def iso_date(self):
        """The date in ISO-8601 format (YYYY-MM-DD)."""
        return self.date.isoformat()
    # Re-save an existing DailySpentTime instance through the canonical add() path.
    @staticmethod
    def add_daily_spent_time(daily_spent_time):
        """Persist *daily_spent_time* by delegating to DailySpentTime.add."""
        DailySpentTime.add(board=daily_spent_time.board,
                           card=daily_spent_time.card,
                           comment=daily_spent_time.comment,
                           description=daily_spent_time.description,
                           member=daily_spent_time.member,
                           date=daily_spent_time.date,
                           spent_time=daily_spent_time.spent_time,
                           estimated_time=daily_spent_time.estimated_time)
# Add a new amount of spent time to a member
@staticmethod
def add(board, member, date, card, comment, description, spent_time, estimated_time):
# In case a uuid is passed, load the Member object
if type(member) is str or type(member) is unicode:
try:
member = board.members.get(uuid=member)
except ObjectDoesNotExist:
return False
weekday = date.strftime("%w")
week_of_year = get_iso_week_of_year(date)
day_of_year = date.strftime("%j")
adjusted_spent_time = None
# Convert spent_time to Decimal if is a number
if spent_time is not None:
spent_time = Decimal(spent_time)
adjusted_spent_time = member.adjust_spent_time(spent_time, date)
# Convert estimated_time to Decimal if is a number
if estimated_time is not None:
estimated_time = Decimal(estimated_time)
elif spent_time is not None:
estimated_time = Decimal(spent_time)
# Compute difference between spent_time and estimated_time
diff_time = None
if spent_time is not None and estimated_time is not None:
diff_time = Decimal(estimated_time) - Decimal(spent_time)
# Creation of daily_spent_time
daily_spent_time = DailySpentTime(board=board, member=member, card=card, comment=comment, uuid=comment.uuid,
description=description,
adjusted_spent_time=adjusted_spent_time,
spent_time=spent_time,
estimated_time=estimated_time,
diff_time=diff_time,
date=date, day_of_year=day_of_year, week_of_year=week_of_year,
weekday=weekday)
# Rate amount computation
hourly_rate = board.get_date_hourly_rate(date)
if hourly_rate:
if daily_spent_time.rate_amount is None:
daily_spent_time.rate_amount = 0
daily_spent_time.rate_amount += hourly_rate.amount
# Saving the daily spent time
daily_spent_time.save()
return spent_time
    # Build (but do not save) a DailySpentTime from a spent/estimated-time comment.
    @staticmethod
    def factory_from_comment(comment):
        """Construct a DailySpentTime instance from *comment*.

        Date, description and spent/estimated values come from the parsed
        comment content; rate_amount is spent_time * the board's hourly
        rate for that date (when both are available).
        """
        card = comment.card
        board = card.board
        spent_estimated_time = comment.get_spent_estimated_time_from_content()
        date = spent_estimated_time["date"]
        weekday = date.strftime("%w")
        week_of_year = get_iso_week_of_year(date)
        day_of_year = date.strftime("%j")
        spent_time = spent_estimated_time["spent_time"]
        estimated_time = spent_estimated_time["estimated_time"]
        # Set adjusted spent time
        adjusted_spent_time = None
        if spent_time is not None:
            spent_time = Decimal(spent_time)
            adjusted_spent_time = comment.author.adjust_spent_time(spent_time, date)
        # Convert estimated_time to Decimal if is a number
        if estimated_time is not None:
            estimated_time = Decimal(estimated_time)
        elif spent_time is not None:
            estimated_time = Decimal(spent_time)
        # NOTE(review): unlike add(), this subtraction is unguarded and
        # raises TypeError when both values are None; presumably a parsed
        # spent-time comment always carries a spent time -- confirm.
        diff_time = estimated_time - spent_time
        rate_amount = None
        hourly_rate = board.get_date_hourly_rate(date)
        if spent_time is not None and hourly_rate is not None:
            rate_amount = spent_time * hourly_rate.amount
        daily_spent_time = DailySpentTime(
            uuid=comment.uuid, board=board, card=card, comment=comment,
            date=spent_estimated_time["date"], weekday=weekday, week_of_year=week_of_year, day_of_year=day_of_year,
            spent_time=spent_time, adjusted_spent_time=adjusted_spent_time,
            estimated_time=estimated_time, diff_time=diff_time,
            description=spent_estimated_time["description"],
            member=comment.author, rate_amount=rate_amount
        )
        return daily_spent_time
@staticmethod
def create_from_comment(comment):
daily_spent_time = DailySpentTime.factory_from_comment(comment)
if comment.id:
daily_spent_time.save()
else:
daily_spent_time.comment = None
daily_spent_time.save()
return daily_spent_time
# Set daily spent time from a card comment
def set_from_comment(self, comment):
spent_estimated_time = comment.spent_estimated_time
if spent_estimated_time:
date = spent_estimated_time["date"]
spent_time = spent_estimated_time["spent_time"]
estimated_time = spent_estimated_time["estimated_time"]
# Set the object attributes
self.date = date
self.spent_time = spent_time
if spent_time is not None:
self.adjusted_spent_time = comment.author.adjust_spent_time(spent_time, date)
self.estimated_time = estimated_time
# Update adjusted spent time for this DailySpentTime
def update_adjusted_spent_time(self):
if self.spent_time is None:
self.adjusted_spent_time = None
else:
self.adjusted_spent_time = self.member.adjust_spent_time(self.spent_time, self.date)
DailySpentTime.objects.filter(id=self.id).update(adjusted_spent_time=self.adjusted_spent_time)
| diegojromerolopez/djanban | src/djanban/apps/dev_times/models.py | Python | mit | 8,954 |
"""
Django settings for felicity_threads_base project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import datetime
import os
# Two dirname() calls: BASE_DIR is the project root (parent of this settings package).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class UTC(datetime.tzinfo):
    """Concrete tzinfo for UTC, used to build timezone-aware datetimes."""

    # UTC has no offset and no daylight saving; one shared zero delta suffices.
    _ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        """Offset from UTC is always zero."""
        return self._ZERO

    def tzname(self, dt):
        """Canonical name of this timezone."""
        return "UTC"

    def dst(self, dt):
        """UTC never observes daylight saving time."""
        return self._ZERO


# Module-level singleton used by the contest start/end datetimes below.
utc = UTC()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# This is an example settings file: generate a fresh key per deployment.
SECRET_KEY = '%)&6@k)c5%9zicp4-5wyo(d!3x1mdbjo7kc(4x&k_7qa-qk$kp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# A leading dot matches the domain itself and all of its subdomains; the
# trailing-dot entry covers fully-qualified hostnames.
ALLOWED_HOSTS = [
    '.felicity.iiit.ac.in',
    '.felicity.iiit.ac.in.',
]
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party / project apps below.
    'longerusername',
    'base',
    'djcelery',
    'gordian_knot',
)
# NOTE: middleware order matters; CAS middleware must come after auth.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django_cas.middleware.CASMiddleware',
    'django.contrib.admindocs.middleware.XViewMiddleware',
    # Project middleware that blocks access until the contest start time.
    'base.middleware.RestrictAccessTillTime'
)
# Try normal model-backed auth first, then the CAS single-sign-on backend.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'base.backends.PopulatedCASBackend',
)
ROOT_URLCONF = 'felicity_threads_base.urls'
WSGI_APPLICATION = 'felicity_threads_base.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# The <...> values are placeholders: fill them in for each deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': '<db-name>',
        'USER' : '<user>',
        'PASSWORD' : '<password>',
        'HOST' : '<host>',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# The site is served under the /contest/ URL prefix.
STATIC_URL = '/contest/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
MEDIA_URL = '/contest/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
TEMPLATE_DIRS = (TEMPLATE_PATH,)
# CAS settings
CAS_SERVER_URL = 'http://felicity.iiit.ac.in/cas/'
CAS_VERSION = '3'
# NOTE: "COMPLETLY" is the spelling used by the django_cas package; do not "fix" it.
CAS_LOGOUT_COMPLETLY = True
CAS_LOGOUT_URL = 'http://felicity.iiit.ac.in/logout'
CAS_DISPLAY_MESSAGES = False
LOGIN_URL = '/contest/accounts/login'
LOGOUT_URL = '/contest/accounts/logout'
LOGIN_REDIRECT_URL = '/'
# Add start and end datetime in the format: year, month, day, hour, minute,
# second, microsecond, tzinfo (use the module-level `utc` instance above).
# Bug fix: the original template called datetime.datetime() with no arguments,
# which raises TypeError as soon as the settings module is imported.
# TODO: replace these placeholder values with the real contest schedule.
CONTEST_START_DATETIME = datetime.datetime(2015, 1, 1, 0, 0, 0, 0, utc)
CONTEST_END_DATETIME = datetime.datetime(2015, 1, 2, 0, 0, 0, 0, utc)
# Route Django's ERROR-and-above messages to a debug log file.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': '/tmp/django-debug.log',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'level': 'ERROR',
            'propagate': True,
        },
    },
}
# Celery Specific Variables.
# Redis is used as the broker; results are stored via Django's cache.
BROKER_URL = 'redis://localhost:6379/0'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND='djcelery.backends.cache:CacheBackend'
~
| ParthKolekar/felicity-threads-base | felicity_threads_base/felicity_threads_base/settings_example.py | Python | lgpl-3.0 | 4,361 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import imp
import os
import sys
project_dir = os.path.dirname(os.path.abspath(__file__))
# Make the recipe modules importable from the test modules.
sys.path.append(project_dir + '/recipes')

if __name__ == "__main__":
    # Gather every *.py file found directly under ./tests into one suite.
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    tests_dir = project_dir + '/tests'
    for entry in os.listdir(tests_dir):
        candidate = tests_dir + '/' + entry
        if not os.path.isfile(candidate) or not candidate.endswith('.py'):
            continue
        # Load the file as a module and collect the TestCases it defines.
        module = imp.load_source(os.path.splitext(candidate)[0], candidate)
        suite.addTest(loader.loadTestsFromModule(module))
    unittest.TextTestRunner(verbosity=1).run(suite)
| narusemotoki/python-recipes | run_tests.py | Python | mit | 644 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# module:
# author: Panagiotis Mavrogiorgos <pmav99,gmail>
"""
Package description
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Version
__major__ = 0  # for major interface/format changes
__minor__ = 1  # for minor interface/format changes
__release__ = 0  # for tweaks, bug-fixes, or development
# package information
__package_name__ = "foobar"
__package_description__ = "foo"
# __version__ is derived from the three components above.
__version__ = "%d.%d.%d" % (__major__, __minor__, __release__)
__license__ = "BSD"
__url__ = "http://github.org/pmav99/%s" % __package_name__
__download_url__ = "http://github.org/pmav99/%s/archive/master.zip" % __package_name__
__author__ = "Panagiotis Mavrogiorgos"
__author_email__ = "gmail pmav99"  # obfuscated address: parts intentionally reversed
# package imports
# No public names are exported yet; populate __all__ as the API grows.
__all__ = [
]
| pmav99/dotfiles | templates/pyinit.py | Python | mit | 836 |
## pyayaBot_useCaseTest.py
## This script deploys an instance of pyayaBot using hard-coded values and is a system-wide test.
import pyayaBot_main, sys
## Require exactly one argument: the Twitch channel name to join.
if (len(sys.argv) != 2):
	print " Syntax error. Usage: pyayaBot_useCaseTest.py channel_name"
	sys.exit()
## Initialize test variables.
## Config paths are relative to this script's working directory.
test_connection_config = "../config/connection_config.json"
test_channel_config = "../config/channel_config_" + sys.argv[1].lower() + ".json"
## This int will be turned into a list of binary flags that control logging.
## INFO Logging    - Add 8
## WARNING Logging - Add 4
## ERROR Logging   - Add 2
## DEBUG Logging   - Add 1
## e.g.) 15 -> All logging turned on. (Default behavior)
##       6 -> WARNING & ERROR logging only.
test_bitmask = 14
## Turn the int given into a list of flags (note: '1'/'0' characters, not real bools).
test_bitlist = list(bin(test_bitmask)[2:].zfill(4))
test_pyaya = pyayaBot_main.Bot(test_connection_config, test_channel_config, test_bitlist)
| pyayaBotDevs/pyayaBot | python/pyayaBot_useCaseTest.py | Python | apache-2.0 | 952 |
import unittest
def f(x, y):
    """Return the quotient of x divided by y (raises ZeroDivisionError for y == 0)."""
    quotient = x / y
    return quotient
class MyFTest(unittest.TestCase):
    """Unit tests for the division helper f."""

    def test_div(self):
        # Expects truncating integer division (Python 2 semantics): 1/2 == 0.
        self.assertEqual(0, f(1, 2))

    def test_div_zero(self):
        # Dividing by zero must raise.
        with self.assertRaises(Exception):
            f(1, 0)


if __name__ == "__main__":
    unittest.main()
| jigarkb/Programming | UnitTest/sample_python_unittest.py | Python | mit | 268 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import subprocess
class Matlab(Package):
    """MATLAB (MATrix LABoratory) is a multi-paradigm numerical computing
    environment and fourth-generation programming language. A proprietary
    programming language developed by MathWorks, MATLAB allows matrix
    manipulations, plotting of functions and data, implementation of
    algorithms, creation of user interfaces, and interfacing with programs
    written in other languages, including C, C++, C#, Java, Fortran and Python.

    Note: MATLAB is licensed software. You will need to create an account on
    the MathWorks homepage and download MATLAB yourself. Spack will search your
    current directory for the download file. Alternatively, add this file to a
    mirror so that Spack can find it. For instructions on how to set up a
    mirror, see http://spack.readthedocs.io/en/latest/mirrors.html"""

    homepage = "https://www.mathworks.com/products/matlab.html"

    version('R2016b', 'b0e0b688894282139fa787b5a86a5cf7')

    # How the MathWorks installer should behave.
    variant(
        'mode',
        default='interactive',
        values=('interactive', 'silent', 'automated'),
        description='Installation mode (interactive, silent, or automated)'
    )
    # File installation key obtained from the MathWorks license center.
    variant(
        'key',
        default='',
        values=lambda x: True,  # Anything goes as a key
        description='The file installation key to use'
    )

    # Licensing
    license_required = True
    license_comment = '#'
    license_files = ['licenses/license.dat']
    license_vars = ['LM_LICENSE_FILE']
    license_url = 'https://www.mathworks.com/help/install/index.html'

    extendable = True

    def url_for_version(self, version):
        """Point Spack at the installer archive downloaded into the current directory."""
        return "file://{0}/matlab_{1}_glnxa64.zip".format(os.getcwd(), version)

    def configure(self, spec, prefix):
        """Write the answer file consumed by MATLAB's unattended installer."""
        answers = [
            ('destinationFolder', prefix),
            ('mode', spec.variants['mode'].value),
            ('fileInstallationKey', spec.variants['key'].value),
            ('licensePath', self.global_license_file),
        ]
        # Store values requested by the installer in a file.
        with open('spack_installer_input.txt', 'w') as answer_file:
            for name, value in answers:
                answer_file.write('{0}={1}\n'.format(name, value))

    def install(self, spec, prefix):
        """Generate the answer file, then launch the bundled installer."""
        self.configure(spec, prefix)
        # Run silent installation script; the installer needs the full path.
        answer_path = join_path(self.stage.source_path,
                                'spack_installer_input.txt')
        subprocess.call(['./install', '-inputFile', answer_path])
| skosukhin/spack | var/spack/repos/builtin/packages/matlab/package.py | Python | lgpl-2.1 | 3,852 |
# _UID_dict.py
"""
Dictionary of UID: (name, type, name_info, is_retired)
"""
# Auto-generated by make_UID_dict.py"""
UID_dictionary = {
'1.2.840.10008.1.1': ('Verification SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.1.2': ('Implicit VR Little Endian', 'Transfer Syntax', 'Default Transfer Syntax for DICOM', ''),
'1.2.840.10008.1.2.1': ('Explicit VR Little Endian', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.1.99': ('Deflated Explicit VR Little Endian', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.2': ('Explicit VR Big Endian', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.50': ('JPEG Baseline (Process 1)', 'Transfer Syntax',
'Default Transfer Syntax for Lossy JPEG 8 Bit Image Compression', ''),
'1.2.840.10008.1.2.4.51': ('JPEG Extended (Process 2 and 4)', 'Transfer Syntax',
'Default Transfer Syntax for Lossy JPEG 12 Bit Image Compression (Process 4 only)', ''),
'1.2.840.10008.1.2.4.52': ('JPEG Extended (Process 3 and 5)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.53': ('JPEG Spectral Selection, Non-Hierarchical (Process 6 and 8)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.54': ('JPEG Spectral Selection, Non-Hierarchical (Process 7 and 9)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.55': ('JPEG Full Progression, Non-Hierarchical (Process 10 and 12)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.56': ('JPEG Full Progression, Non-Hierarchical (Process 11 and 13)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.57': ('JPEG Lossless, Non-Hierarchical (Process 14)', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.58': ('JPEG Lossless, Non-Hierarchical (Process 15)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.59': ('JPEG Extended, Hierarchical (Process 16 and 18)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.60': ('JPEG Extended, Hierarchical (Process 17 and 19)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.61': ('JPEG Spectral Selection, Hierarchical (Process 20 and 22)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.62': ('JPEG Spectral Selection, Hierarchical (Process 21 and 23)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.63': ('JPEG Full Progression, Hierarchical (Process 24 and 26)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.64': ('JPEG Full Progression, Hierarchical (Process 25 and 27)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.65': ('JPEG Lossless, Hierarchical (Process 28)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.66': ('JPEG Lossless, Hierarchical (Process 29)', 'Transfer Syntax', '', 'Retired'),
'1.2.840.10008.1.2.4.70': ('JPEG Lossless, Non-Hierarchical, First-Order Prediction (Process 14 [Selection Value 1])',
'Transfer Syntax', 'Default Transfer Syntax for Lossless JPEG Image Compression', ''),
'1.2.840.10008.1.2.4.80': ('JPEG-LS Lossless Image Compression', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.81': ('JPEG-LS Lossy (Near-Lossless) Image Compression', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.90': ('JPEG 2000 Image Compression (Lossless Only)', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.91': ('JPEG 2000 Image Compression', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.92': ('JPEG 2000 Part 2 Multi-component Image Compression (Lossless Only)', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.93': ('JPEG 2000 Part 2 Multi-component Image Compression', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.94': ('JPIP Referenced', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.95': ('JPIP Referenced Deflate', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.4.100': ('MPEG2 Main Profile @ Main Level', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.5': ('RLE Lossless', 'Transfer Syntax', '', ''),
'1.2.840.10008.1.2.6.1 ': ('RFC 2557 MIME encapsulation ', 'Transfer Syntax ', '', ''),
'1.2.840.10008.1.2.6.2': ('XML Encoding', 'Transfer Syntax ', '', ''),
'1.2.840.10008.1.3.10': ('Media Storage Directory Storage', 'SOP Class', '', ''),
'1.2.840.10008.1.4.1.1': ('Talairach Brain Atlas Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.2': ('SPM2 T1 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.3': ('SPM2 T2 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.4': ('SPM2 PD Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.5': ('SPM2 EPI Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.6': ('SPM2 FIL T1 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.7': ('SPM2 PET Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.8': ('SPM2 TRANSM Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.9': ('SPM2 SPECT Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.10': ('SPM2 GRAY Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.11': ('SPM2 WHITE Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.12': ('SPM2 CSF Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.13': ('SPM2 BRAINMASK Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.14': ('SPM2 AVG305T1 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.15': ('SPM2 AVG152T1 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.16': ('SPM2 AVG152T2 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.17': ('SPM2 AVG152PD Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.1.18': ('SPM2 SINGLESUBJT1 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.2.1': ('ICBM 452 T1 Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.4.2.2': ('ICBM Single Subject MRI Frame of Reference', 'Well-known frame of reference', '', ''),
'1.2.840.10008.1.9': ('Basic Study Content Notification SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.1.20.1': ('Storage Commitment Push Model SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.1.20.1.1': ('Storage Commitment Push Model SOP Instance', 'Well-known SOP Instance', '', ''),
'1.2.840.10008.1.20.2': ('Storage Commitment Pull Model SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.1.20.2.1': ('Storage Commitment Pull Model SOP Instance', 'Well-known SOP Instance', '', 'Retired'),
'1.2.840.10008.1.40': ('Procedural Event Logging SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.1.40.1': ('Procedural Event Logging SOP Instance', 'Well-known SOP Instance', '', ''),
'1.2.840.10008.1.42': ('Substance Administration Logging SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.1.42.1': ('Substance Administration Logging SOP Instance', 'Well-known SOP Instance', '', ''),
'1.2.840.10008.2.6.1': ('DICOM UID Registry', 'DICOM UIDs as a Coding Scheme', '', ''),
'1.2.840.10008.2.16.4': ('DICOM Controlled Terminology', 'Coding Scheme', '', ''),
'1.2.840.10008.3.1.1.1': ('DICOM Application Context Name', 'Application Context Name', '', ''),
'1.2.840.10008.3.1.2.1.1': ('Detached Patient Management SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.1.4': ('Detached Patient Management Meta SOP Class', 'Meta SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.2.1': ('Detached Visit Management SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.3.1': ('Detached Study Management SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.3.2': ('Study Component Management SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.3.3': ('Modality Performed Procedure Step SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.3.1.2.3.4': ('Modality Performed Procedure Step Retrieve SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.3.1.2.3.5': ('Modality Performed Procedure Step Notification SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.3.1.2.5.1': ('Detached Results Management SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.5.4': ('Detached Results Management Meta SOP Class', 'Meta SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.5.5': ('Detached Study Management Meta SOP Class', 'Meta SOP Class', '', 'Retired'),
'1.2.840.10008.3.1.2.6.1': ('Detached Interpretation Management SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.4.2': ('Storage Service Class', 'Service Class', '', ''),
'1.2.840.10008.5.1.1.1': ('Basic Film Session SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.2': ('Basic Film Box SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.4': ('Basic Grayscale Image Box SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.4.1': ('Basic Color Image Box SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.4.2': ('Referenced Image Box SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.9': ('Basic Grayscale Print Management Meta SOP Class', 'Meta SOP Class', '', ''),
'1.2.840.10008.5.1.1.9.1': ('Referenced Grayscale Print Management Meta SOP Class', 'Meta SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.14': ('Print Job SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.15': ('Basic Annotation Box SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.16': ('Printer SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.16.376': ('Printer Configuration Retrieval SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.17': ('Printer SOP Instance', 'Well-known Printer SOP Instance', '', ''),
'1.2.840.10008.5.1.1.17.376': ('Printer Configuration Retrieval SOP Instance', 'Well-known Printer SOP Instance', '', ''),
'1.2.840.10008.5.1.1.18': ('Basic Color Print Management Meta SOP Class', 'Meta SOP Class', '', ''),
'1.2.840.10008.5.1.1.18.1': ('Referenced Color Print Management Meta SOP Class', 'Meta SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.22': ('VOI LUT Box SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.23': ('Presentation LUT SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.1.24': ('Image Overlay Box SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.24.1': ('Basic Print Image Overlay Box SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.25': ('Print Queue SOP Instance', 'Well-known Print Queue SOP Instance', '', 'Retired'),
'1.2.840.10008.5.1.1.26': ('Print Queue Management SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.27': ('Stored Print Storage SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.29': ('Hardcopy Grayscale Image Storage SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.30': ('Hardcopy Color Image Storage SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.31': ('Pull Print Request SOP Class', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.32': ('Pull Stored Print Management Meta SOP Class', 'Meta SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.1.33': ('Media Creation Management SOP Class UID', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.1': ('Computed Radiography Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.1.1': ('Digital X-Ray Image Storage - For Presentation', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.1.1.1': ('Digital X-Ray Image Storage - For Processing', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.1.2': ('Digital Mammography X-Ray Image Storage - For Presentation', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.1.2.1': ('Digital Mammography X-Ray Image Storage - For Processing', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.1.3': ('Digital Intra-oral X-Ray Image Storage - For Presentation', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.1.3.1': ('Digital Intra-oral X-Ray Image Storage - For Processing', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.2': ('CT Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.2.1': ('Enhanced CT Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.3': ('Ultrasound Multi-frame Image Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.3.1': ('Ultrasound Multi-frame Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.4': ('MR Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.4.1': ('Enhanced MR Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.4.2': ('MR Spectroscopy Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.5': ('Nuclear Medicine Image Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.6': ('Ultrasound Image Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.6.1': ('Ultrasound Image Storage', 'SOP Class', '', ''),
# From Supp 43
'1.2.840.10008.5.1.4.1.1.6.2': ('Enhanced US Volume Storage', 'SOP Class', '', ''),
# End From Supp 43
'1.2.840.10008.5.1.4.1.1.7': ('Secondary Capture Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.7.1': ('Multi-frame Single Bit Secondary Capture Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.7.2': ('Multi-frame Grayscale Byte Secondary Capture Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.7.3': ('Multi-frame Grayscale Word Secondary Capture Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.7.4': ('Multi-frame True Color Secondary Capture Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.8': ('Standalone Overlay Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.9': ('Standalone Curve Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.9.1': ('Waveform Storage - Trial', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.9.1.1': ('12-lead ECG Waveform Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.9.1.2': ('General ECG Waveform Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.9.1.3': ('Ambulatory ECG Waveform Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.9.2.1': ('Hemodynamic Waveform Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.9.3.1': ('Cardiac Electrophysiology Waveform Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.9.4.1': ('Basic Voice Audio Waveform Storage', 'SOP Class', '', ''),
# From Supp 43
'1.2.840.10008.5.1.4.1.1.9.4.2': ('General Audio Waveform Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.9.5.1': ('Arterial Pulse Waveform Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.9.6.1': ('Respiratory Waveform Storage', 'SOP Class', '', ''),
# End From Supp 43
'1.2.840.10008.5.1.4.1.1.10': ('Standalone Modality LUT Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.11': ('Standalone VOI LUT Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.11.1': ('Grayscale Softcopy Presentation State Storage SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.11.2': ('Color Softcopy Presentation State Storage SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.11.3': ('Pseudo-Color Softcopy Presentation State Storage SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.11.4': ('Blending Softcopy Presentation State Storage SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.12.1': ('X-Ray Angiographic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.12.1.1': ('Enhanced XA Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.12.2': ('X-Ray Radiofluoroscopic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.12.2.1': ('Enhanced XRF Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.13.1.1': ('X-Ray 3D Angiographic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.13.1.2': ('X-Ray 3D Craniofacial Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.12.3': ('X-Ray Angiographic Bi-Plane Image Storage', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.20': ('Nuclear Medicine Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.66': ('Raw Data Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.66.1': ('Spatial Registration Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.66.2': ('Spatial Fiducials Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.66.3': ('Deformable Spatial Registration Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.66.4': ('Segmentation Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.67': ('Real World Value Mapping Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1': ('VL Image Storage - Trial', '', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.77.2': ('VL Multi-frame Image Storage - Trial', '', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.77.1.1': ('VL Endoscopic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.1.1': ('Video Endoscopic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.2': ('VL Microscopic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.2.1': ('Video Microscopic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.3': ('VL Slide-Coordinates Microscopic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.4': ('VL Photographic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.4.1': ('Video Photographic Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.5.1': ('Ophthalmic Photography 8 Bit Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.5.2': ('Ophthalmic Photography 16 Bit Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.5.3': ('Stereometric Relationship Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.77.1.5.4': ('Ophthalmic Tomography Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.1': ('Text SR Storage - Trial', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.88.2': ('Audio SR Storage - Trial', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.88.3': ('Detail SR Storage - Trial', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.88.4': ('Comprehensive SR Storage - Trial', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.1.88.11': ('Basic Text SR Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.22': ('Enhanced SR Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.33': ('Comprehensive SR Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.40': ('Procedure Log Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.50': ('Mammography CAD SR Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.59': ('Key Object Selection Document Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.65': ('Chest CAD SR Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.88.67': ('X-Ray Radiation Dose SR Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.104.1': ('Encapsulated PDF Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.104.2': ('Encapsulated CDA Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.128': ('Positron Emission Tomography Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.129': ('Standalone PET Curve Storage', 'SOP Class', '', 'Retired'),
# From Supp 117
'1.2.840.10008.5.1.4.1.1.130': ('Enhanced PET Image Storage', 'SOP Class', '', ''),
# End From Supp 117
'1.2.840.10008.5.1.4.1.1.481.1': ('RT Image Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.2': ('RT Dose Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.3': ('RT Structure Set Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.4': ('RT Beams Treatment Record Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.5': ('RT Plan Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.6': ('RT Brachy Treatment Record Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.7': ('RT Treatment Summary Record Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.8': ('RT Ion Plan Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.1.481.9': ('RT Ion Beams Treatment Record Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.1.1': ('Patient Root Query/Retrieve Information Model - FIND', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.1.2': ('Patient Root Query/Retrieve Information Model - MOVE', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.1.3': ('Patient Root Query/Retrieve Information Model - GET', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.2.1': ('Study Root Query/Retrieve Information Model - FIND', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.2.2': ('Study Root Query/Retrieve Information Model - MOVE', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.2.3': ('Study Root Query/Retrieve Information Model - GET', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.3.1': ('Patient/Study Only Query/Retrieve Information Model - FIND', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.2.3.2': ('Patient/Study Only Query/Retrieve Information Model - MOVE', 'SOP Class', '', 'Retired'),
'1.2.840.10008.5.1.4.1.2.3.3': ('Patient/Study Only Query/Retrieve Information Model - GET', 'SOP Class', '', 'Retired'),
# From Supp 119
'1.2.840.10008.5.1.4.1.2.4.2': ('Composite Instance Root Retrieve - MOVE', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.4.3': ('Composite Instance Root Retrieve - GET', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.1.2.5.3': ('Composite Instance Retrieve Without Bulk Data - GET', 'SOP Class', '', ''),
# End From Supp 119
'1.2.840.10008.5.1.4.31': ('Modality Worklist Information Model - FIND', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.32.1': ('General Purpose Worklist Information Model - FIND', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.32.2': ('General Purpose Scheduled Procedure Step SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.32.3': ('General Purpose Performed Procedure Step SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.32': ('General Purpose Worklist Management Meta SOP Class', 'Meta SOP Class', '', ''),
'1.2.840.10008.5.1.4.33': ('Instance Availability Notification SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.1': ('RT Beams Delivery Instruction Storage (Supplement 74 Frozen Draft)', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.2': ('RT Conventional Machine Verification (Supplement 74 Frozen Draft)', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.3': ('RT Ion Machine Verification (Supplement 74 Frozen Draft)', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.4': ('Unified Worklist and Procedure Step Service Class', 'Service Class', '', ''),
'1.2.840.10008.5.1.4.34.4.1 ': ('Unified Procedure Step - Push SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.4.2': ('Unified Procedure Step - Watch SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.4.3': ('Unified Procedure Step - Pull SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.4.4': ('Unified Procedure Step - Event SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.34.5 ': ('Unified Worklist and Procedure Step SOP Instance', 'Well-known SOP Instance', '', ''),
'1.2.840.10008.5.1.4.37.1': ('General Relevant Patient Information Query', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.37.2': ('Breast Imaging Relevant Patient Information Query', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.37.3': ('Cardiac Relevant Patient Information Query', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.38.1': ('Hanging Protocol Storage', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.38.2': ('Hanging Protocol Information Model - FIND', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.38.3': ('Hanging Protocol Information Model - MOVE', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.41': ('Product Characteristics Query SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.5.1.4.42': ('Substance Approval Query SOP Class', 'SOP Class', '', ''),
'1.2.840.10008.15.0.3.1': ('dicomDeviceName', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.2': ('dicomDescription', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.3': ('dicomManufacturer', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.4': ('dicomManufacturerModelName', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.5': ('dicomSoftwareVersion', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.6': ('dicomVendorData', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.7': ('dicomAETitle', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.8': ('dicomNetworkConnectionReference', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.9': ('dicomApplicationCluster', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.10': ('dicomAssociationInitiator', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.11': ('dicomAssociationAcceptor', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.12': ('dicomHostname', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.13': ('dicomPort', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.14': ('dicomSOPClass', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.15': ('dicomTransferRole', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.16': ('dicomTransferSyntax', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.17': ('dicomPrimaryDeviceType', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.18': ('dicomRelatedDeviceReference', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.19': ('dicomPreferredCalledAETitle', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.20': ('dicomTLSCyphersuite', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.21': ('dicomAuthorizedNodeCertificateReference', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.22': ('dicomThisNodeCertificateReference', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.23': ('dicomInstalled', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.24': ('dicomStationName', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.25': ('dicomDeviceSerialNumber', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.26': ('dicomInstitutionName', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.27': ('dicomInstitutionAddress', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.28': ('dicomInstitutionDepartmentName', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.29': ('dicomIssuerOfPatientID', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.30': ('dicomPreferredCallingAETitle', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.3.31': ('dicomSupportedCharacterSet', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.1': ('dicomConfigurationRoot', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.2': ('dicomDevicesRoot', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.3': ('dicomUniqueAETitlesRegistryRoot', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.4': ('dicomDevice', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.5': ('dicomNetworkAE', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.6': ('dicomNetworkConnection', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.7': ('dicomUniqueAETitle', 'LDAP OID', '', ''),
'1.2.840.10008.15.0.4.8': ('dicomTransferCapability', 'LDAP OID', '', '')}
| njvack/ge-mri-rtafni | upload-host/vendor/dicom/_UID_dict.py | Python | mit | 26,450 |
# vim: set fileencoding=utf-8
#
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from collections import namedtuple
from dasbus.structure import get_fields
from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core.i18n import _, N_, CN_, C_
from pyanaconda.core.storage import PROTECTED_FORMAT_TYPES, SIZE_POLICY_AUTO, SIZE_POLICY_MAX, \
DEVICE_TYPE_LVM, DEVICE_TYPE_BTRFS, DEVICE_TYPE_LVM_THINP, DEVICE_TYPE_MD, Size
from pyanaconda.core.string import lower_ascii
from pyanaconda.modules.common.structures.device_factory import DeviceFactoryRequest, \
DeviceFactoryPermissions
from pyanaconda.modules.common.structures.storage import DeviceFormatData, DeviceData
from pyanaconda.modules.common.structures.validation import ValidationReport
from pyanaconda.ui.lib.storage import size_from_input
from pyanaconda.ui.helpers import InputCheck
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.helpers import GUIDialogInputCheckHandler
from pyanaconda.ui.gui.utils import fancy_set_sensitive, really_hide, really_show
log = get_module_logger(__name__)
# Default to these units when reading user input when no units given
SIZE_UNITS_DEFAULT = "MiB"

# If the user enters a smaller size, the UI changes it to this value.
MIN_SIZE_ENTRY = Size("1 MiB")

# If the user enters a larger size, the UI changes it to this value.
MAX_SIZE_ENTRY = Size(2**64 - 1)

# Upper limit for an explicit container size policy; if the user enters
# a larger size, the UI changes it to this value (fits in a signed Int64).
MAX_SIZE_POLICY_ENTRY = Size(2**63 - 1)

# Page indices of the notebook widget with the device details.
NOTEBOOK_LABEL_PAGE = 0
NOTEBOOK_DETAILS_PAGE = 1
NOTEBOOK_LUKS_PAGE = 2
NOTEBOOK_UNEDITABLE_PAGE = 3
NOTEBOOK_INCOMPLETE_PAGE = 4

# Translatable templates used by the container selection and dialog;
# %(container_type)s is filled in with a ContainerType name.
NEW_CONTAINER_TEXT = N_("Create a new %(container_type)s ...")
CONTAINER_TOOLTIP = N_("Create or select %(container_type)s")
CONTAINER_DIALOG_TITLE = N_("CONFIGURE %(container_type)s")
CONTAINER_DIALOG_TEXT = N_("Please create a name for this %(container_type)s "
                           "and select at least one disk below.")

# Tooltip/error text explaining the accepted size syntax.
DESIRED_CAPACITY_HINT = N_(
    "Specify the Desired Capacity in whole or decimal numbers, with an appropriate unit.\n\n"
    "Spaces separating digit groups are not allowed. Units consist of a decimal or binary "
    "prefix, and optionally the letter B. Letter case does not matter for units. The default "
    "unit used when units are left out is MiB.\n\n"
    "Examples of valid input:\n"
    "'100 GiB' = 100 gibibytes\n"
    "'512m' = 512 megabytes\n"
    "'123456789' = 123 terabytes and a bit less than a half\n"
)
DESIRED_CAPACITY_ERROR = DESIRED_CAPACITY_HINT

# A container type: its translatable name and its mnemonic widget label.
ContainerType = namedtuple("ContainerType", ["name", "label"])

# Container types indexed by the device type of their members.
CONTAINER_TYPES = {
    DEVICE_TYPE_LVM: ContainerType(
        N_("Volume Group"),
        CN_("GUI|Custom Partitioning|Configure|Devices", "_Volume Group:")),
    DEVICE_TYPE_LVM_THINP: ContainerType(
        N_("Volume Group"),
        CN_("GUI|Custom Partitioning|Configure|Devices", "_Volume Group:")),
    DEVICE_TYPE_BTRFS: ContainerType(
        N_("Volume"),
        CN_("GUI|Custom Partitioning|Configure|Devices", "_Volume:"))
}
def generate_request_description(request, original=None):
    """Generate a description of a device factory request.

    Each field is rendered on its own line; fields whose value differs
    from the original request are shown as "old -> new".

    :param request: a device factory request
    :param original: an original device factory request or None
    :return: a string with the description
    """
    original = original or request

    if not isinstance(request, DeviceFactoryRequest) \
            or not isinstance(original, DeviceFactoryRequest):
        raise ValueError("Not instances of DeviceFactoryRequest")

    def _describe_field(name, field):
        # One line per field; highlight a change as "old -> new".
        current = field.get_data(request)
        previous = field.get_data(original)

        if current == previous:
            return "{} = {}".format(name, repr(current))

        return "{} = {} -> {}".format(name, repr(previous), repr(current))

    lines = [
        _describe_field(name, field)
        for name, field in get_fields(request).items()
    ]
    return "\n".join(["{"] + lines + ["}"])
def get_size_from_entry(entry, lower_bound=MIN_SIZE_ENTRY, upper_bound=MAX_SIZE_ENTRY,
                        units=SIZE_UNITS_DEFAULT):
    """ Get a Size object from an entry field.

    :param entry: an entry field with a specified size
    :param lower_bound: lower bound for size returned,
    :type lower_bound: :class:`blivet.size.Size` or NoneType
    :param upper_bound: upper bound for size returned,
    :type upper_bound: :class:`blivet.size.Size` or NoneType
    :param units: units to use if none obtained from entry
    :type units: str or NoneType
    :returns: a Size object corresponding to the text in the entry field
    :rtype: :class:`blivet.size.Size` or NoneType

    Returns None if the entry text cannot be parsed as a size.

    Units default to MiB (SIZE_UNITS_DEFAULT) when the entry does not
    specify any. The result is clamped to the given bounds: a value
    below lower_bound yields lower_bound and a value above upper_bound
    yields upper_bound. Pass None for either bound to disable that
    clamping.
    """
    size_text = entry.get_text().strip()
    size = size_from_input(size_text, units=units)
    if size is None:
        return None
    if lower_bound is not None and size < lower_bound:
        return lower_bound
    if upper_bound is not None and size > upper_bound:
        return upper_bound
    return size
def get_selected_raid_level(raid_level_combo):
    """Read the RAID level currently chosen in a combo box.

    :param raid_level_combo: a combo box listing the RAID levels
    :return str: the selected raid level, an empty string if none selected
    """
    # The combo is hidden when a RAID level is not applicable.
    if not raid_level_combo.get_property("visible"):
        return ""

    active = raid_level_combo.get_active_iter()
    if not active:
        # Nothing is selected.
        return ""

    # Column 1 of the model holds the raid level identifier.
    return raid_level_combo.get_model()[active][1]
def get_default_raid_level(device_type):
    """Return the RAID level to preselect for the given device type.

    Only MD RAID devices get a default level; every other device type
    has no default.

    :param int device_type: an int representing the device_type
    :return str: the default RAID level for this device type or an empty string
    """
    return "raid1" if device_type == DEVICE_TYPE_MD else ""
def get_supported_device_raid_levels(device_tree, device_type):
    """Get RAID levels supported for the given device type.

    The result is the intersection of the levels anaconda is prepared to
    offer and the levels blivet reports as available.

    Since anaconda only ever allows the user to choose RAID levels for
    device type DEVICE_TYPE_MD, hiding the RAID menu for all other device
    types, the function only returns a non-empty set for this device type.
    If this changes, then so should this function, but at this time it
    is not clear what RAID levels should be offered for other device types.

    :param device_tree: a proxy of a device tree
    :param int device_type: one of an enumeration of device types
    :return: a set of supported raid levels
    :rtype: a set of strings
    """
    if device_type != DEVICE_TYPE_MD:
        # The UI only exposes RAID levels for MD devices.
        return set()

    anaconda_levels = {"raid0", "raid1", "raid4", "raid5", "raid6", "raid10"}
    blivet_levels = set(device_tree.GetSupportedRaidLevels(DEVICE_TYPE_MD))
    return blivet_levels & anaconda_levels
def get_supported_container_raid_levels(device_tree, device_type):
    """The raid levels anaconda supports for a container for this device_type.

    LVM containers sit on MD RAID, so they are offered the MD levels plus
    the empty string, which stands for "no RAID". Btrfs volumes use their
    own native levels.

    :param device_tree: a proxy of a device tree
    :param int device_type: one of an enumeration of device types
    :return: a set of supported raid levels
    :rtype: a set of strings
    """
    if device_type in (DEVICE_TYPE_LVM, DEVICE_TYPE_LVM_THINP):
        md_levels = set(device_tree.GetSupportedRaidLevels(DEVICE_TYPE_MD))
        allowed = md_levels & {"raid0", "raid1", "raid4", "raid5", "raid6", "raid10"}
        # The empty string represents an LVM container without RAID.
        return allowed | {""}

    if device_type == DEVICE_TYPE_BTRFS:
        btrfs_levels = set(device_tree.GetSupportedRaidLevels(DEVICE_TYPE_BTRFS))
        return btrfs_levels & {"raid0", "raid1", "raid10", "single"}

    return set()
def get_container_type(device_type):
    """Return the ContainerType that matches the given device type.

    Falls back to a generic "container" label for device types that have
    no dedicated container concept.
    """
    fallback = ContainerType(
        N_("container"),
        CN_("GUI|Custom Partitioning|Configure|Devices", "container")
    )
    return CONTAINER_TYPES.get(device_type, fallback)
class AddDialog(GUIObject):
    """Dialog asking for a mount point and a size for a new device.

    Shown when the user clicks "+" in the custom partitioning spoke.
    After the dialog closes, the validated results are available via
    the mount_point and size properties.
    """

    builderObjects = ["addDialog", "mountPointStore", "mountPointCompletion",
                      "mountPointEntryBuffer"]
    mainWidgetName = "addDialog"
    uiFile = "spokes/lib/custom_storage_helpers.glade"

    def __init__(self, data, device_tree):
        """Initialize the dialog.

        :param data: an instance of the kickstart data
        :param device_tree: a proxy of a device tree
        """
        super().__init__(data)
        self._device_tree = device_tree
        self._size = Size(0)
        self._mount_point = ""
        self._error = ""
        self._warning_label = self.builder.get_object("mountPointWarningLabel")
        self._size_entry = self.builder.get_object("addSizeEntry")
        self._size_entry.set_tooltip_text(DESIRED_CAPACITY_HINT)
        self._populate_mount_points()

    @property
    def mount_point(self):
        """The requested mount point."""
        return self._mount_point

    @property
    def size(self):
        """The requested size."""
        return self._size

    def _populate_mount_points(self):
        # Offer the mount points that are not used yet as completions
        # of the mount point entry.
        mount_points = self._device_tree.CollectUnusedMountPoints()
        mount_point_store = self.builder.get_object("mountPointStore")
        for path in mount_points:
            mount_point_store.append([path])
        entry = self.builder.get_object("addMountPointEntry")
        entry.set_model(mount_point_store)
        completion = self.builder.get_object("mountPointCompletion")
        completion.set_text_column(0)
        completion.set_popup_completion(True)

    def on_add_confirm_clicked(self, button, *args):
        """Handler of the Add button: validate the input, close on success."""
        self._error = ""
        self._set_mount_point()
        self._set_size()
        # Show (or clear) the validation message before deciding to close.
        self._warning_label.set_text(self._error)
        self.window.show_all()
        if not self._error:
            self.window.destroy()

    def _set_mount_point(self):
        self._mount_point = self.builder.get_object("addMountPointEntry").get_active_text()
        # Pseudo mount points like swap are accepted without validation.
        if lower_ascii(self._mount_point) in ("swap", "biosboot", "prepboot"):
            return
        report = ValidationReport.from_structure(
            self._device_tree.ValidateMountPoint(self._mount_point)
        )
        self._error = " ".join(report.get_messages())

    def _set_size(self):
        # An unparsable size entry results in a zero size.
        self._size = get_size_from_entry(self._size_entry) or Size(0)

    def refresh(self):
        super().refresh()
        self._warning_label.set_text("")

    def run(self):
        """Run the dialog until the input validates or it is dismissed."""
        while True:
            self._error = ""
            rc = self.window.run()
            # _error is set by on_add_confirm_clicked during run().
            if not self._error:
                return rc
class ConfirmDeleteDialog(GUIObject):
    """Confirmation dialog shown before a device is deleted."""

    builderObjects = ["confirmDeleteDialog"]
    mainWidgetName = "confirmDeleteDialog"
    uiFile = "spokes/lib/custom_storage_helpers.glade"

    def __init__(self, data, device_tree, root_name, device_name, is_multiselection):
        """Initialize the dialog.

        :param data: an instance of the kickstart data
        :param device_tree: a proxy of a device tree
        :param root_name: name of the installed OS root the device belongs to
        :param device_name: name of the device to be deleted
        :param is_multiselection: whether several devices are selected
        """
        super().__init__(data)
        self._device_tree = device_tree
        self._root_name = root_name
        self._device_name = device_name
        self._is_multiselection = is_multiselection
        self._label = self.builder.get_object("confirmLabel")
        self._label.set_text(self._get_label_text())
        self._optional_checkbox = self.builder.get_object("optionalCheckbox")
        self._optional_checkbox.set_label(self._get_checkbox_text())
        # Hide the checkbox completely when there is no optional action.
        if not self._optional_checkbox.get_label():
            self._optional_checkbox.hide()

    @property
    def option_checked(self):
        # State of the optional checkbox (delete-all / don't-ask-again).
        return self._optional_checkbox.get_active()

    def on_delete_confirm_clicked(self, button, *args):
        self.window.destroy()

    def _get_checkbox_text(self):
        """Return the checkbox label, or None to hide the checkbox."""
        root_name = self._root_name
        # Escape underscores so Gtk does not treat them as mnemonics.
        if root_name and "_" in root_name:
            root_name = root_name.replace("_", "__")
        if self._is_multiselection:
            return C_(
                "GUI|Custom Partitioning|Confirm Delete Dialog",
                "Do _not show this dialog for other selected file systems."
            )
        if root_name:
            return C_(
                "GUI|Custom Partitioning|Confirm Delete Dialog",
                "Delete _all file systems which are only used by {}."
            ).format(root_name)
        return None

    def _get_label_text(self):
        """Build a warning message tailored to the device and its format."""
        device_data = DeviceData.from_structure(
            self._device_tree.GetDeviceData(self._device_name)
        )
        format_data = DeviceFormatData.from_structure(
            self._device_tree.GetFormatData(self._device_name)
        )
        device_name = self._device_name
        mount_point = format_data.attrs.get("mount-point", "")
        if mount_point:
            # Identify the device by its mount point too.
            device_name = "{} ({})".format(mount_point, self._device_name)
        if format_data.type in PROTECTED_FORMAT_TYPES:
            return _(
                "{} may be a system boot partition! Deleting it may break "
                "other operating systems. Are you sure you want to delete it?"
            ).format(device_name)
        if device_data.type == "btrfs" and device_data.children:
            return _(
                "Are you sure you want to delete all of the data on {}, including subvolumes?"
            ).format(device_name)
        if device_data.type == "lvmthinlv" and device_data.children:
            return _(
                "Are you sure you want to delete all of the data on {}, including snapshots?"
            ).format(device_name)
        return _("Are you sure you want to delete all of the data on {}?").format(device_name)

    def run(self):
        return self.window.run()
class DisksDialog(GUIObject):
    """Dialog for choosing the disks a device may be allocated on."""

    builderObjects = ["disks_dialog", "disk_store", "disk_view"]
    mainWidgetName = "disks_dialog"
    uiFile = "spokes/lib/custom_storage_helpers.glade"

    def __init__(self, data, device_tree, disks, selected_disks):
        """Initialize the dialog.

        :param data: an instance of the kickstart data
        :param device_tree: a proxy of a device tree
        :param disks: names of the disks to show
        :param selected_disks: names of the disks to preselect
        """
        super().__init__(data)
        self._device_tree = device_tree
        self._selected_disks = selected_disks
        self._disks = disks
        self._store = self.builder.get_object("disk_store")
        self._view = self.builder.get_object("disk_view")
        self._populate_disks()
        self._select_disks()
        self._view.set_tooltip_column(0)

    @property
    def selected_disks(self):
        """Selected disks."""
        return self._selected_disks

    def _populate_disks(self):
        # One row per disk: description (serial), total size, free space
        # and the device name in column 3 used to identify the row.
        for device_name in self._disks:
            device_data = DeviceData.from_structure(
                self._device_tree.GetDeviceData(device_name)
            )
            device_free_space = self._device_tree.GetDiskFreeSpace(
                [device_name]
            )
            self._store.append([
                "{} ({})".format(
                    device_data.description,
                    device_data.attrs.get("serial", "")
                ),
                str(Size(device_data.size)),
                str(Size(device_free_space)),
                device_name
            ])

    def _select_disks(self):
        # Walk the model and select the rows of the preselected disks.
        model = self._view.get_model()
        itr = model.get_iter_first()
        selection = self._view.get_selection()
        while itr:
            device_name = model.get_value(itr, 3)
            if device_name in self._selected_disks:
                selection.select_iter(itr)
            itr = model.iter_next(itr)

    def on_cancel_clicked(self, button):
        self.window.destroy()

    def on_select_clicked(self, button):
        # Remember the current selection before closing the dialog.
        treeview = self.builder.get_object("disk_view")
        model, paths = treeview.get_selection().get_selected_rows()
        self._selected_disks = []
        for path in paths:
            itr = model.get_iter(path)
            device_name = model.get_value(itr, 3)
            self._selected_disks.append(device_name)
        self.window.destroy()

    def run(self):
        return self.window.run()
class ContainerDialog(GUIObject, GUIDialogInputCheckHandler):
    """Dialog for configuring a container device (volume group, btrfs volume).

    On a successful save the given DeviceFactoryRequest is updated in
    place; on cancel it is left untouched. The permissions object limits
    which attributes of the container the user may change.
    """

    builderObjects = ["container_dialog", "disk_store", "container_disk_view",
                      "containerRaidStoreFiltered", "containerRaidLevelLabel",
                      "containerRaidLevelCombo", "raidLevelStore",
                      "containerSizeCombo", "containerSizeEntry",
                      "containerSizeLabel", "containerEncryptedCheckbox",
                      "luksVersionCombo", "luksVersionStore", "luksVersionLabel"]
    mainWidgetName = "container_dialog"
    uiFile = "spokes/lib/custom_storage_helpers.glade"

    def __init__(self, data, device_tree, request: DeviceFactoryRequest,
                 permissions: DeviceFactoryPermissions, disks, names):
        """Initialize the dialog.

        :param data: an instance of the kickstart data
        :param device_tree: a proxy of a device tree
        :param request: a device factory request to configure
        :param permissions: permissions limiting what can be changed
        :param disks: names of disks the container may use
        :param names: container names that are already in use
        """
        GUIObject.__init__(self, data)
        self._device_tree = device_tree
        self._disks = disks
        self._request = request
        self._permissions = permissions
        # Remember the original values to detect (or revert to) them later.
        self._original_name = request.container_name
        self._container_names = names
        self._original_luks_version = request.luks_version
        self._error = ""
        # Look up the widgets.
        self._title_label = self.builder.get_object("container_dialog_title_label")
        self._dialog_label = self.builder.get_object("container_dialog_label")
        self._error_label = self.builder.get_object("containerErrorLabel")
        self._name_entry = self.builder.get_object("container_name_entry")
        self._encryptCheckbutton = self.builder.get_object("containerEncryptedCheckbox")
        self._luks_combo = self.builder.get_object("luksVersionCombo")
        self._luks_store = self.builder.get_object("luksVersionStore")
        self._luks_label = self.builder.get_object("luksVersionLabel")
        self._raidStoreFilter = self.builder.get_object("containerRaidStoreFiltered")
        self._store = self.builder.get_object("disk_store")
        self._treeview = self.builder.get_object("container_disk_view")
        self._sizeCombo = self.builder.get_object("containerSizeCombo")
        self._sizeEntry = self.builder.get_object("containerSizeEntry")
        self._raidLevelCombo = self.builder.get_object("containerRaidLevelCombo")
        self._raidLevelLabel = self.builder.get_object("containerRaidLevelLabel")
        self._save_button = self.builder.get_object("container_save_button")
        # The input check handler controls the sensitivity of the Save button.
        GUIDialogInputCheckHandler.__init__(self, self._save_button)
        self._supported_raid_levels = get_supported_container_raid_levels(
            self._device_tree, self._request.device_type
        )
        # Populate the widgets from the request.
        self._set_labels()
        self._populate_disks()
        self._select_disks()
        self._populate_raid()
        self._set_name()
        self._set_size()
        self._set_encryption()
        self._populate_luks()

    def _set_labels(self):
        # Substitute the container type name into the title and text.
        container_type = get_container_type(self._request.device_type)
        title_text = _(CONTAINER_DIALOG_TITLE) % {
            "container_type": _(container_type.name).upper()
        }
        self._title_label.set_text(title_text)
        dialog_text = _(CONTAINER_DIALOG_TEXT) % {
            "container_type": _(container_type.name).lower()
        }
        self._dialog_label.set_text(dialog_text)

    def _populate_disks(self):
        # One row per disk: description (serial), total size, free space
        # and the device name in column 3 used to identify the row.
        for device_name in self._disks:
            device_data = DeviceData.from_structure(
                self._device_tree.GetDeviceData(device_name)
            )
            device_free_space = self._device_tree.GetDiskFreeSpace(
                [device_name]
            )
            self._store.append([
                "{} ({})".format(
                    device_data.description,
                    device_data.attrs.get("serial", "")
                ),
                str(Size(device_data.size)),
                str(Size(device_free_space)),
                device_name
            ])

    def _select_disks(self):
        # Select the rows of the disks the request already uses.
        model = self._treeview.get_model()
        itr = model.get_iter_first()
        selection = self._treeview.get_selection()
        while itr:
            device_name = model.get_value(itr, 3)
            if device_name in self._request.disks:
                selection.select_iter(itr)
            itr = model.iter_next(itr)
        if not self._permissions.can_modify_container():
            self._treeview.set_sensitive(False)

    def _populate_raid(self):
        """Set up the raid-specific portion of the device details.

        Hide the RAID level menu if this device type does not support RAID.
        Choose a default RAID level.
        """
        self._raidStoreFilter.set_visible_func(self._raid_level_visible)
        self._raidStoreFilter.refilter()
        if not self._supported_raid_levels:
            for widget in [self._raidLevelLabel, self._raidLevelCombo]:
                really_hide(widget)
            return
        raid_level = self._request.container_raid_level
        # Preselect the request's RAID level in the combo box.
        for (i, row) in enumerate(self._raidLevelCombo.get_model()):
            if row[1] == raid_level:
                self._raidLevelCombo.set_active(i)
                break
        for widget in [self._raidLevelLabel, self._raidLevelCombo]:
            really_show(widget)
        fancy_set_sensitive(self._raidLevelCombo, self._permissions.container_raid_level)

    def _raid_level_visible(self, model, itr, user_data):
        # Filter function: show only the RAID levels we support.
        raid_level = model[itr][1]
        return raid_level in self._supported_raid_levels

    def _set_name(self):
        self._name_entry.set_text(self._request.container_name)
        self.add_check(self._name_entry, self._check_name_entry)
        if not self._permissions.container_name:
            fancy_set_sensitive(self._name_entry, False)

    def _check_name_entry(self, inputcheck):
        """Input check validating the container name."""
        container_name = self.get_input(inputcheck.input_obj).strip()
        # Keeping the original name is always valid.
        if container_name == self._original_name:
            return InputCheck.CHECK_OK
        if container_name in self._container_names:
            return _("Name is already in use.")
        report = ValidationReport.from_structure(
            self._device_tree.ValidateContainerName(container_name)
        )
        if not report.is_valid():
            return " ".join(report.get_messages())
        return InputCheck.CHECK_OK

    def _set_size(self):
        # Combo index 0 = automatic, 1 = as large as possible,
        # 2 = a fixed size taken from the entry.
        if self._request.container_size_policy == SIZE_POLICY_AUTO:
            self._sizeCombo.set_active(0)
            self._sizeEntry.set_text("")
        elif self._request.container_size_policy == SIZE_POLICY_MAX:
            self._sizeCombo.set_active(1)
            self._sizeEntry.set_text("")
        else:
            self._sizeCombo.set_active(2)
            size = Size(self._request.container_size_policy)
            self._sizeEntry.set_text(size.human_readable(max_places=2))
        if not self._permissions.container_size_policy:
            fancy_set_sensitive(self._sizeCombo, False)
            self._sizeEntry.set_sensitive(False)

    def _set_encryption(self):
        self._encryptCheckbutton.set_active(self._request.container_encrypted)
        if not self._permissions.container_encrypted:
            fancy_set_sensitive(self._encryptCheckbutton, False)

    def _populate_luks(self):
        """Set up the LUKS version combo box."""
        # Add the values.
        self._luks_store.clear()
        for version in ["luks1", "luks2"]:
            self._luks_store.append([version])
        # Get the selected value.
        luks_version = self._request.luks_version or self._device_tree.GetDefaultLUKSVersion()
        # Set the selected value.
        # NOTE(review): next() raises StopIteration if luks_version is not
        # in the store — presumably the default is always luks1/luks2.
        idx = next(
            i for i, data in enumerate(self._luks_combo.get_model())
            if data[0] == luks_version
        )
        self._luks_combo.set_active(idx)
        self._update_luks_combo()

    def _update_luks_combo(self):
        # The LUKS version is only relevant for an encrypted container.
        if self._encryptCheckbutton.get_active():
            really_show(self._luks_label)
            really_show(self._luks_combo)
        else:
            really_hide(self._luks_label)
            really_hide(self._luks_combo)

    def run(self):
        """Run the dialog until the input is saved or it is cancelled.

        :return: the response code of the dialog
        """
        while True:
            self._error = ""
            rc = self.window.run()
            if rc == 1:
                # Save clicked and input validation passed, try saving it
                if self.on_ok_clicked():
                    self._save_clicked()
                    # If that failed, try again
                    if self._error:
                        continue
                    else:
                        break
                # Save clicked with invalid input, try again
                else:
                    continue
            else:
                # Cancel or something similar, just exit
                break
        self.window.destroy()
        return rc

    def _save_clicked(self):
        """Apply the dialog state to the request if it is valid."""
        if not self._permissions.can_modify_container():
            return
        if not self._validate_disks():
            return
        if not self._validate_raid_level():
            return
        # All checks passed: update the request in place.
        self._request.disks = self._get_disks()
        self._request.container_name = self._name_entry.get_text().strip()
        self._request.container_encrypted = self._encryptCheckbutton.get_active()
        self._request.luks_version = self._get_luks_version()
        self._request.container_size_policy = self._get_size_policy()
        self._request.container_raid_level = get_selected_raid_level(self._raidLevelCombo)
        self._error_label.set_text("")

    def _validate_disks(self):
        # At least one disk has to be selected for the container.
        if not self._get_disks():
            self._error = _("No disks selected.")
            self._error_label.set_text(self._error)
            self.window.show_all()
            return False
        return True

    def _validate_raid_level(self):
        # The chosen RAID level must be usable with the number of
        # selected disks.
        raid_level = get_selected_raid_level(self._raidLevelCombo)
        self._error = ""
        if raid_level:
            paths = self._treeview.get_selection().get_selected_rows()[1]
            report = ValidationReport.from_structure(
                self._device_tree.ValidateRaidLevel(raid_level, len(paths))
            )
            if not report.is_valid():
                self._error = " ".join(report.get_messages())
                self._error_label.set_text(self._error)
                self.window.show_all()
                return False
        return True

    def _get_disks(self):
        """Return the names of the currently selected disks."""
        model, paths = self._treeview.get_selection().get_selected_rows()
        disks = []
        for path in paths:
            itr = model.get_iter(path)
            device_name = model.get_value(itr, 3)
            disks.append(device_name)
        return disks

    def _get_size_policy(self):
        """Return the size policy chosen in the size combo and entry."""
        idx = self._sizeCombo.get_active()
        if idx == 0:
            return SIZE_POLICY_AUTO
        if idx == 1:
            return SIZE_POLICY_MAX
        # Keep the original policy if the entry text was not changed, to
        # avoid losing precision through the human-readable round trip.
        original_size = Size(self._request.container_size_policy)
        original_entry = original_size.human_readable(max_places=2)
        if self._sizeEntry.get_text() == original_entry:
            return self._request.container_size_policy
        size = get_size_from_entry(self._sizeEntry, upper_bound=MAX_SIZE_POLICY_ENTRY)
        if size is None:
            # An unparsable entry falls back to the maximum size policy.
            return SIZE_POLICY_MAX
        return size.get_bytes()

    def _get_luks_version(self):
        # Only report a LUKS version when encryption is enabled;
        # otherwise keep the original value from the request.
        if self._encryptCheckbutton.get_active():
            active_index = self._luks_combo.get_active()
            if active_index != -1:
                return self._luks_combo.get_model()[active_index][0]
        return self._original_luks_version

    def on_size_changed(self, combo):
        # The size entry is only editable for the fixed-size policy.
        active_index = combo.get_active()
        if active_index == 0:
            self._sizeEntry.set_sensitive(False)
        elif active_index == 1:
            self._sizeEntry.set_sensitive(False)
        else:
            self._sizeEntry.set_sensitive(True)

    def on_encrypt_toggled(self, widget):
        self._update_luks_combo()
| jkonecny12/anaconda | pyanaconda/ui/gui/spokes/lib/custom_storage_helpers.py | Python | gpl-2.0 | 28,820 |
import random
from database import *
from packettypes import *
from gamelogic import *
from objects import *
from constants import *
from utils import *
import globalvars as g
#debug
import time
class DataHandler():
def handleData(self, index, data):
jsonData = decodeJSON(data)
packetType = jsonData[0]["packet"]
if packetType == ClientPackets.CGetClasses:
self.handleGetClasses(index)
elif packetType == ClientPackets.CNewAccount:
self.handleNewAccount(index, jsonData)
elif packetType == ClientPackets.CLogin:
self.handleLogin(index, jsonData)
elif packetType == ClientPackets.CAddChar:
self.handleAddChar(index, jsonData)
elif packetType == ClientPackets.CUseChar:
self.handleUseChar(index, jsonData)
elif packetType == ClientPackets.CSayMsg:
self.handleSayMsg(index, jsonData)
elif packetType == ClientPackets.CEmoteMsg:
self.handleEmoteMsg(index, jsonData)
elif packetType == ClientPackets.CBroadcastMsg:
self.handleBroadcastMsg(index, jsonData)
elif packetType == ClientPackets.CGlobalMsg:
self.handleGlobalMsg(index, jsonData)
elif packetType == ClientPackets.CAdminMsg:
self.handleAdminMsg(index, jsonData)
elif packetType == ClientPackets.CPlayerMsg:
self.handlePlayerMsg(index, jsonData)
elif packetType == ClientPackets.CPlayerMove:
self.handlePlayerMove(index, jsonData)
elif packetType == ClientPackets.CPlayerDir:
self.handlePlayerDir(index, jsonData)
elif packetType == ClientPackets.CUseItem:
self.handleUseItem(index, jsonData)
elif packetType == ClientPackets.CCast:
self.handleCastSpell(index, jsonData)
elif packetType == ClientPackets.CTarget:
self.handleTarget(index, jsonData)
elif packetType == ClientPackets.CAttack:
self.handleAttack(index)
elif packetType == ClientPackets.CSpells:
self.handleSpells(index)
elif packetType == ClientPackets.CPlayerInfoRequest:
self.handlePlayerInfoRequest(index, jsonData)
elif packetType == ClientPackets.CWarpMeTo:
self.handleWarpMeTo(index, jsonData)
elif packetType == ClientPackets.CWarpToMe:
self.handleWarpToMe(index, jsonData)
elif packetType == ClientPackets.CWarpTo:
self.handleWarpTo(index, jsonData)
elif packetType == ClientPackets.CSetSprite:
self.handleSetSprite(index, jsonData)
elif packetType == ClientPackets.CRequestNewMap:
self.handleRequestNewMap(index, jsonData)
elif packetType == ClientPackets.CMapData:
self.handleMapData(index, jsonData)
elif packetType == ClientPackets.CNeedMap:
self.handleNeedMap(index, jsonData)
elif packetType == ClientPackets.CMapGetItem:
self.handleMapGetItem(index)
elif packetType == ClientPackets.CMapReport:
self.handleMapReport(index)
elif packetType == ClientPackets.CMapRespawn:
self.handleMapRespawn(index)
elif packetType == ClientPackets.CWhosOnline:
self.handleWhosOnline(index)
elif packetType == ClientPackets.CRequestEditMap:
self.handleRequestEditMap(index)
elif packetType == ClientPackets.CRequestEditItem:
self.handleRequestEditItem(index)
elif packetType == ClientPackets.CSaveItem:
self.handleSaveItem(index, jsonData)
elif packetType == ClientPackets.CRequestEditSpell:
self.handleRequestEditSpell(index)
elif packetType == ClientPackets.CEditSpell:
self.handleEditSpell(index, jsonData)
elif packetType == ClientPackets.CSaveSpell:
self.handleSaveSpell(index, jsonData)
elif packetType == ClientPackets.CRequestEditNpc:
self.handleRequestEditNpc(index)
elif packetType == ClientPackets.CEditNpc:
self.handleEditNpc(index, jsonData)
elif packetType == ClientPackets.CSaveNpc:
self.handleSaveNpc(index, jsonData)
elif packetType == ClientPackets.CSetAccess:
self.handleSetAccess(index, jsonData)
elif packetType == ClientPackets.CGiveItem:
self.handleGiveItem(index, jsonData)
elif packetType == ClientPackets.CQuit:
self.handleQuit(index)
else:
# Packet is unknown - hacking attempt
hackingAttempt(index, 'Packet Modification')
    def handleGetClasses(self, index):
        """Send the character-class list to a client not yet in game."""
        if not isPlaying(index):
            sendNewCharClasses(index)
    def handleNewAccount(self, index, jsonData):
        """Create a new account from a CNewAccount packet.

        Ignored for clients that are already playing or logged in.
        Short names/passwords and duplicate account names are rejected;
        the client gets an alert message either way.
        """
        name = jsonData[0]['name']
        password = jsonData[0]['password']
        if not isPlaying(index):
            if not isLoggedIn(index):
                # prevent hacking
                if len(name) < 3 or len(password) < 3:
                    print "hacking attempt"
                    alertMsg(index, "Your name and password must be at least three characters in length.")
                    return
                # check if account already exists
                if not accountExists(name):
                    addAccount(index, name, password)
                    g.serverLogger.info('Account ' + name + ' has been created')
                    alertMsg(index, "Your account has been created!")
                else:
                    g.serverLogger.info('Account name has already been taken!')
                    alertMsg(index, "Sorry, that account name is already taken!")
''' Player login '''
def handleLogin(self, index, jsonData):
if not isPlaying(index):
if not isLoggedIn(index):
plrName = jsonData[0]["name"]
plrPassword = jsonData[0]["password"]
# todo: check version
# todo: is shutting down?
if len(plrName) < 3 or len(plrPassword) < 3:
alertMsg(index, "The acount name or password is too short!")
return
#Not necessary
'''if not accountExists(plrName):
# alert msg
return'''
if not passwordOK(plrName, plrPassword):
alertMsg(index, "Wrong account name or password!")
return
if isMultiAccounts(plrName):
alertMsg(index, "That account is already logged in!")
g.conn.closeConnection(index)
return
# load the player
loadPlayer(index, plrName)
sendChars(index)
g.connectionLogger.info(getPlayerLogin(index) + ' has logged in')
''' player creates a new character '''
def handleAddChar(self, index, jsonData):
if not isPlaying(index):
name = jsonData[0]["name"]
sex = jsonData[0]["sex"]
Class = jsonData[0]["class"]
charNum = jsonData[0]["slot"]
# prevent hacking
if len(name) < 3:
alertMsg(index, 'Character name must be at least three characters in length.')
return
#todo: check for certain letters
if charNum < 0 or charNum > MAX_CHARS:
alertMsg(index, 'Invalid CharNum')
return
#todo: check sex
if Class < 0 or Class > g.maxClasses:
alertMsg(index, 'Invalid Class')
return
# check if a character already exists in slot
if charExist(index, charNum):
alertMsg(index, 'Character already exists')
return
# check if name is in use
if findChar(name):
alertMsg(index, 'Sorry, but that name is in use!')
return
# everything went ok, add the character
addChar(index, name, sex, Class, charNum)
g.serverLogger.info("Character " + name + " added to " + getPlayerLogin(index) + "'s account.")
# alertMsg(player created)
# send characters to player
sendChars(index)
''' Player selected character '''
def handleUseChar(self, index, jsonData):
if not isPlaying(index):
charNum = jsonData[0]["charslot"]
if charNum < 0 or charNum > MAX_CHARS:
hackingAttempt(index, 'Invalid CharNum')
return
# make sure character exists
if charExist(index, charNum):
TempPlayer[index].charNum = charNum
joinGame(index)
g.connectionLogger.info("Has begun playing")
''' say msg '''
def handleSayMsg(self, index, jsonData):
msg = jsonData[0]["msg"]
mapMsg(getPlayerMap(index), getPlayerName(index) + ': ' + msg, sayColor)
def handleEmoteMsg(self, index, jsonData):
msg = jsonData[0]["msg"]
mapMsg(getPlayerMap(index), getPlayerName(index) + ' ' + msg, emoteColor)
def handleBroadcastMsg(self, index, jsonData):
msg = jsonData[0]["msg"]
# prevent hacking
# check size
string = getPlayerName(index) + ': ' + msg
globalMsg(string, broadcastColor)
def handleGlobalMsg(self, index, jsonData):
msg = jsonData[0]["msg"]
if getPlayerAccess(index) > 0:
string = '(global) ' + getPlayerName(index) +': ' + msg
globalMsg(string, globalColor)
def handleAdminMsg(self, index, jsonData):
msg = jsonData[0]["msg"]
if getPlayerAccess(index) > 0:
string = '(admin ' + getPlayerName(index) +') ' + msg
globalMsg(string, adminColor)
''' Player message '''
def handlePlayerMsg(self, index, jsonData):
msg = jsonData[0]["msg"]
msgTo = findPlayer(jsonData[0]["msgto"])
# check if they are talking to themselves
if msgTo != index:
if msgTo is not None:
playerMsg(msgTo, getPlayerName(index) + ' tells you "' + getPlayerName(msgTo) + ', ' + msg + '"', tellColor)
playerMsg(index, 'You tell ' + getPlayerName(msgTo) + ', "' + msg + '"', tellColor)
else:
playerMsg(index, 'Player is not online.', textColor.WHITE)
else:
playerMsg(index, 'You cannot message yourself.', textColor.BRIGHT_RED)
''' Player movement '''
def handlePlayerMove(self, index, jsonData):
if TempPlayer[index].gettingMap:
return
direction = jsonData[0]["direction"]
movement = jsonData[0]["moving"]
# prevent hacking
if direction < DIR_UP or direction > DIR_RIGHT:
hackingAttempt(index, 'Invalid Direction')
# prevent hacking
if movement < 1 or movement > 2:
hackingAttempt(index, 'Invalid Movement')
# prevent player from moving if they have casted a spell
if TempPlayer[index].castedSpell:
# check if they have already casted a spell and if so we cant let them move
tickCount = time.time() * 1000
if tickCount > TempPlayer[index].attackTimer + 1000:
TempPlayer[index].castedSpell = False
else:
sendPlayerXY(index)
return
playerMove(index, direction, movement)
''' Player direction '''
def handlePlayerDir(self, index, jsonData):
direction = jsonData[0]["direction"]
setPlayerDir(index, direction)
sendPlayerDir(index)
def handleUseItem(self, index, jsonData):
invNum = jsonData[0]['invnum']
charNum = TempPlayer[index].charNum
# prevent cheating
if invNum < 0 or invNum > MAX_ITEMS:
hackingAttempt(index, 'Invalid invNum')
return
if charNum < 0 or charNum > MAX_CHARS:
hackingAttempt(index, 'Invalid charNum')
return
if getPlayerInvItemNum(index, invNum) >= 0 and getPlayerInvItemNum(index, invNum) <= MAX_ITEMS:
n = Item[getPlayerInvItemNum(index, invNum)].data2
# find out what item it is
itemType = Item[getPlayerInvItemNum(index, invNum)].type
if itemType == ITEM_TYPE_HELMET:
if invNum != getPlayerEquipmentSlot(index, Equipment.helmet):
# todo: check if required stats have been met
setPlayerEquipmentSlot(index, invNum, Equipment.helmet)
else:
setPlayerEquipmentSlot(index, None, Equipment.helmet)
sendWornEquipment(index)
elif itemType == ITEM_TYPE_ARMOR:
if invNum != getPlayerEquipmentSlot(index, Equipment.armor):
# todo: check if required stats have been met
setPlayerEquipmentSlot(index, invNum, Equipment.armor)
else:
setPlayerEquipmentSlot(index, None, Equipment.armor)
sendWornEquipment(index)
elif itemType == ITEM_TYPE_WEAPON:
if invNum != getPlayerEquipmentSlot(index, Equipment.weapon):
# todo: check if required stats have been met
setPlayerEquipmentSlot(index, invNum, Equipment.weapon)
else:
setPlayerEquipmentSlot(index, None, Equipment.weapon)
sendWornEquipment(index)
elif itemType == ITEM_TYPE_SHIELD:
if invNum != getPlayerEquipmentSlot(index, Equipment.shield):
# todo: check if required stats have been met
setPlayerEquipmentSlot(index, invNum, Equipment.shield)
else:
setPlayerEquipmentSlot(index, None, Equipment.shield)
sendWornEquipment(index)
elif itemType == ITEM_TYPE_SPELL:
# spell num
spellNum = Item[getPlayerInvItemNum(index, invNum)].data1
if spellNum != None:
# make sure they are the right class
if int(1 if Spell[spellNum].reqClass is None else Spell[spellNum].reqClass) - 1 == getPlayerClass(index) or Spell[n].reqClass is None:
# make sure they are the right level
levelReq = Spell[spellNum].reqLevel
if levelReq <= getPlayerLevel(index):
i = findOpenSpellSlot(index)
if i is not None:
if not hasSpell(index, spellNum):
setPlayerSpell(index, i, spellNum)
takeItem(index, getPlayerInvItemNum(index, invNum), 0)
playerMsg(index, 'You study the spell carefully...', textColor.YELLOW)
playerMsg(index, 'You have learned a new spell!', textColor.WHITE)
else:
playerMsg(index, 'You have already learned this spell!', textColor.BRIGHT_RED)
else:
playerMsg(index, 'You have learned all that you can learn!', textColor.BRIGHT_RED)
else:
playerMsg(index, 'You must be level ' + str(levelReq) + ' to learn this spell.', textColor.WHITE)
else:
playerMsg(index, 'This spell can only be learned by a '+ getClassName(Spell[spellNum].reqClass) + '.', textColor.WHITE)
else:
playerMsg(index, 'An error occured with the spell. Please inform an admin!', textColor.WHITE)
# todo: potions, keys
    def handleCastSpell(self, index, jsonData):
        """Cast the spell stored in the requested spell slot."""
        spellSlot = jsonData[0]['spellslot']
        castSpell(index, spellSlot)
def handleTarget(self, index, jsonData):
x = jsonData[0]['x']
y = jsonData[0]['y']
if x < 0 or x > MAX_MAPX or y < 0 or y > MAX_MAPY:
return
# check for player
for i in range(len(g.playersOnline)):
if getPlayerMap(index) == getPlayerMap(g.playersOnline[i]):
if getPlayerX(g.playersOnline[i]) == x and getPlayerY(g.playersOnline[i]) == y:
# consider the player
if g.playersOnline[i] != index:
if getPlayerLevel(g.playersOnline[i]) >= getPlayerLevel(index) + 5:
playerMsg(index, 'You wouldn\'t stand a chance.', textColor.BRIGHT_RED)
elif getPlayerLevel(g.playersOnline[i]) > getPlayerLevel(index):
playerMsg(index, 'This one seems to have an advantage over you.', textColor.YELLOW)
elif getPlayerLevel(g.playersOnline[i]) == getPlayerLevel(index):
playerMsg(index, 'This would be an even fight.', textColor.WHITE)
elif getPlayerLevel(g.playersOnline[i]) + 5 <= getPlayerLevel(index):
playerMsg(index, 'You could slaughter that player.', textColor.BRIGHT_BLUE)
elif getPlayerLevel(g.playersOnline[i]) < getPlayerLevel(index):
playerMsg(index, 'You would have an advantage over that player.', textColor.YELLOW)
# change the target
TempPlayer[index].target = g.playersOnline[i]
TempPlayer[index].targetType = TARGET_TYPE_PLAYER
playerMsg(index, 'Your target is now ' + getPlayerName(g.playersOnline[i]) + '.', textColor.YELLOW)
return
# check for npc
for i in range(MAX_MAP_NPCS):
if mapNPC[getPlayerMap(index)][i].num is not None:
if mapNPC[getPlayerMap(index)][i].x == x and mapNPC[getPlayerMap(index)][i].y == y:
# change the target
TempPlayer[index].target = i
TempPlayer[index].targetType = TARGET_TYPE_NPC
playerMsg(index, 'Your target is now a ' + NPC[mapNPC[getPlayerMap(index)][i].num].name + '.', textColor.YELLOW)
return
def handleAttack(self, index):
# try to attack a player
for i in range(g.totalPlayersOnline):
tempIndex = g.playersOnline[i]
# make sure we dont attack ourselves
if tempIndex != index:
# can we attack the player?
if canAttackPlayer(index, tempIndex):
# check if player can block the hit
if not canPlayerBlockHit(tempIndex):
# get the damage we can do
if not canPlayerCriticalHit(index):
# normal hit
damage = getPlayerDamage(index) - getPlayerProtection(tempIndex)
else:
# critical hit so add bonus
n = getPlayerDamage(index)
damage = n + random.randint(1, (n // 2)) + 1 - getPlayerProtection(tempIndex)
playerMsg(index, 'You feel a surge of energy upon swinging!', textColor.BRIGHT_CYAN)
playerMsg(tempIndex, getPlayerName(index) + ' swings with enormous might!', textColor.BRIGHT_CYAN)
attackPlayer(index, tempIndex, damage)
else:
# player has blocked the hit
playerMsg(index, getPlayerName(tempIndex) + '\'s ' + Item[getPlayerInvItemNum(tempIndex, getPlayerEquipmentSlot(tempIndex, Equipment.shield))].name + ' has blocked your hit!', textColor.BRIGHT_CYAN)
playerMsg(tempIndex, 'Your ' + Item[getPlayerInvItemNum(tempIndex, getPlayerEquipmentSlot(tempIndex, Equipment.shield))].name + ' has blocked ' + getPlayerName(index) + '\'s hit!', textColor.BRIGHT_CYAN)
# todo: handle attack npc
for i in range(MAX_MAP_NPCS):
if canAttackNpc(index, i):
# get the damage we can do
if not canPlayerCriticalHit(index):
damage = getPlayerDamage(index) - (NPC[mapNPC[getPlayerMap(index)][i].num].stat[Stats.defense] // 2)
else:
n = getPlayerDamage(index)
damage = n + random.randint(0, n // 2) + 1 - (NPC[mapNPC[getPlayerMap(index)][i].num].stat[Stats.defense] // 2)
playerMsg(index, 'You feel a surge of energy upon swinging!', textColor.BRIGHT_CYAN)
if damage > 0:
attackNpc(index, i, damage)
else:
playerMsg(index, 'Your attack does nothing.', textColor.BRIGHT_RED)
    def handleSpells(self, index):
        """Send the player's learned spells to the client."""
        sendPlayerSpells(index)
def handlePlayerInfoRequest(self, index, jsonData):
name = jsonData[0]['name']
i = findPlayer(name)
if i != None:
playerMsg(index, 'Account: ' + Player[i].Login + ', Name: ' + getPlayerName(i), textColor.BRIGHT_GREEN)
if getPlayerAccess(index) > ADMIN_MONITOR:
playerMsg(index, '-=- Stats for ' + getPlayerName(i) + ' -=-', textColor.BRIGHT_GREEN)
playerMsg(index, 'Level: ' + str(getPlayerLevel(i)) + ' EXP: ' + str(getPlayerExp(i)) + '/' + str(getPlayerNextLevel(i)), textColor.BRIGHT_GREEN)
playerMsg(index, 'HP: ' + str(getPlayerVital(i, Vitals.hp)) + '/' + str(getPlayerMaxVital(i, Vitals.hp)) + ' MP: ' + str(getPlayerVital(i, Vitals.mp)) + '/' + str(getPlayerMaxVital(i, Vitals.mp)) + ' SP: ' + str(getPlayerVital(i, Vitals.sp)) + '/' + str(getPlayerMaxVital(i, Vitals.sp)), textColor.BRIGHT_GREEN)
playerMsg(index, 'Strength: ' + str(getPlayerStat(i, Stats.strength)) + ' Defense: ' + str(getPlayerStat(i, Stats.defense)) + ' Magic: ' + str(getPlayerStat(i, Stats.magic)) + ' Speed: ' + str(getPlayerStat(i, Stats.speed)), textColor.BRIGHT_GREEN)
n = (getPlayerStat(i, Stats.strength) // 2) + (getPlayerLevel(i) // 2)
k = (getPlayerStat(i, Stats.defense) // 2) + (getPlayerLevel(i) // 2)
if n > 100:
n = 100
if k > 100:
k = 100
playerMsg(index, 'Critical Hit Chance: ' + str(n) + '%, Block Chance: ' + str(k) + '%', textColor.BRIGHT_GREEN)
else:
playerMsg(index, 'Player is not online.', textColor.WHITE)
def handleWarpMeTo(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_MAPPER:
hackingAttempt(index, 'Admin Cloning')
return
playerName = jsonData[0]['name']
playerIndex = findPlayer(playerName)
if playerIndex != index:
if playerIndex is not None:
playerWarp(index, getPlayerMap(playerIndex), getPlayerX(playerIndex), getPlayerY(playerIndex))
playerMsg(playerIndex, getPlayerName(index) + ' has been warped to you.', textColor.BRIGHT_BLUE)
playerMsg(index, 'You have been warped to ' + getPlayerName(playerIndex) + '.', textColor.BRIGHT_BLUE)
g.connectionLogger.info(getPlayerName(index) + ' has warped to ' + getPlayerName(playerIndex) + ', map #' + str(getPlayerMap(index)))
else:
playerMsg(index, 'Player is not online.', textColor.RED) # white?
return
else:
playerMsg(index, 'You cannot warp to yourself!', textColor.RED) # white?
return
def handleWarpToMe(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_MAPPER:
hackingAttempt(index, 'Admin Cloning')
return
playerName = jsonData[0]['name']
playerIndex = findPlayer(playerName)
if playerIndex != index:
if playerIndex is not None:
playerWarp(playerIndex, getPlayerMap(index), getPlayerX(index), getPlayerY(index))
playerMsg(playerIndex, 'You have been summoned by ' + getPlayerName(index) + '.', textColor.BRIGHT_BLUE)
playerMsg(index, getPlayerName(playerIndex) + ' has been summoned.', textColor.BRIGHT_BLUE)
g.connectionLogger.info(getPlayerName(index) + ' has warped ' + getPlayerName(playerIndex) + ' to self, map #' + str(getPlayerMap(index)))
else:
playerMsg(index, 'Player is not online.', textColor.RED) # white?
return
else:
playerMsg(index, 'You cannot warp to yourself!', textColor.RED) # white?
return
def handleWarpTo(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_MAPPER:
hackingAttempt(index, 'Admin Cloning')
return
mapNum = jsonData[0]['map']
if mapNum < 0 or mapNum > MAX_MAPS:
hackingAttempt(index, 'Invalid MapNum')
return
playerWarp(index, mapNum, getPlayerX(index), getPlayerY(index))
playerMsg(index, 'You have been warped to map #' + str(mapNum), textColor.BRIGHT_BLUE)
g.connectionLogger.info(getPlayerName(index) + ' warped to map #' + str(mapNum))
def handleSetSprite(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_MAPPER:
hackingAttempt(index, 'Admin Cloning')
return
n = jsonData[0]["sprite"]
setPlayerSprite(index, n)
sendPlayerData(index)
def handleRequestNewMap(self, index, jsonData):
direction = jsonData[0]["direction"]
if direction < DIR_UP or direction > DIR_RIGHT:
print "hacking attempt"
return
playerMove(index, direction, 1)
def handleMapData(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_MAPPER:
hackingAttempt(index, 'Admin Cloning')
return
mapNum = getPlayerMap(index)
revision = Map[mapNum].revision + 1
clearMap(mapNum)
Map[mapNum].name = jsonData[0]["mapname"]
Map[mapNum].revision = revision
Map[mapNum].moral = jsonData[0]["moral"]
Map[mapNum].tileSet = jsonData[0]["tileset"]
Map[mapNum].up = jsonData[0]["up"]
Map[mapNum].down = jsonData[0]["down"]
Map[mapNum].left = jsonData[0]["left"]
Map[mapNum].right = jsonData[0]["right"]
Map[mapNum].bootMap = jsonData[0]["bootmap"]
Map[mapNum].bootx = jsonData[0]["bootx"]
Map[mapNum].booty = jsonData[0]["booty"]
tile_i = 1
#todo: fix [0]
for x in range(MAX_MAPX):
for y in range(MAX_MAPY):
Map[mapNum].tile[x][y].layer1 = jsonData[tile_i][0]["layer1"]
Map[mapNum].tile[x][y].layer2 = jsonData[tile_i][0]["layer2"]
Map[mapNum].tile[x][y].layer3 = jsonData[tile_i][0]["layer3"]
Map[mapNum].tile[x][y].mask = jsonData[tile_i][0]["mask"]
Map[mapNum].tile[x][y].anim = jsonData[tile_i][0]["animation"]
Map[mapNum].tile[x][y].fringe = jsonData[tile_i][0]["fringe"]
Map[mapNum].tile[x][y].type = jsonData[tile_i][0]["type"]
Map[mapNum].tile[x][y].data1 = jsonData[tile_i][0]["data1"]
Map[mapNum].tile[x][y].data2 = jsonData[tile_i][0]["data2"]
Map[mapNum].tile[x][y].data3 = jsonData[tile_i][0]["data3"]
tile_i += 1
for i in range(MAX_MAP_NPCS):
Map[mapNum].npc[i] = jsonData[tile_i][0]['npcnum']
clearMapNpc(i, mapNum)
tile_i += 1
sendMapNpcsToMap(mapNum)
spawnMapNpcs(mapNum)
# clear map items
for i in range(MAX_MAP_ITEMS):
spawnItemSlot(i, None, None, None, getPlayerMap(index), mapItem[getPlayerMap(index)][i].x, mapItem[getPlayerMap(index)][i].y)
clearMapItem(i, getPlayerMap(index))
# respawn items
spawnMapItems(getPlayerMap(index))
# save map
saveMap(mapNum)
mapCacheCreate(mapNum)
# refresh map for everyone online
for i in range(g.totalPlayersOnline):
index = g.playersOnline[i]
if isPlaying(index):
if getPlayerMap(index) == mapNum:
playerWarp(index, mapNum, getPlayerX(index), getPlayerY(index))
def handleNeedMap(self, index, jsonData):
g.serverLogger.debug("handleNeedMap()")
answer = jsonData[0]["answer"]
if answer == 1:
# needs new revision
sendMap(index, getPlayerMap(index))
sendMapItemsTo(index, getPlayerMap(index))
sendMapNpcsTo(index, getPlayerMap(index))
sendJoinMap(index)
TempPlayer[index].gettingMap = False
sendMapDone(index)
# todo: senddoordata
    def handleMapGetItem(self, index):
        """Pick up the map item on the player's current tile."""
        playerMapGetItem(index)
def handleMapReport(self, index):
if getPlayerAccess(index) < ADMIN_MAPPER:
hackingAttempt(index, 'Admin Cloning')
return
msg = 'Free Maps: '
tMapStart = 1
tMapEnd = 1
for i in range(MAX_MAPS):
if len(Map[i].name) == 0:
tMapEnd += 1
else:
if tMapEnd - tMapStart > 0:
msg += str(tMapStart) + '-' + str(tMapEnd-1) + ', '
tMapStart = i + 1
tMapEnd = i + 1
msg += str(tMapStart) + '-' + str(tMapEnd-1) + ', '
msg = msg[:-2] + '.'
playerMsg(index, msg, textColor.BROWN)
def handleMapRespawn(self, index):
if getPlayerAccess(index) < ADMIN_MAPPER:
hackingAttempt(index, 'Admin Cloning')
return
# clear it all
for i in range(MAX_MAP_ITEMS):
spawnItemSlot(i, None, None, None, getPlayerMap(index), mapItem[getPlayerMap(index)][i].x, mapItem[getPlayerMap(index)][i].y)
clearMapItem(i, getPlayerMap(index))
# respawn
spawnMapItems(getPlayerMap(index))
# respawn npcs
for i in range(MAX_MAP_NPCS):
spawnNpc(i, getPlayerMap(index))
playerMsg(index, 'Map respawned.', textColor.BLUE)
g.connectionLogger.info(getPlayerName(index) + ' has respawned map #' + str(getPlayerMap(index)) + '.')
def handleMapDropItem(self, index, jsonData):
invNum = jsonData[0]['invnum']
amount = jsonData[0]['amount']
# prevent hacking
if invNum < 1 or invNum > MAX_INV:
return
if amount > getPlayerInvItemValue(index, invNum):
return
if Item[getPlayerInvItemNum(index, invNum)].type == ITEM_TYPE_CURRENCY:
# check if money and if so, make sure it wont drop to value 0
if amoun <= 0:
# hacking attemt
takeItem(index, getPlayerInvItemNum(index, invNum), 0)
playerMapDropItem(index, invNum, amount)
    def handleWhosOnline(self, index):
        """Send the list of online players to the client."""
        sendWhosOnline(index)
    def handleRequestEditMap(self, index):
        """Open the map editor for a mapper-level (or higher) admin."""
        if getPlayerAccess(index) < ADMIN_MAPPER:
            hackingAttempt(index, 'Admin Cloning')
            return
        sendEditMap(index)
    def handleRequestEditItem(self, index):
        """Open the item editor for a developer-level (or higher) admin."""
        if getPlayerAccess(index) < ADMIN_DEVELOPER:
            hackingAttempt(index, 'Admin Cloning')
            return
        sendItemEditor(index)
def handleSaveItem(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_CREATOR:
hackingAttempt(index, 'Admin Cloning')
return
itemNum = int(jsonData[0]['itemnum'])
if itemNum < 0 or itemNum > MAX_ITEMS:
hackingAttempt(index, 'Invalid ItemNum')
return
# update item
Item[itemNum].name = jsonData[0]['itemname']
Item[itemNum].pic = jsonData[0]['itempic']
Item[itemNum].type = jsonData[0]['itemtype']
Item[itemNum].data1 = jsonData[0]['itemdata1']
Item[itemNum].data2 = jsonData[0]['itemdata2']
Item[itemNum].data3 = jsonData[0]['itemdata3']
# save item
sendUpdateItemToAll(itemNum)
saveItem(itemNum)
g.connectionLogger.info(getPlayerName(index) + ' saved item #' + str(itemNum) + '.')
playerMsg(index, Item[itemNum].name + ' was saved as item #' + str(itemNum), textColor.BRIGHT_BLUE)
def handleRequestEditSpell(self, index):
if getPlayerAccess(index) < ADMIN_DEVELOPER:
hackingAttempt(index, 'Admin Cloning')
sendSpellEditor(index)
def handleEditSpell(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_DEVELOPER:
hackingAttempt(index, 'Admin Cloning')
return
spellNum = jsonData[0]['spellnum']
# prevent hacking
if spellNum < 0 or spellNum > MAX_SPELLS:
hackingAttempt(index, 'Invalid Spell Index')
g.connectionLogger.info(getPlayerName(index) + ' editing spell #' + str(spellNum) + '.')
sendEditSpellTo(index, spellNum)
def handleSaveSpell(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_DEVELOPER:
hackingAttempt(index, 'Admin Cloning')
# spell num
spellNum = jsonData[0]['spellnum']
# prevent hacking
if spellNum is None or spellNum > MAX_SPELLS:
hackingAttempt(index, 'Invalid Spell Index')
# update spell
Spell[spellNum].name = jsonData[0]['spellname']
Spell[spellNum].pic = jsonData[0]['spellpic']
Spell[spellNum].type = jsonData[0]['spelltype']
Spell[spellNum].reqMp = jsonData[0]['mpreq']
Spell[spellNum].reqClass = jsonData[0]['classreq']
Spell[spellNum].reqLevel = jsonData[0]['levelreq']
Spell[spellNum].data1 = jsonData[0]['data1']
Spell[spellNum].data2 = jsonData[0]['data2']
Spell[spellNum].data3 = jsonData[0]['data3']
# save
sendUpdateSpellToAll(spellNum)
saveSpell(spellNum)
g.serverLogger.info(getPlayerName(index) + ' saved spell #' + str(spellNum) + '.')
    def handleRequestEditNpc(self, index):
        """Open the NPC editor for a developer-level (or higher) admin."""
        if getPlayerAccess(index) < ADMIN_DEVELOPER:
            hackingAttempt(index, 'Admin Cloning')
            return
        sendNpcEditor(index)
def handleEditNpc(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_DEVELOPER:
hackingAttempt(index, 'Admin Cloning')
return
npcNum = jsonData[0]['npcnum']
# prevent hacking
if npcNum < 0 or npcNum > MAX_NPCS:
hackingAttempt(index, 'Invalid NPC Index')
g.connectionLogger.info(getPlayerName(index) + ' editing NPC #' + str(npcNum) + '.')
sendEditNpcTo(index, npcNum)
def handleSaveNpc(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_DEVELOPER:
hackingAttempt(index, 'Admin Cloning')
return
npcNum = jsonData[0]['npcnum']
if npcNum < 0 or npcNum > MAX_NPCS:
return
# update npc
NPC[npcNum].name = jsonData[0]['name']
NPC[npcNum].attackSay = jsonData[0]['attacksay']
NPC[npcNum].sprite = jsonData[0]['sprite']
NPC[npcNum].spawnSecs = jsonData[0]['spawnsec']
NPC[npcNum].behaviour = jsonData[0]['behavior']
NPC[npcNum].range = jsonData[0]['range']
NPC[npcNum].dropChance = jsonData[0]['dropchance']
NPC[npcNum].dropItem = jsonData[0]['dropitem']
NPC[npcNum].dropItemValue = jsonData[0]['dropitemval']
NPC[npcNum].stat[Stats.strength] = jsonData[0]['strength']
NPC[npcNum].stat[Stats.defense] = jsonData[0]['defense']
NPC[npcNum].stat[Stats.magic] = jsonData[0]['magic']
NPC[npcNum].stat[Stats.speed] = jsonData[0]['speed']
# save it
sendUpdateNpcToAll(npcNum)
saveNpc(npcNum)
g.connectionLogger.info(getPlayerName(index) + ' saved NPC #' + str(npcNum) + '.')
playerMsg(index, NPC[npcNum].name + ' was saved as NPC #' + str(npcNum), textColor.BRIGHT_BLUE)
def handleSetAccess(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_CREATOR:
hackingAttempt(index, 'Admin Cloning')
return
plrName = jsonData[0]['name']
access = jsonData[0]['access']
plrIndex = findPlayer(plrName)
# check for invalid access level
if access >= 1 or access <= 4:
if plrIndex is not None:
if getPlayerAccess(plrIndex) == getPlayerAccess(index):
playerMsg(index, 'Invalid access level.', textColor.RED)
return
if getPlayerAccess(plrIndex) <= 0:
globalMsg(getPlayerName(index) + ' has been blessed with administrative access.', textColor.BRIGHT_BLUE)
setPlayerAccess(plrIndex, access)
sendPlayerData(plrIndex)
g.connectionLogger.info(getPlayerName(index) + ' has modified ' + getPlayerName(plrIndex) + 's access.')
else:
playerMsg(index, 'Player is not online.', textColor.WHITE)
else:
playerMsg(index, 'Invalid access level.', textColor.RED)
def handleGiveItem(self, index, jsonData):
if getPlayerAccess(index) < ADMIN_DEVELOPER:
hackingAttempt(index, 'Admin Cloning')
return
plrName = jsonData[0]['name']
itemNum = jsonData[0]['itemnum']
plrIndex = findPlayer(plrName)
giveItem(plrIndex, itemNum, 1)
def handleSearch(self, index, jsonData):
x = jsonData[0]['x']
y = jsonData[0]['y']
if x < 0 or x > MAX_MAPX or y < 0 or y > MAX_MAPY:
return
for i in range(g.totalPlayersOnline):
if getPlayerMap(index) == getPlayerMap(g.playersOnline[i]):
if getPlayerX(g.playersOnline[i]) == x:
if getPlayerY(g.playersOnline[i]) == y:
# consider the player
if g.playersOnline[i] != index:
if getPlayerLevel(playersOnline[i]) >= getPlayerLevel(index) + 5:
playerMsg(index, "You wouldn't stand a chance.", textColor.BRIGHT_RED)
elif getPlayerLevel(playersOnline[i]) > getPlayerLevel(index):
playerMsg(index, "This one seems to have an advantage over you.", textColor.YELLOW)
elif getPlayerLevel(playersOnline[i]) == getPlayerLevel(index):
playerMsg(index, "This would be an even fight.", textColor.WHITE)
elif getPlayerLevel(playersOnline[i]) + 5 <= getPlayerLevel(index):
playerMsg(index, "You could slaughter that player.", textColor.BRIGHT_BLUE)
elif getPlayerLevel(playersOnline[i]) < getPlayerLevel(index):
playerMsg(index, "You would have an advantage over that player.", textColor.YELLOW)
# todo: change target
for i in range(MAX_MAP_ITEMS):
if mapItem[getPlayerMap(index)][i].num != None:
if mapItem[getPlayerMap(index)][i].x == x and mapItem[getPlayerMap(index)][i].y == y:
playerMsg(index, 'You see a ' + Item[mapItem[getPlayerMap(index)][i].num].name + '.', textColor.YELLOW)
    def handleQuit(self, index):
        """Disconnect the client in response to a quit packet.

        NOTE(review): other handlers close connections via
        g.conn.closeConnection(index); verify that the bare
        closeConnection used here resolves to the intended function.
        """
        closeConnection(index)
| marcusmoller/pyorpg-server | src/datahandler.py | Python | mit | 40,001 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 10:50:54 2016
@author: Radu
"""
from neuron import h
from ib_in import Ib_in
from ia_in import Ia_in
from rc_in import Renshaw
from motoneuron import Motoneuron
import numpy
from neuronpy.util import spiketrain
class ReflexNetwork:
"""
"""
    def __init__(self, N=5):
        """Build the reflex network and its bookkeeping structures.

        :param N: Number of cell columns; set_numcells() actually builds
            and wires the net.
        """
        self._N = N  # Total number of cells in the net
        self.cells = []  # Cells in the net
        self.nclist = []  # NetCon list
        self.stim = None  # Stimulator
        self.gidlist = []  # List of global identifiers on this host
        self.t_vec = h.Vector()  # Spike time of all cells
        self.id_vec = h.Vector()  # Ids of spike times
        #### Make a new ParallelContext object
        self.pc = h.ParallelContext()
        self.set_numcells(N)  # Actually build the net.
    #
def set_gids(self):
"""Set the gidlist on this host."""
self.gidlist = []
#### Round-robin counting.
#### Each host as an id from 0 to pc.nhost() - 1.
for i in range(int(self.pc.id()), self._N, int(self.pc.nhost())):
self.gidlist.append(i)
    def set_numcells(self, N, radius=50):
        """Create, layout, and connect N cells.

        NOTE(review): *radius* is currently unused; the cell spacing is
        hard-coded inside create_cells.
        """
        self._N = N
        self.set_gids() #### Used when creating and connecting cells
        self.create_cells(N)
        self.connect_cells()
        self.connect_stim()
    #
def create_cells(self, N):
"""Create and layout N cells in the network."""
self.cells = []
self.cells_MNE = []
self.cells_MNF = []
self.cells_IaE = []
self.cells_IaF = []
self.cells_IbE = []
self.cells_IbF = []
self.cells_RE = []
self.cells_RF = []
r = 50 # Radius of cell locations from origin (0,0,0) in microns
N = self._N
for i in range(N):
# Create and position cells
_MNE = Motoneuron()
_MNE.set_position(0, -2*r, i*r)
self.cells.append(_MNE)
self.cells_MNE.append(_MNE)
_MNF = Motoneuron()
_MNF.set_position(0, 2*r, i*r)
self.cells.append(_MNF)
self.cells_MNF.append(_MNF)
_IaE = Ia_in()
_IaE.set_position(-r, -2*r, i*r)
self.cells.append(_IaE)
self.cells_IaE.append(_IaE)
_IaF = Ia_in()
_IaF.set_position(-r, 2*r, i*r)
self.cells.append(_IaF)
self.cells_IaF.append(_IaF)
_IbE = Ib_in()
_IbE.set_position(-2*r, -2*r, i*r)
self.cells.append(_IbE)
self.cells_IbE.append(_IbE)
_IbF = Ib_in()
_IbF.set_position(-2*r, 2*r, i*r)
self.cells.append(_IbF)
self.cells_IbF.append(_IbF)
_RE = Renshaw()
_RE.set_position(r, -r, i*r)
self.cells.append(_RE)
self.cells_RE.append(_RE)
_RF = Renshaw()
_RF.set_position(r, r, i*r)
self.cells.append(_RF)
self.cells_RF.append(_RF)
#
def connect_cells(self):
"""Connect cell n to cell n + 1."""
self.nclist = []
#
def connect_stim(self):
"""Connect a spiking generator to the first cell to get
the network going."""
#create stimulators
N = self._N
self.iaf_fiber_list = []
self.iae_fiber_list = []
self.iie_fiber_list = []
self.iif_fiber_list = []
self.ibe_fiber_list = []
self.ibf_fiber_list = []
for i in range(N):
iaf = h.NetStim()
iaf.interval = 100
iaf.number = 1e9
iaf.start = 0
iaf.noise = 0.5
self.iaf_fiber_list.append(iaf)
iae = h.NetStim()
iae.interval = 100
iae.number = 1e9
iae.start = 0
iae.noise = 0.5
self.iae_fiber_list.append(iae)
iie = h.NetStim()
iie.interval = 1000
iie.number = 1e9
iie.start = 0
iie.noise = 0.5
self.iie_fiber_list.append(iie)
iif = h.NetStim()
iif.interval = 1000
iif.number = 1e9
iif.start = 0
iif.noise = 0.5
self.iif_fiber_list.append(iif)
ibe = h.NetStim()
ibe.interval = 50
ibe.number = 1e9
ibe.start = 0
ibe.noise = 0.5
self.ibe_fiber_list.append(ibe)
ibf = h.NetStim()
ibf.interval = 50
ibf.number = 1e9
ibf.start = 0
ibf.noise = 0.5
self.ibf_fiber_list.append(ibf)
numpy.random.seed(123)
p = 50e-2
syn_weight = 5e-3
for i in range(N):
Y_iaf_ia = numpy.random.binomial(1, p, N) # 5% probability Ia fiber
# to Ia interneuron connection
Y_iae_ia = numpy.random.binomial(1, p, N) # 5% probability Ia fiber
# to Ia interneuron connection
Y_iaf_ib = numpy.random.binomial(1, p, N) # 5% probability Ia fiber
# to Ib interneuron connection
Y_iae_ib = numpy.random.binomial(1, p, N) # 5% probability Ia fiber
# to Ib interneuron connection
Y_iaf_mn = numpy.random.binomial(1, p, N) # 5% probability Ia fiber
# to motoneuron connection
Y_iae_mn = numpy.random.binomial(1, p, N) # 5% probability Ia fiber
# to motoneuron connection
Y_ibf_ib = numpy.random.binomial(1, p, N) # 5% probability Ib fiber
# to Ib interneuron connection
Y_ibe_ib = numpy.random.binomial(1, p, N) # 5% probability Ib fiber
# to Ib interneuron connection
for k in range(N):
if Y_iaf_ia[k]:
ncstim = h.NetCon(self.iaf_fiber_list[k],\
self.cells_IaF[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
if Y_iae_ia[k]:
ncstim = h.NetCon(self.iae_fiber_list[k],\
self.cells_IaE[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
if Y_iaf_ib[k]:
ncstim = h.NetCon(self.iaf_fiber_list[k],\
self.cells_IbF[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
if Y_iae_ib[k]:
ncstim = h.NetCon(self.iae_fiber_list[k],\
self.cells_IbE[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
if Y_iaf_mn[k]:
ncstim = h.NetCon(self.iaf_fiber_list[k],\
self.cells_MNF[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
if Y_iae_mn[k]:
ncstim = h.NetCon(self.iae_fiber_list[k],\
self.cells_MNE[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
if Y_ibf_ib[k]:
ncstim = h.NetCon(self.ibf_fiber_list[k],\
self.cells_IbF[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
if Y_ibe_ib[k]:
ncstim = h.NetCon(self.ibe_fiber_list[k],\
self.cells_IbE[i].synlist[0]) #ex
ncstim.delay = 0
ncstim.weight[0] = syn_weight
self.nclist.append(ncstim)
for i in range(len(self.nclist)):
self.nclist[i].record(self.t_vec, self.id_vec, i)
#
def get_spikes(self):
"""Get the spikes as a list of lists."""
return spiketrain.netconvecs_to_listoflists(self.t_vec, self.id_vec) | penguinscontrol/Spinal-Cord-Modeling | CPG/CPG_Network.py | Python | gpl-2.0 | 9,101 |
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.contrib.auth.base_user import BaseUserManager, AbstractBaseUser
from django.db import models
from game.helpers import FieldHistory
import game
class PlayerManager(BaseUserManager):
    """Manager that creates Player accounts keyed by their login."""

    def create_user(self, login, password, name=None):
        """Create, persist and return a regular player; login is mandatory."""
        if not login:
            raise ValueError('Gracz musi mieć login')
        player = self.model(login=login, name=name)
        player.set_password(password)
        player.save(using=self._db)
        return player

    def create_superuser(self, login, password, name=None):
        """Create a regular player and promote it to admin."""
        player = self.create_user(login=login, password=password, name=name)
        player.is_admin = True
        player.save(using=self._db)
        return player
class Player(AbstractBaseUser, FieldHistory):
    """Custom auth user for the foosball app, with aggregated game stats.

    The aggregate fields (won/lost/tie games and points) are recomputed
    from the Match table by calculate_scores().
    """
    login = models.CharField(max_length=255, unique=True)
    name = models.CharField(max_length=255, blank=True, null=True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    won_games = models.IntegerField(default=0)
    lost_games = models.IntegerField(default=0)
    tie_games = models.IntegerField(default=0)
    won_point = models.IntegerField(default=0)
    lost_point = models.IntegerField(default=0)
    # percent of won matches (NOTE: stored as a 0..1 fraction, not 0..100)
    win_percent = models.DecimalField(default=0, max_digits=5, decimal_places=2)
    win_lost_points_ratio = models.DecimalField(default=0, max_digits=5, decimal_places=3)
    objects = PlayerManager()
    USERNAME_FIELD = 'login'
    REQUIRED_FIELDS = []

    def get_full_name(self):
        return self.name if self.name else self.login

    def get_short_name(self):
        return self.name if self.name else self.login

    def calculate_scores(self):
        """Recompute all aggregate statistics from this player's matches.

        BUG FIX: ``tie_games`` was previously never reset, so repeated
        calls inflated the tie count.  Also guards the two divisions
        against zero (players with no finished matches or no lost points
        used to raise a division error).
        """
        self.won_games = 0
        self.lost_games = 0
        self.tie_games = 0
        self.won_point = 0
        self.lost_point = 0
        # match.state is expressed from team_1's perspective, so for a
        # team_2 player a 'lost' match counts as a win (and vice versa).
        for match in game.models.Match.objects.filter(team_2__players__id=self.id).prefetch_related('matchset_set'):
            if match.state == 'lost':
                self.won_games += 1
            elif match.state == 'win':
                self.lost_games += 1
            else:
                self.tie_games += 1
            self.won_point += match.get_team_2_total_points()
            self.lost_point += match.get_team_1_total_points()
        for match in game.models.Match.objects.filter(team_1__players__id=self.id).prefetch_related('matchset_set'):
            if match.state == 'win':
                self.won_games += 1
            elif match.state == 'lost':
                self.lost_games += 1
            else:
                self.tie_games += 1
            self.won_point += match.get_team_1_total_points()
            self.lost_point += match.get_team_2_total_points()
        total_games = self.won_games + self.lost_games + self.tie_games
        # Avoid DivisionByZero for players with no games / no lost points.
        self.win_percent = (Decimal(self.won_games) / Decimal(total_games)
                            if total_games else Decimal(0))
        self.win_lost_points_ratio = (Decimal(self.won_point) / Decimal(self.lost_point)
                                      if self.lost_point else Decimal(0))
        self.save()

    def __str__(self):  # __unicode__ on Python 2
        return self.login

    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        # Simplest possible answer: Yes, always
        return True

    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        # Simplest possible answer: Yes, always
        return True

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
| rymcimcim/django-foosball | players/models.py | Python | mit | 3,649 |
import numpy as np
# Output g-code stream; every helper below appends commands to this file.
f = open("stapler_test_parts.g",'w')
# assume we start zero'd such that x0y0z0a0 is the bottom-leftmost viable position on the first layer
# stapler 1 is left stapler
# stapler 0 is right stapler
z_feedrate = 250        # feed rate for controlled Z plunges (G1)
layer = -1              # current layer index; newLayer() increments first, so layer 0 is the first
z_max = 20              # park height used by finish()
z_min = 0               # Z of the first layer's staple position
z_close_offset = 4 # relative to z_down
z_clear_offset = 8 # relative to z_down
z_preload = 0.2         # press into the work by this much before stapling
wait_s = 0.75           # dwell (G4) while the stapler fires
bl_overshoot = 1.       # backlash-compensation overshoot distance, mm
dual_head_offset_x = -33.02 # (1.3 inches) (right head pos in left head cord sys)
dual_head_offset_y = -0.3 # (right head pos in left head cord sys)
x=0                     # last commanded X (tracked for backlash compensation)
y=0                     # last commanded Y (tracked for backlash compensation)
def init():
    """Emit the g-code preamble: G21 selects millimetre units."""
    f.write("G21\n")
def placePartAt(part_type,part_pos):
    """Move to grid position ``part_pos`` and fire the given stapler head.

    part_type selects the head (0 = right, 1 = left); part_pos is a
    [col, row] grid coordinate, converted to machine XY by goToXY().
    """
    # if (part_type == 0):
    #     offset_part_pos = [part_pos[0] + dual_head_offset, part_pos[1]]
    # else:
    #     offset_part_pos = part_pos
    goToXY(part_pos,part_type)
    placePart(part_type)
def placePart(part_type):
    """Plunge to the work, fire the stapler, and retract to clearance.

    Rapid to z_close, feed down to z_down (slightly preloaded into the
    work), fire M3 (head 0) or M4 (head 1), dwell while the staple sets,
    stop with M5, then retract to z_clear.
    """
    global z_close,z_down,z_clear,wait_s
    f.write("G0Z%f\n" % (z_close))
    f.write("G1Z%fF%f\n" % (z_down,z_feedrate))
    if (part_type == 0):
        f.write("M3\n")
    else:
        f.write("M4\n")
    f.write("G4P%f\n" % wait_s)
    f.write("M5\n")
    # NOTE(review): this re-commands z_down although we are already there;
    # presumably it restores position after the staple recoil -- confirm.
    f.write("G1Z%fF%f\n" % (z_down,z_feedrate))
    f.write("G0Z%f\n" % (z_clear))
def goToXY(part_pos,stapler_head=1):
    """Rapid-move the selected head to a grid position with backlash comp.

    Grid coordinates are scaled by 1.27 mm (presumably a 0.05-inch part
    pitch -- TODO confirm); odd layers get an extra X/Y shift so layers
    interlock.  Head 0 additionally applies the dual-head offset.  When
    the move increases X or Y, we overshoot by bl_overshoot and come back
    so the final approach is always from +x/+y (backlash compensation).
    """
    # always approach location from +x and +y
    global x,y
    overshoot = False
    last_x = x
    last_y = y
    if (stapler_head == 0):
        x = part_pos[0]*1.27 + 18.23*((layer+1)%2) + dual_head_offset_x
        y = part_pos[1]*1.27 + dual_head_offset_y + 0.3*((layer+1)%2)
    else:
        x = part_pos[0]*1.27 + 18.23*((layer+1)%2)
        y = part_pos[1]*1.27 + 0.3*((layer+1)%2)
    if (last_x < x):
        # need to overshoot by a bit and then come back
        x_bl = x+bl_overshoot
        overshoot = True
    else:
        x_bl = x
    if (last_y < y):
        y_bl = y+bl_overshoot
        overshoot = True
    else:
        y_bl = y
    if (overshoot):
        f.write("G0X%fY%f (bl comp)\n" % (x_bl,y_bl))
    f.write("G0X%fY%f\n" % (x,y))
def indexStage():
    """Index the A (rotary stage) axis: alternates 0 / 0.3125 per layer."""
    f.write("G0A%f\n" % (((layer+1)%2)*0.3125))
def newLayer():
    """Advance to the next layer: recompute Z heights and index the stage.

    Each layer adds 1.6 mm (presumably the part thickness -- confirm);
    z_down is preloaded by z_preload below the nominal layer top.
    """
    global layer,z_close,z_clear,z_close_offset,z_clear_offset,z_min,z_down
    f.write("(new layer)\n")
    layer += 1
    z_down = z_min+1.6*layer-z_preload
    z_close = z_down+z_close_offset
    z_clear = z_down+z_clear_offset
    indexStage()
def finish():
    """Raise the head to its maximum clearance height and park at origin.

    BUG FIX: the G0Z command was written without a trailing newline, so
    the following G0X/Y command from goToXY() was fused onto the same
    g-code line (e.g. "G0Z20.000000G0X..."), which is invalid.
    """
    global z_max
    f.write("G0Z%f\n"%(z_max))
    goToXY([0,0])
# Build "stapler_test_parts.g": emit the preamble, step through five layers,
# populate only the fifth with staples, then park the head and close the file.
# (Large blocks of disabled alternative layer layouts -- including a
# "capacitor" build and per-move goToXY/placePart sequences -- were removed;
# they produced no output.)
init()

# Layers 1-4: the stage is indexed and Z heights advance, but no parts are
# placed (all placements for these layers are disabled).
newLayer()
newLayer()
newLayer()
newLayer()

# Layer 5: mixed left-head (1) and right-head (0) placements on the grid.
newLayer()
placePartAt(1, [0, 0])
placePartAt(1, [-4, 0])
placePartAt(1, [1, 1])
placePartAt(1, [-3, 1])
placePartAt(0, [0, 2])
placePartAt(0, [-4, 2])
placePartAt(0, [1, 3])
placePartAt(0, [-3, 3])
placePartAt(0, [0, 4])
placePartAt(0, [1, 5])
placePartAt(0, [0, 6])
placePartAt(0, [1, 7])
placePartAt(1, [-4, 4])
placePartAt(1, [-3, 5])
placePartAt(1, [-4, 6])
placePartAt(1, [-3, 7])

# Park the head and flush the g-code file.
finish()
f.close()
| langfordw/stapler_gcode | stapler_gcode_blcomp.py | Python | mit | 5,784 |
""" Description here
Author: Leonard Berrada
Date: 5 Nov 2015
"""
import sys
sys.path.append("../")
from Regression import AutoRegressive, AutoCorrelation, GaussianProcess, KalmanFilter
from process_data import data_from_file
file_name = "sunspots.mat"
data_dict = data_from_file(file_name)
model = "GP"
# model = "KF"
# model = "AR"
# model = "AC"
if model.lower() == 'kf':
p = 100
kf = KalmanFilter(data_dict, p)
kf.fit()
kf.display(out="./sun_kf.png")
if model.lower() == "ar":
p = 50
my_ar = AutoRegressive(data_dict, p)
my_ar.fit()
my_ar.predict()
my_ar.display(out="./sun_ar.png")
if model.lower() == "ac":
p = 50
my_ac = AutoCorrelation(data_dict, p)
my_ac.fit()
my_ac.predict()
my_ac.display(out="./sun_ac.png")
my_ac.spectrum()
if model.lower() == "gp":
# Q = 3
# use_kernels = "exponential_quadratic* cosine"
# for _ in range(Q - 1):
# use_kernels += "+ exponential_quadratic * cosine"
use_kernels = "matern_32 + periodic"
use_means = "constant"
estimator = "MLE"
params = [0.34, 1., 26.5, 1e-06, 3.18, -2.9]
my_gp = GaussianProcess(data_dict=data_dict,
use_kernels=use_kernels,
params=params,
use_means=use_means,
estimator=estimator,
sequential_mode=True)
my_gp.predict()
my_gp.compute_score()
my_gp.show_prediction(out="./sun_gp.png")
| leonardbj/AIMS | src/exec/sunspots_data.py | Python | mit | 1,531 |
# -*- coding: utf-8 -*-
"""
Download price history for a small portfolio, compute equal-weighted log
returns per security, and collect them column-wise into one DataFrame.

Created on Tue Feb 7 11:27:35 2017
@author: AmatVictoriaCuramIII
"""
#Get modules
#import scipy as sp
import numpy as np
from pandas_datareader import data
import pandas as pd
#portfolio set up
port = ['^GSPC', '^RUA']      # tickers: S&P 500 and Russell 3000
numsec = len(port)
equalweight = 1/numsec        # equal weight per security (0.5 here)
df2 = pd.DataFrame(columns=[])
x=0   # positive column counter for raw log returns
y=0   # negative column counter for weighted returns
#HERE'S AN IDEA print(list(enumerate(port, start=1)))
#List the log returns in columns
for s in port:
    x = x + 1
    y = y - 1
    # NOTE: the loop variable `s` is rebound to the downloaded DataFrame.
    s = data.DataReader(s, 'yahoo', start='1/1/1900', end='01/01/2050')
    s[x] = np.log(s['Adj Close']/s['Adj Close'].shift(1)) #log returns
    s['equalweight'] = equalweight
    s[y] = s[x] * s['equalweight'] #This is the weighted return
    df2 = pd.concat([df2,s[x],s[y]], axis=1)
#Multiply the individual columns by the last and sum
#df2['portfolioreturn'] = df2[(range(-1, -numsec, -1))]
# NOTE(review): `s` here is whatever security the loop ended on, so only
# the LAST ticker's equalweight column is appended -- verify intent.
df2 = pd.concat([df2,s['equalweight']], axis=1)
print(df2)
from django.db import models
from django_rv_apps.apps.believe_his_prophets.models.spirit_prophecy import SpiritProphecy
from django_rv_apps.apps.believe_his_prophets.models.language import Language
from gdstorage.storage import GoogleDriveStorage
# Shared Google Drive storage backend used by audio FileFields below.
gd_storage = GoogleDriveStorage()
class SpiritProphecyChapter(models.Model):
    """One chapter of a Spirit of Prophecy book, readable over a date range.

    Language-specific content is attached through the
    SpiritProphecyChapterLanguage through-model.
    """
    id = models.AutoField(
        primary_key=True,
        editable=False)
    # Uploaded file attached to the chapter (local MEDIA storage).
    archivo = models.FileField(upload_to="files/")
    spirit_prophecy = models.ForeignKey(
        SpiritProphecy,
        db_column='spirit_prophecy_id',
        blank=False, null=False,
        on_delete=models.PROTECT)
    # Chapter number within the book.
    chapter = models.IntegerField(
        blank=True, null=True)
    # Available translations, one row per language in the through table.
    language = models.ManyToManyField(
        Language,
        through='SpiritProphecyChapterLanguage', blank=True,
        related_name='spirit_prophecy_language_chapter_set'
    )
    # Reading-plan window during which this chapter is scheduled.
    start_date = models.DateField(
        blank=True, null=True
    )
    end_date = models.DateField(
        blank=True, null=True
    )
    class Meta:
        verbose_name = 'SpiritProphecyChapter'
        db_table = 'believe_spirit_prophecy_chapter'
        verbose_name_plural = 'SpiritProphecyChapter'
    def __str__(self):
        return (self.spirit_prophecy.__str__() + ' ' + str(self.chapter))
class SpiritProphecyChapterLanguage(models.Model):
    """Through-model holding one language's content for a chapter."""
    id = models.AutoField(
        primary_key=True,
        editable=False)
    archivo = models.FileField(upload_to='files')
    # Localized chapter title.
    name = models.CharField(
        max_length=250,
        blank=False, null=False)
    spirit_prophecy_chapter = models.ForeignKey(
        'SpiritProphecyChapter', db_column='spirit_prophecy_chapter_id',
        related_name='spirit_prophecy_chapter_language_spirit_prophecy_chapter_set',
        blank=False, null=False,
        on_delete=models.PROTECT)
    language = models.ForeignKey(
        Language, db_column='language_id',
        related_name='spirit_prophecy_chapter_language_language_set',
        blank=False, null=False,
        on_delete=models.PROTECT)
    # Full chapter text in this language.
    data = models.TextField(
        blank=False, null=False)
    # Narrated audio, stored on Google Drive rather than local media.
    audio = models.FileField(
        blank=True, null=True,
        upload_to='audios/sp/', storage=gd_storage)
    class Meta:
        verbose_name = 'SpiritProphecyChapterLanguage'
        verbose_name_plural = 'SpiritProphecyChapterLanguage'
        db_table = 'believe_spirit_pr_chapter_lang'
| davrv93/creed-en-sus-profetas-backend | django_rv_apps/apps/believe_his_prophets/models/spirit_prophecy_chapter.py | Python | apache-2.0 | 2,402 |
import unittest2 as unittest
import random
from time import sleep
import os
from nose import SkipTest
from tweepy import Friendship, MemoryCache, FileCache
from config import TweepyTestCase, username, use_replay
# Shared fixtures for the API tests below.
test_tweet_id = '266367358078169089'
tweet_text = 'testing 1000'
"""Unit tests"""
class TweepyErrorTests(unittest.TestCase):
    """Tests for tweepy's exception type."""

    def testpickle(self):
        """Verify exceptions can be pickled and unpickled."""
        import pickle
        from tweepy.error import TweepError
        original = TweepError('no reason', {'status': 200})
        restored = pickle.loads(pickle.dumps(original))
        self.assertEqual(original.reason, restored.reason)
        self.assertEqual(original.response, restored.response)
class TweepyAPITests(TweepyTestCase):
    """Integration tests exercising the Twitter REST API via tweepy.

    These tests hit the live API (or a recorded replay when ``use_replay``
    is set) and require valid credentials from ``config``.

    Fix: the deprecated ``assert_`` alias (removed from unittest in
    Python 3.12) is replaced with ``assertIsInstance``.
    """
    # TODO: Actually have some sort of better assertion
    def testgetoembed(self):
        data = self.api.get_oembed(test_tweet_id)
        self.assertEqual(data['author_name'], "Twitter")
    def testhometimeline(self):
        self.api.home_timeline()
    def testusertimeline(self):
        self.api.user_timeline()
        self.api.user_timeline('twitter')
    def testmentionstimeline(self):
        self.api.mentions_timeline()
    def testretweetsofme(self):
        self.api.retweets_of_me()
    def testretweet(self):
        # TODO(josh): Need a way to get random tweets to retweet.
        raise SkipTest()
    def testretweets(self):
        self.api.retweets(test_tweet_id)
    def testretweeters(self):
        self.api.retweeters(test_tweet_id)
    def testgetstatus(self):
        self.api.get_status(id=test_tweet_id)
    def testupdateanddestroystatus(self):
        # test update
        text = tweet_text if use_replay else 'testing %i' % random.randint(0, 1000)
        update = self.api.update_status(status=text)
        self.assertEqual(update.text, text)
        # test destroy
        deleted = self.api.destroy_status(id=update.id)
        self.assertEqual(deleted.id, update.id)
    def testupdatestatuswithmedia(self):
        update = self.api.update_with_media('examples/banner.png', status=tweet_text)
        self.assertIn(tweet_text + ' http://t.co', update.text)
    def testgetuser(self):
        u = self.api.get_user('twitter')
        self.assertEqual(u.screen_name, 'twitter')
        u = self.api.get_user(783214)
        self.assertEqual(u.screen_name, 'twitter')
    def testlookupusers(self):
        def check(users):
            self.assertEqual(len(users), 2)
        check(self.api.lookup_users(user_ids=[6844292, 6253282]))
        check(self.api.lookup_users(screen_names=['twitterapi', 'twitter']))
    def testsearchusers(self):
        self.api.search_users('twitter')
    def testsuggestedcategories(self):
        self.api.suggested_categories()
    def testsuggestedusers(self):
        categories = self.api.suggested_categories()
        if len(categories) != 0:
            self.api.suggested_users(categories[0].slug)
    def testsuggesteduserstweets(self):
        categories = self.api.suggested_categories()
        if len(categories) != 0:
            self.api.suggested_users_tweets(categories[0].slug)
    def testme(self):
        me = self.api.me()
        self.assertEqual(me.screen_name, username)
    def testdirectmessages(self):
        self.api.direct_messages()
    def testsentdirectmessages(self):
        self.api.sent_direct_messages()
    def testsendanddestroydirectmessage(self):
        # send
        sent_dm = self.api.send_direct_message(username, text='test message')
        self.assertEqual(sent_dm.text, 'test message')
        self.assertEqual(sent_dm.sender.screen_name, username)
        self.assertEqual(sent_dm.recipient.screen_name, username)
        # destroy
        destroyed_dm = self.api.destroy_direct_message(sent_dm.id)
        self.assertEqual(destroyed_dm.text, sent_dm.text)
        self.assertEqual(destroyed_dm.id, sent_dm.id)
        self.assertEqual(destroyed_dm.sender.screen_name, username)
        self.assertEqual(destroyed_dm.recipient.screen_name, username)
    def testcreatedestroyfriendship(self):
        enemy = self.api.destroy_friendship('twitter')
        self.assertEqual(enemy.screen_name, 'twitter')
        # Wait 5 seconds to allow Twitter time
        # to process the friendship destroy request.
        sleep(5)
        friend = self.api.create_friendship('twitter')
        self.assertEqual(friend.screen_name, 'twitter')
    def testshowfriendship(self):
        source, target = self.api.show_friendship(target_screen_name='twitter')
        # assertIsInstance replaces the deprecated assert_ alias.
        self.assertIsInstance(source, Friendship)
        self.assertIsInstance(target, Friendship)
    def testfriendsids(self):
        self.api.friends_ids(username)
    def testfollowersids(self):
        self.api.followers_ids(username)
    def testfriends(self):
        self.api.friends(username)
    def testfollowers(self):
        self.api.followers(username)
    def testverifycredentials(self):
        self.assertNotEqual(self.api.verify_credentials(), False)
        # make sure that `me.status.entities` is not an empty dict
        me = self.api.verify_credentials(include_entities=True)
        self.assertTrue(me.status.entities)
        # `status` shouldn't be included
        me = self.api.verify_credentials(skip_status=True)
        self.assertFalse(hasattr(me, 'status'))
    def testratelimitstatus(self):
        self.api.rate_limit_status()
    """ TODO(josh): Remove once this deprecated API is gone.
    def testsetdeliverydevice(self):
        self.api.set_delivery_device('im')
        self.api.set_delivery_device('none')
    """
    def testupdateprofilecolors(self):
        original = self.api.me()
        updated = self.api.update_profile_colors('000', '000', '000', '000', '000')
        # restore colors
        self.api.update_profile_colors(
            original.profile_background_color,
            original.profile_text_color,
            original.profile_link_color,
            original.profile_sidebar_fill_color,
            original.profile_sidebar_border_color
        )
        self.assertEqual(updated.profile_background_color, '000000')
        self.assertEqual(updated.profile_text_color, '000000')
        self.assertEqual(updated.profile_link_color, '000000')
        self.assertEqual(updated.profile_sidebar_fill_color, '000000')
        self.assertEqual(updated.profile_sidebar_border_color, '000000')
    """
    def testupateprofileimage(self):
        self.api.update_profile_image('examples/profile.png')
    def testupdateprofilebg(self):
        self.api.update_profile_background_image('examples/bg.png')
    """
    def testupdateprofilebannerimage(self):
        self.api.update_profile_banner('examples/banner.png')
    def testupdateprofile(self):
        original = self.api.me()
        profile = {
            'name': 'Tweepy test 123',
            'location': 'pytopia',
            'description': 'just testing things out'
        }
        updated = self.api.update_profile(**profile)
        self.api.update_profile(
            name = original.name, url = original.url,
            location = original.location, description = original.description
        )
        for k,v in profile.items():
            if k == 'email': continue
            self.assertEqual(getattr(updated, k), v)
    def testfavorites(self):
        self.api.favorites()
    def testcreatedestroyfavorite(self):
        self.api.create_favorite(4901062372)
        self.api.destroy_favorite(4901062372)
    def testcreatedestroyblock(self):
        self.api.create_block('twitter')
        self.api.destroy_block('twitter')
        self.api.create_friendship('twitter') # restore
    def testblocks(self):
        self.api.blocks()
    def testblocksids(self):
        self.api.blocks_ids()
    # TODO: Rewrite test to be less brittle. It fails way too often.
    # def testcreateupdatedestroylist(self):
    #     params = {
    #         'owner_screen_name': username,
    #         'slug': 'tweeps'
    #     }
    #     l = self.api.create_list(name=params['slug'], **params)
    #     l = self.api.update_list(list_id=l.id, description='updated!')
    #     self.assertEqual(l.description, 'updated!')
    #     self.api.destroy_list(list_id=l.id)
    def testlistsall(self):
        self.api.lists_all()
    def testlistsmemberships(self):
        self.api.lists_memberships()
    def testlistssubscriptions(self):
        self.api.lists_subscriptions()
    def testlisttimeline(self):
        self.api.list_timeline('applepie', 'stars')
    def testgetlist(self):
        self.api.get_list(owner_screen_name='applepie', slug='stars')
    def testaddremovelistmember(self):
        params = {
            'slug': 'test',
            'owner_screen_name': username,
            'screen_name': 'twitter'
        }
        def assert_list(l):
            self.assertEqual(l.name, params['slug'])
        assert_list(self.api.add_list_member(**params))
        sleep(3)
        assert_list(self.api.remove_list_member(**params))
    def testlistmembers(self):
        self.api.list_members('applepie', 'stars')
    def testshowlistmember(self):
        self.assertTrue(self.api.show_list_member(owner_screen_name='applepie', slug='stars', screen_name='NathanFillion'))
    def testsubscribeunsubscribelist(self):
        params = {
            'owner_screen_name': 'applepie',
            'slug': 'stars'
        }
        self.api.subscribe_list(**params)
        self.api.unsubscribe_list(**params)
    def testlistsubscribers(self):
        self.api.list_subscribers('applepie', 'stars')
    def testshowlistsubscriber(self):
        self.assertTrue(self.api.show_list_subscriber('tweepytest', 'test', 'applepie'))
    def testsavedsearches(self):
        s = self.api.create_saved_search('test')
        self.api.saved_searches()
        self.assertEqual(self.api.get_saved_search(s.id).query, 'test')
        self.api.destroy_saved_search(s.id)
    def testsearch(self):
        self.api.search('tweepy')
    def testgeoapis(self):
        def place_name_in_list(place_name, place_list):
            """Return True if a given place_name is in place_list."""
            return any([x.full_name.lower() == place_name.lower() for x in place_list])
        twitter_hq = self.api.geo_similar_places(lat=37, long= -122, name='Twitter HQ')
        # Assumes that twitter_hq is first Place returned...
        self.assertEqual(twitter_hq[0].id, '3bdf30ed8b201f31')
        # Test various API functions using Austin, TX, USA
        self.assertEqual(self.api.geo_id(id='c3f37afa9efcf94b').full_name, 'Austin, TX')
        self.assertTrue(place_name_in_list('Austin, TX',
            self.api.reverse_geocode(lat=30.267370168467806, long= -97.74261474609375))) # Austin, TX, USA
    def testsupportedlanguages(self):
        languages = self.api.supported_languages()
        expected_dict = {
            "name": "English",
            "code": "en",
            "status": "production"
        }
        self.assertTrue(expected_dict in languages)
    def testcachedresult(self):
        self.api.cache = MemoryCache()
        self.api.home_timeline()
        self.assertFalse(self.api.cached_result)
        self.api.home_timeline()
        self.assertTrue(self.api.cached_result)
class TweepyCacheTests(unittest.TestCase):
    """Exercise tweepy's cache backends (store/get, expiry, cleanup, flush)."""
    timeout = 2.0  # entry TTL used by every backend under test, seconds
    memcache_servers = ['127.0.0.1:11211'] # must be running for test to pass
    def _run_tests(self, do_cleanup=True):
        """Shared scenario run against whichever backend is in self.cache."""
        # test store and get
        self.cache.store('testkey', 'testvalue')
        self.assertEqual(self.cache.get('testkey'), 'testvalue',
            'Stored value does not match retrieved value')
        # test timeout
        sleep(self.timeout)
        self.assertEqual(self.cache.get('testkey'), None,
            'Cache entry should have expired')
        # test cleanup
        if do_cleanup:
            self.cache.store('testkey', 'testvalue')
            sleep(self.timeout)
            self.cache.cleanup()
            self.assertEqual(self.cache.count(), 0, 'Cache cleanup failed')
        # test count
        for i in range(0, 20):
            self.cache.store('testkey%i' % i, 'testvalue')
        self.assertEqual(self.cache.count(), 20, 'Count is wrong')
        # test flush
        self.cache.flush()
        self.assertEqual(self.cache.count(), 0, 'Cache failed to flush')
    def testmemorycache(self):
        self.cache = MemoryCache(timeout=self.timeout)
        self._run_tests()
    def testfilecache(self):
        # Uses a throwaway directory so the file backend leaves no residue.
        os.mkdir('cache_test_dir')
        self.cache = FileCache('cache_test_dir', self.timeout)
        self._run_tests()
        self.cache.flush()
        os.rmdir('cache_test_dir')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| dnr2/fml-twitter | tweepy-master/tests/test_api.py | Python | mit | 12,771 |
from AppKit import NSDragOperationMove
from vanilla import *
from mojo.events import setToolOrder, getToolOrder
toolOrderDragType = "toolOrderDragType"
class ToolOrder:
    """RoboFont utility window for reordering tools via drag and drop.

    Shows the current tool order in a list; rows can be dragged to new
    positions, and "Apply" pushes the new order back to RoboFont.
    """
    def __init__(self):
        self.w = Window((200, 300), "Tool Orderer")
        # List seeded with the current tool order; drag type is private to
        # this window so rows can only be dropped back onto the same list.
        self.w.tools = List((10, 10, -10, -40), getToolOrder(),
            dragSettings=dict(type=toolOrderDragType, callback=self.dragCallback),
            selfDropSettings=dict(type=toolOrderDragType, operation=NSDragOperationMove, callback=self.dropListSelfCallback),
            )
        self.w.apply = Button((10, -30, -10, 22), "Apply", callback=self.applyCallback)
        self.w.open()
    def applyCallback(self, sender):
        """Push the list's current ordering into RoboFont."""
        setToolOrder(self.w.tools.get())
    def dragCallback(self, sender, indexes):
        """Drag payload: the row indexes being moved."""
        return indexes
    def dropListSelfCallback(self, sender, dropInfo):
        """Reorder the rows when a drag is dropped back onto the list."""
        isProposal = dropInfo["isProposal"]
        if not isProposal:
            indexes = [int(i) for i in sorted(dropInfo["data"])]
            indexes.sort()
            source = dropInfo["source"]
            rowIndex = dropInfo["rowIndex"]
            items = sender.get()
            toMove = [items[index] for index in indexes]
            # Delete from the end first so earlier indexes stay valid.
            for index in reversed(indexes):
                del items[index]
            # Shift the insertion point left by however many removed rows
            # sat above it.
            rowIndex -= len([index for index in indexes if index < rowIndex])
            for font in toMove:
                items.insert(rowIndex, font)
                rowIndex += 1
            sender.set(items)
            return True
# Instantiate (and open) the window as soon as the script runs.
ToolOrder()
# -*- coding: utf-8 -*-
"""
************************************************************************************
Class : PatientFrame
Author : Thierry Maillard (TMD)
Date : 26/11/2016 - 1/12/2016
Role : Define Patient frame content.
Licence : GPLv3
Copyright (c) 2016 - Thierry Maillard
This file is part of CalcAl project.
CalcAl project is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CalcAl project is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CalcAl project. If not, see <http://www.gnu.org/licenses/>.
************************************************************************************
"""
import datetime
import tkinter
from tkinter.ttk import Combobox
from tkinter import messagebox
from util import CalcalExceptions
from . import CallTypWindow
from . import FrameBaseCalcAl
class PatientFrame(FrameBaseCalcAl.FrameBaseCalcAl):
    """ Patient frame used to manage patient information.

    Observes patientFrameModel: the model calls updateObserver() with an
    event name whenever patient data changes, and this frame pushes user
    edits back through the model (createOrModifyPatient, updatePathologies,
    deleteCurrentPatient).
    """

    def __init__(self, master, mainWindow, logoFrame, patientFrameModel):
        """ Initialize Patients Frame """
        super(PatientFrame, self).__init__(master, mainWindow, logoFrame)
        self.patientFrameModel = patientFrameModel
        # Register so the model notifies this frame via updateObserver().
        self.patientFrameModel.addObserver(self)
        # Cached list of all known pathologies (filled by initListPathologies).
        self.listPathologies = []

        ##########
        # Patient definition Frame
        patientMainFrame = tkinter.LabelFrame(self, text=_("Patient information"), padx=10)
        patientMainFrame.pack(side=tkinter.TOP)
        rightFrame = tkinter.Frame(patientMainFrame)
        rightFrame.pack(side=tkinter.LEFT)
        patientDefinitionFrame = tkinter.Frame(rightFrame)
        patientDefinitionFrame.pack(side=tkinter.TOP)
        labelPatientCode = tkinter.Label(patientDefinitionFrame, text=_("Patient code") + " :")
        labelPatientCode.grid(row=0, column=0, sticky=tkinter.E)
        CallTypWindow.createToolTip(labelPatientCode,
                                    _("Type new patient code or select an existant"),
                                    self.delaymsTooltips)
        self.patientCodeVar = tkinter.StringVar()
        # Fire changePatient() on every write to the combobox variable
        # (typing as well as selecting an existing code).
        self.patientCodeVar.trace_variable("w", self.changePatient)
        widthCode = int(self.configApp.get('Size', 'patientCodeComboboxWidth'))
        self.patientCodeCombobox = Combobox(patientDefinitionFrame, exportselection=0,
                                            textvariable=self.patientCodeVar,
                                            state=tkinter.NORMAL,
                                            width=widthCode)
        self.patientCodeCombobox.grid(row=0, column=1, sticky=tkinter.W)

        # Birth year selector: range limited by the oldestPatient config key.
        tkinter.Label(patientDefinitionFrame, text=_("Birth year") + " :").grid(row=1, column=0,
                                                                               sticky=tkinter.E)
        self.currentYear = datetime.datetime.now().year
        self.oldestPatient = int(self.configApp.get('Patient', 'oldestPatient'))
        self.birthYearCombobox = Combobox(patientDefinitionFrame, exportselection=0,
                                          state="readonly",
                                          width=len(str(self.currentYear + self.oldestPatient)),
                                          values=list(range(self.currentYear-self.oldestPatient,
                                                            self.currentYear+1)))
        self.birthYearCombobox.bind('<<ComboboxSelected>>', self.modifyPatient)
        self.birthYearCombobox.grid(row=1, column=1, sticky=tkinter.W)

        # Gender radio buttons: M / F / U (unknown).
        tkinter.Label(patientDefinitionFrame, text=_("Gender") + " :").grid(row=2, column=0,
                                                                            sticky=tkinter.E)
        genderFrame = tkinter.Frame(patientDefinitionFrame)
        genderFrame.grid(row=2, column=1, sticky=tkinter.EW)
        self.genderVar = tkinter.StringVar()
        tkinter.Radiobutton(genderFrame, text=_("M"), variable=self.genderVar, value="M",
                            command=self.modifyPatient).pack(side=tkinter.LEFT)
        tkinter.Radiobutton(genderFrame, text=_("F"), variable=self.genderVar, value="F",
                            command=self.modifyPatient).pack(side=tkinter.LEFT)
        tkinter.Radiobutton(genderFrame, text=_("U"), variable=self.genderVar, value="U",
                            command=self.modifyPatient).pack(side=tkinter.LEFT)

        # Body size (height) selector, bounds read from configuration.
        tkinter.Label(patientDefinitionFrame, text=_("Size") + " (cm) :").grid(row=3, column=0,
                                                                               sticky=tkinter.E)
        self.sizeMin = int(self.configApp.get('Patient', 'sizeMin'))
        self.sizeMax = int(self.configApp.get('Patient', 'sizeMax'))
        self.sizeCombobox = Combobox(patientDefinitionFrame, exportselection=0,
                                     state="readonly", width=len(str(self.sizeMax)),
                                     values=list(range(self.sizeMin, self.sizeMax+1)))
        self.sizeCombobox.bind('<<ComboboxSelected>>', self.modifyPatient)
        self.sizeCombobox.grid(row=3, column=1, sticky=tkinter.W)

        # Buttons command
        buttonDefinitionFrame = tkinter.Frame(rightFrame)
        buttonDefinitionFrame.pack(side=tkinter.TOP)
        tkinter.Button(buttonDefinitionFrame, text=_("Delete"),
                       command=self.deletePatient).pack(side=tkinter.LEFT)

        # Notes frame: free text editor, saved on focus loss (see bind below).
        patientNoteFrame = tkinter.LabelFrame(patientMainFrame, text=_("Notes for this patient"),
                                              padx=10)
        patientNoteFrame.pack(side=tkinter.LEFT)
        self.patientNotesTextEditor = tkinter.Text(patientNoteFrame,
                                                   wrap=tkinter.NONE,
                                                   height=10, width=30,
                                                   background=self.configApp.get('Colors',
                                                                                 'colorPatientEditor'))
        self.patientNotesTextEditor.bind('<FocusOut>', self.modifyPatient)
        self.patientNotesTextEditor.grid(row=2, columnspan=2)
        scrollbarRightNotes = tkinter.Scrollbar(patientNoteFrame,
                                                command=self.patientNotesTextEditor.yview)
        scrollbarRightNotes.grid(row=2, column=2, sticky=tkinter.W+tkinter.N+tkinter.S)
        scrollbarBottom = tkinter.Scrollbar(patientNoteFrame, orient=tkinter.HORIZONTAL,
                                            command=self.patientNotesTextEditor.xview)
        scrollbarBottom.grid(row=3, columnspan=2, sticky=tkinter.N+tkinter.E+tkinter.W)
        self.patientNotesTextEditor.config(yscrollcommand=scrollbarRightNotes.set)
        self.patientNotesTextEditor.config(xscrollcommand=scrollbarBottom.set)

        patientListsFrame = tkinter.Frame(patientMainFrame, padx=10)
        patientListsFrame.pack(side=tkinter.LEFT)

        # Pathologies listbox for this patient
        pathologiesListboxFrame = tkinter.LabelFrame(patientListsFrame,
                                                     text=_("Patient pathologies"))
        pathologiesListboxFrame.pack(side=tkinter.TOP)
        color = self.configApp.get('Colors', 'colorPathologiesList')
        self.pathologiesListbox = tkinter.Listbox(pathologiesListboxFrame,
                                                  selectmode=tkinter.EXTENDED,
                                                  background=color, height=9, width=20,
                                                  exportselection=False)
        self.pathologiesListbox.grid(row=0, columnspan=2)
        CallTypWindow.createToolTip(self.pathologiesListbox,
                                    _("Use Ctrl and Shift keys") + "\n" + \
                                    _("for multiple selection"),
                                    self.delaymsTooltips)
        scrollbarRightPathologies = tkinter.Scrollbar(pathologiesListboxFrame,
                                                      orient=tkinter.VERTICAL,
                                                      command=self.pathologiesListbox.yview)
        scrollbarRightPathologies.grid(row=0, column=2, sticky=tkinter.W+tkinter.N+tkinter.S)
        self.pathologiesListbox.config(yscrollcommand=scrollbarRightPathologies.set)
        self.pathologiesListbox.bind('<ButtonRelease-1>', self.clicPathologiesListbox)

    def changePatient(self, *dummy):
        """ Inform model that patient has changed by clicking on combobox or changing its value"""
        try:
            self.logger.debug("PatientFrame/changePatient()")
            newPatientCode = self.patientCodeVar.get()
            if newPatientCode in self.patientCodeCombobox['values']:
                # Known code: ask the model to switch to this patient.
                self.patientFrameModel.changePatient(newPatientCode)
                self.mainWindow.setStatusText(_("Patient displayed") + " : " + newPatientCode)
            else:
                # Unknown (possibly partial) code being typed: blank the form.
                self.clearPatientDefinition()
        except CalcalExceptions.CalcalValueError as exc:
            self.mainWindow.setStatusText(_("Error") + " : " + str(exc) + " !", True)

    def clearPatientDefinition(self):
        """ Clear Patient definition frame """
        #self.birthYearCombobox.set(self.currentYear - self.oldestPatient//2)
        self.birthYearCombobox.set("")
        self.genderVar.set("U")
        #self.sizeCombobox.set((self.sizeMax + self.sizeMin)//2)
        self.sizeCombobox.set(0)
        self.patientNotesTextEditor.delete('1.0', tkinter.END)

    def updateObserver(self, observable, event):
        """Called when the model object is modified.

        Dispatches on the event name sent by patientFrameModel; errors
        raised by the handlers are shown in the status bar.
        """
        if observable == self.patientFrameModel:
            self.logger.debug("PatientFrame received from model : " + event)
            try:
                if event == "INIT_DB":
                    self.init()
                elif event == "PATIENT_CHANGED":
                    self.displayOtherPatient()
                elif event == "NEW_PATIENT":
                    self.patientCreated()
                elif event == "PATIENT_UPDATED":
                    self.patientUpdated()
                elif event == "UPDATE_ALL_PATHOLOGIES":
                    self.initListPathologies()
                elif event == "PATIENT_DELETED":
                    self.patientDeleted()
            except ValueError as exc:
                message = _("Error") + " : " + str(exc) + " !"
                self.mainWindow.setStatusText(message, True)

    def initListPathologies(self):
        """ Init Pathologies list content """
        self.pathologiesListbox.delete(0, tkinter.END)
        self.listPathologies = self.patientFrameModel.getAllPathologies()
        for pathology in self.listPathologies:
            self.pathologiesListbox.insert(tkinter.END, pathology)

    def modifyPatient(self, *dummy):
        """ Modify patient info in database """
        try:
            # Collect every field of the definition frame; the model decides
            # whether this is a creation or an update.
            listInfoPatient = [self.patientCodeVar.get(), self.birthYearCombobox.get(),
                               self.genderVar.get(), self.sizeCombobox.get(),
                               self.patientNotesTextEditor.get('1.0', 'end-1c')]
            self.patientFrameModel.createOrModifyPatient(listInfoPatient)
        except ValueError as exc:
            self.mainWindow.setStatusText(_("Error") + " : " + str(exc) + " !", True)

    def deletePatient(self):
        """ Delete patient info in database """
        # Ask for confirmation before destroying data.
        isDestructionOk = messagebox.askyesno(_("Deleting selected user element in database"),
                                              _("Do you really want to delete selection in database ?") + \
                                              "\n" + self.patientCodeVar.get(),
                                              icon='warning')
        if isDestructionOk:
            self.patientFrameModel.deleteCurrentPatient()
        else:
            self.mainWindow.setStatusText(_("Destruction canceled"))

    def init(self):
        """ Initialize fields from database """
        listCodes = self.patientFrameModel.getAllPatientCodes()
        self.patientCodeCombobox['values'] = listCodes
        self.initListPathologies()

    def clicPathologiesListbox(self, dummy=None):
        """ Add or remove pathologies for this patient """
        selectedIndex = list(self.pathologiesListbox.curselection())
        if len(selectedIndex) > 0:
            listpathologies = set([self.pathologiesListbox.get(index)
                                   for index in selectedIndex])
            self.patientFrameModel.updatePathologies(listpathologies)

    def displayOtherPatient(self):
        """ Update because model said patient has changed """
        patient = self.patientFrameModel.getCurrentPatient()
        self.birthYearCombobox.set(patient.getData("birthYear"))
        self.genderVar.set(patient.getData("gender"))
        self.sizeCombobox.set(patient.getData("size"))
        self.patientNotesTextEditor.delete('1.0', tkinter.END)
        self.patientNotesTextEditor.insert(tkinter.INSERT, patient.getData("notes"))
        # Re-select this patient's pathologies in the listbox and scroll the
        # first selected one into view.
        listPathologies2Select = patient.getPathologies()
        self.pathologiesListbox.selection_clear(0, tkinter.END)
        firstIndex = True
        for pathology2Select in listPathologies2Select:
            index = 0
            for pathology in self.listPathologies:
                if pathology == pathology2Select:
                    self.pathologiesListbox.selection_set(index)
                    if firstIndex:
                        self.pathologiesListbox.see(index)
                        firstIndex = False
                    break
                index += 1
        self.clicPathologiesListbox()

    def patientCreated(self):
        """ Called when model said that a patient has been created """
        listCodes = self.patientFrameModel.getAllPatientCodes()
        self.patientCodeCombobox['values'] = listCodes
        self.pathologiesListbox.selection_clear(0, tkinter.END)
        self.mainWindow.setStatusText(_("Patient created in database") + " : " +
                                      self.patientCodeVar.get())

    def patientUpdated(self):
        """ Called when model said that current patient has been updated in database """
        self.mainWindow.setStatusText(_("Patient info updated in database") + " : " +
                                      self.patientCodeVar.get())

    def patientDeleted(self):
        """ Called when model said that current patient has been deleted in database """
        self.clearPatientDefinition()
        listCodes = self.patientFrameModel.getAllPatientCodes()
        self.patientCodeCombobox['values'] = listCodes
        self.patientCodeCombobox.set("")
        self.mainWindow.setStatusText(_("Patient deleted in database"))
| Thierry46/CalcAl | gui/PatientFrame.py | Python | gpl-3.0 | 15,036 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-30 17:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two optional CharFields ('parent' and 'label') to the uiroute model."""

    # Must be applied after migration 0005 of the 'core' app.
    dependencies = [
        ('core', '0005_auto_20180623_0212'),
    ]

    # Both fields allow blank values, so no default is needed for existing rows.
    operations = [
        migrations.AddField(
            model_name='uiroute',
            name='parent',
            field=models.CharField(blank=True, max_length=50, verbose_name='Parent Route'),
        ),
        migrations.AddField(
            model_name='uiroute',
            name='label',
            field=models.CharField(blank=True, max_length=50, verbose_name='Human Readable Label'),
        ),
    ]
| sauli6692/ibc-server | core/migrations/0006_uiroute_parent.py | Python | mit | 689 |
#!/usr/bin/env python
import click
import logging
import os
import pagoda
import pagoda.viewer
def full(name):
    """Return *name* resolved relative to the directory containing this script."""
    script_dir = os.path.dirname(__file__)
    return os.path.join(script_dir, name)
@click.command()
def main():
    """Build the cooper world, load skeleton and marker data, then run the viewer."""
    logging.basicConfig()
    # 120 Hz simulation timestep.
    world = pagoda.cooper.World(dt=1. / 120)
    world.load_skeleton(full('../optimized-skeleton.txt'))
    world.load_markers(full('cooper-motion.c3d'), full('../optimized-markers.txt'))
    pagoda.viewer.Viewer(world).run()


if __name__ == '__main__':
    main()
| EmbodiedCognition/pagoda | examples/cooper.py | Python | mit | 478 |
import pyfftw
import numpy as np
import tomviz.operators
import time
class ReconConstrintedDFMOperator(tomviz.operators.CancelableOperator):
    """Cancelable tomviz operator: 3D reconstruction from a tilt series by the
    Direct Fourier Method, refined iteratively under non-negativity, support
    and Fourier-magnitude constraints."""

    def transform_scalars(self, dataset, Niter=None, Niter_update_support=None,
                          supportSigma=None, supportThreshold=None):
        """
        3D Reconstruct from a tilt series using constraint-based Direct Fourier
        Method

        Parameters:
            dataset: tomviz dataset whose scalars hold the tilt series.
            Niter: total number of constraint iterations.
            Niter_update_support: recompute the support every this many iterations.
            supportSigma: width parameter of the Gaussian used to build the support.
            supportThreshold: percentage of the image maximum used as support cutoff.
        """
        self.progress.maximum = 1
        from tomviz import utils
        import numpy as np
        # Threshold arrives as a percentage; convert to a fraction.
        supportThreshold = supportThreshold / 100.0

        nonnegativeVoxels = True
        tiltAngles = utils.get_tilt_angles(dataset) #Get Tilt angles

        tiltSeries = utils.get_array(dataset)
        if tiltSeries is None:
            raise RuntimeError("No scalars found!")

        self.progress.message = 'Initialization'
        #Direct Fourier recon without constraints
        (recon, recon_F) \
            = dfm3(tiltSeries, tiltAngles, np.size(tiltSeries, 1) * 2)

        kr_cutoffs = np.linspace(0.05, 0.5, 10)
        #average Fourier magnitude of tilt series as a function of kr
        I_data = radial_average(tiltSeries, kr_cutoffs)

        (Nx, Ny, Nz) = recon_F.shape
        #Note: Nz = np.int(Ny/2+1)
        Ntot = Nx * Ny * Ny
        f = pyfftw.n_byte_align_empty((Nx, Ny, Nz), 16, dtype='complex128')
        r = pyfftw.n_byte_align_empty((Nx, Ny, Ny), 16, dtype='float64')
        fft_forward = pyfftw.FFTW(r, f, axes=(0, 1, 2))
        fft_inverse = pyfftw.FFTW(
            f, r, direction='FFTW_BACKWARD', axes=(0, 1, 2))

        # Radial frequency grid matching the half-spectrum layout of recon_F.
        kx = np.fft.fftfreq(Nx)
        ky = np.fft.fftfreq(Ny)
        kz = ky[0:Nz]
        kX, kY, kZ = np.meshgrid(ky, kx, kz)
        kR = np.sqrt(kY**2 + kX**2 + kZ**2)

        sigma = 0.5 * supportSigma
        G = np.exp(-kR**2 / (2 * sigma**2))

        #create initial support using sw
        f = recon_F * G
        fft_inverse.update_arrays(f, r)
        fft_inverse.execute()
        cutoff = np.amax(r) * supportThreshold
        support = r >= cutoff

        recon_F[kR > kr_cutoffs[-1]] = 0

        x = np.random.rand(Nx, Ny, Ny) #initial solution

        self.progress.maximum = Niter
        step = 0

        t0 = time.time()
        counter = 1
        etcMessage = 'Estimated time to complete: n/a'

        for i in range(Niter):
            if self.canceled:
                return
            self.progress.message = 'Iteration No.%d/%d. ' % (
                i + 1, Niter) + etcMessage
            #image space projection
            y1 = x.copy()
            if nonnegativeVoxels:
                y1[y1 < 0] = 0 #non-negative constraint
            y1[np.logical_not(support)] = 0 #support constraint

            #Fourier space projection
            y2 = 2 * y1 - x
            r = y2.copy()
            fft_forward.update_arrays(r, f)
            fft_forward.execute()
            f[kR > kr_cutoffs[-1]] = 0 #apply low pass filter
            f[recon_F != 0] = recon_F[recon_F != 0] #data constraint

            #Fourier magnitude constraint
            #leave the inner shell unchanged
            for j in range(1, kr_cutoffs.size):
                shell = np.logical_and(
                    kR > kr_cutoffs[j - 1], kR <= kr_cutoffs[j])
                shell[recon_F != 0] = False
                I = np.sum(np.absolute(f[shell]))
                if I != 0:
                    I = I / np.sum(shell)
                    # lower magnitude for high frequency information to reduce
                    # artifacts
                    f[shell] = f[shell] / I * I_data[j] * 0.5

            fft_inverse.update_arrays(f, r)
            fft_inverse.execute()
            y2 = r.copy() / Ntot

            #update
            x = x + y2 - y1

            #update support
            if (i < Niter and np.mod(i, Niter_update_support) == 0):
                # Smooth the current estimate with the Gaussian G, then
                # re-threshold to obtain the new support mask.
                recon[:] = (y2 + y1) / 2
                r = recon.copy()
                fft_forward.update_arrays(r, f)
                fft_forward.execute()
                f = f * G
                fft_inverse.update_arrays(f, r)
                fft_inverse.execute()
                cutoff = np.amax(r) * supportThreshold
                support = r >= cutoff

            step += 1
            self.progress.value = step
            # Rough ETA from the average per-iteration wall time so far.
            timeLeft = (time.time() - t0) / counter * (Niter - counter)
            counter += 1
            timeLeftMin, timeLeftSec = divmod(timeLeft, 60)
            timeLeftHour, timeLeftMin = divmod(timeLeftMin, 60)
            etcMessage = 'Estimated time to complete: %02d:%02d:%02d' % (
                timeLeftHour, timeLeftMin, timeLeftSec)

        recon[:] = (y2 + y1) / 2
        recon[:] = np.fft.fftshift(recon)

        from vtk import vtkImageData
        recon_dataset = vtkImageData()
        recon_dataset.CopyStructure(dataset)
        utils.set_array(recon_dataset, recon)
        utils.mark_as_volume(recon_dataset)

        returnValues = {}
        returnValues["reconstruction"] = recon_dataset
        return returnValues
def dfm3(input, angles, Npad):
    """Direct Fourier reconstruction of a 3D volume from a 2D tilt series.

    Each projection is FFT'd and its frequency line is placed into the 3D
    half-spectrum at the corresponding tilt angle using bilinear
    extrapolation; the accumulated spectrum is inverse-transformed.

    Parameters:
        input: (Nx, Ny, Nproj) tilt series array.
        angles: tilt angles in degrees, one per projection.
        Npad: padded projection width used for the per-projection FFT.

    Returns:
        (recon, recon_F): the real-space reconstruction (Nx, Ny, Ny) and its
        accumulated half-spectrum (Nx, Ny, Ny//2 + 1).
    """
    # NOTE: the parameter name 'input' shadows the builtin; kept for
    # interface compatibility with existing callers.
    input = np.double(input)
    (Nx, Ny, Nproj) = input.shape
    angles = np.double(angles)
    pad_pre = int(np.ceil((Npad - Ny) / 2.0))
    pad_post = int(np.floor((Npad - Ny) / 2.0))

    # Initialization
    Nz = Ny // 2 + 1
    w = np.zeros((Nx, Ny, Nz)) #store weighting factors
    # NOTE(review): pyfftw.n_byte_align_empty is deprecated in newer pyfftw
    # releases in favor of pyfftw.empty_aligned -- confirm the pinned version.
    v = pyfftw.n_byte_align_empty((Nx, Ny, Nz), 16, dtype='complex128')
    v = np.zeros(v.shape) + 1j * np.zeros(v.shape)
    recon = pyfftw.n_byte_align_empty(
        (Nx, Ny, Ny), 16, dtype='float64', order='F')
    recon_fftw_object = pyfftw.FFTW(
        v, recon, direction='FFTW_BACKWARD', axes=(0, 1, 2))

    # Reusable buffers and plan for the per-projection forward FFT.
    p = pyfftw.n_byte_align_empty((Nx, Npad), 16, dtype='float64')
    pF = pyfftw.n_byte_align_empty((Nx, Npad // 2 + 1), 16, dtype='complex128')
    p_fftw_object = pyfftw.FFTW(p, pF, axes=(0, 1))

    # Frequency step of the padded transform relative to the target grid.
    dk = np.double(Ny) / np.double(Npad)

    for a in range(0, Nproj):
        ang = angles[a] * np.pi / 180
        projection = input[:, :, a] #2D projection image
        p = np.lib.pad(projection, ((0, 0), (pad_pre, pad_post)),
                       'constant', constant_values=(0, 0)) #pad zeros
        p = np.fft.ifftshift(p)
        p_fftw_object.update_arrays(p, pF)
        p_fftw_object()

        probjection_f = pF.copy()
        if ang < 0:
            # Negative tilt: use conjugate symmetry to map onto [0, pi).
            probjection_f = np.conj(pF.copy())
            probjection_f[1:, :] = np.flipud(probjection_f[1:, :])
            ang = np.pi + ang

        # Bilinear extrapolation
        # Bug fix: np.int was removed in NumPy 1.24 (deprecated since 1.20);
        # use the builtin int instead.
        for i in range(0, int(np.ceil(Npad / 2)) + 1):
            ky = i * dk
            #kz = 0
            ky_new = np.cos(ang) * ky #new coord. after rotation
            kz_new = np.sin(ang) * ky
            sy = abs(np.floor(ky_new) - ky_new) #calculate weights
            sz = abs(np.floor(kz_new) - kz_new)
            for b in range(1, 5): #bilinear extrapolation
                pz, py, weight = bilinear(kz_new, ky_new, sz, sy, Ny, b)
                if (py >= 0 and py < Ny and pz >= 0 and pz < Nz / 2 + 1):
                    w[:, py, pz] = w[:, py, pz] + weight
                    v[:, py, pz] = v[:, py, pz] + weight * probjection_f[:, i]

    # Normalize each spectrum sample by its accumulated weight.
    v[w != 0] = v[w != 0] / w[w != 0]
    recon_F = v.copy()
    recon_fftw_object.update_arrays(v, recon)
    recon_fftw_object()
    recon[:] = np.fft.fftshift(recon)
    return (recon, recon_F)
# Bilinear extrapolation
def bilinear(kz_new, ky_new, sz, sy, N, p):
    """Return one corner of the bilinear extrapolation stencil.

    Parameters:
        kz_new, ky_new: fractional target coordinates in the (kz, ky) plane.
        sz, sy: fractional distances to the floor grid point on each axis.
        N: grid length along ky, used to wrap negative ky indexes.
        p: stencil corner selector in {1, 2, 3, 4}
           (floor/floor, ceil-y/floor-z, floor-y/ceil-z, ceil/ceil).

    Returns:
        (pz, py, weight): integer grid indexes and the bilinear weight.

    Raises:
        ValueError: if ``p`` is not 1, 2, 3 or 4.  (The original code left
        py/pz/weight unbound for such p and crashed with UnboundLocalError.)
    """
    if p == 1:
        py = np.floor(ky_new)
        pz = np.floor(kz_new)
        weight = (1 - sy) * (1 - sz)
    elif p == 2:
        py = np.ceil(ky_new)
        pz = np.floor(kz_new)
        weight = sy * (1 - sz)
    elif p == 3:
        py = np.floor(ky_new)
        pz = np.ceil(kz_new)
        weight = (1 - sy) * sz
    elif p == 4:
        py = np.ceil(ky_new)
        pz = np.ceil(kz_new)
        weight = sy * sz
    else:
        raise ValueError("p must be 1, 2, 3 or 4, got %r" % (p,))
    # Wrap a negative ky index around the periodic FFT grid.
    # NOTE(review): pz is not wrapped the same way -- the caller filters
    # negative pz, but confirm this asymmetry is intended.
    if py < 0:
        py = N + py
    return (int(pz), int(py), weight)
def radial_average(tiltseries, kr_cutoffs):
    """Average Fourier magnitude of the tilt series per radial frequency band.

    For every projection, the 2D FFT magnitude is averaged within each radial
    shell defined by consecutive kr_cutoffs values, then the per-shell means
    are averaged over all projections.

    Parameters:
        tiltseries: (Nx, Ny, Nproj) tilt series array.
        kr_cutoffs: increasing radial frequency cutoffs defining the shells.

    Returns:
        Ir: array of shape (kr_cutoffs.size,) with the mean magnitudes.
    """
    (Nx, Ny, Nproj) = tiltseries.shape
    # Reusable half-spectrum buffers and FFT plan for one projection.
    f = pyfftw.n_byte_align_empty((Nx, Ny // 2 + 1), 16, dtype='complex128')
    r = pyfftw.n_byte_align_empty((Nx, Ny), 16, dtype='float64')
    p_fftw_object = pyfftw.FFTW(r, f, axes=(0, 1))
    Ir = np.zeros(kr_cutoffs.size)
    I = np.zeros(kr_cutoffs.size)

    # Radial frequency grid matching the half-spectrum layout of f.
    kx = np.fft.fftfreq(Nx)
    ky = np.fft.fftfreq(Ny)
    ky = ky[0:int(np.ceil(Ny / 2) + 1)]
    kX, kY = np.meshgrid(ky, kx)
    kR = np.sqrt(kY**2 + kX**2)

    for a in range(0, Nproj):
        r = tiltseries[:, :, a].copy().astype('float64')
        p_fftw_object.update_arrays(r, f)
        p_fftw_object.execute()
        # Innermost disk, then each annular shell.
        shell = kR <= kr_cutoffs[0]
        I[0] = np.sum(np.absolute(f[shell]))
        I[0] = I[0] / np.sum(shell)
        for j in range(1, kr_cutoffs.size):
            shell = np.logical_and(kR > kr_cutoffs[j - 1], kR <= kr_cutoffs[j])
            I[j] = np.sum(np.absolute(f[shell]))
            I[j] = I[j] / np.sum(shell)
        Ir = Ir + I
    Ir = Ir / Nproj
    return Ir
| cryos/tomviz | tomviz/python/Recon_DFT_constraint.py | Python | bsd-3-clause | 9,005 |
import markdown
# Basic pelican site identity.
AUTHOR = 'charlesreid1'
SITENAME = 'paradise lost bot flock'
SITEURL = ''#b-milton'
PATH = 'content'
TIMEZONE = 'America/Los_Angeles'
DEFAULT_LANG = 'en'

# --------------8<---------------------

THEME = 'scurvy-knave-theme'

LICENSE_URL = "https://opensource.org/licenses/MIT"
LICENSE_NAME = "MIT License"

# Pelican is designed for files => pages.
# Use variables (below) to set pieces of pages.

# Theme colors (presumably consumed by the custom.css template below).
INTROCOLOR = "#fff"
ACOLOR = "#00edac"
AHOVERCOLOR = "#00b484"
BRIGHTCOLOR = "#1df1ca"

# Files rendered through the template engine: source -> destination.
TEMPLATE_PAGES = {
    'custom.css' : 'custom.css'
}

# Background images (paths relative to the content/ directory).
INTROBKG='img/book3black.jpg'
LINKSBKG='img/book1.jpg'

# img/ should be in content/
# available at <url>/img
STATIC_PATHS = ['img']

# ---

# description appears between <p> tags, so don't include them
SITE_TITLE = "paradise lost bot flock"
SITE_DESCRIPTION = "Tweeting John Milton's <i>Paradise Lost</i> forever"
GITEA_URL = "https://git.charlesreid1.com/bots/b-milton"

# ---

# Shared Markdown -> HTML converter for the long-form text blocks below.
about_md = markdown.Markdown(extensions=['extra','codehilite'],
                             output_format='html4')

ABOUT_SHORT = "About"
ABOUT_TITLE = "about paradise lost bot flock"
ABOUT_TEXT = """
<br />
**What is the paradise lost bot flock?**
The paradise lost bot flock is a flock of 12 twitter bots that each tweet
one book of John Milton's epic poem, _Paradise Lost_.
The flock is implemented in Python and uses the
[rainbow mind machine](https://pages.charlesreid1.com/bots/b-rainbow-mind-machine)
library.
Each bot is given the entire contents of one book of _Paradise Lost_,
and tweets the entire contents one line at a time.
For more information about bots and bot flocks, see [bots.charlesreid1.com](https://bots.charlesreid1.com).
Find the bots on twitter at the [paradise lost bot flock twitter list](https://twitter.com/charlesreid1/lists/miltonbotflock)
[@milton_book1](https://twitter.com/milton_book1) •
[@milton_book2](https://twitter.com/milton_book2) •
[@milton_book3](https://twitter.com/milton_book3) •
[@milton_book4](https://twitter.com/milton_book4) •
[@milton_book5](https://twitter.com/milton_book5) •
[@milton_book6](https://twitter.com/milton_book6) •
[@milton_book7](https://twitter.com/milton_book7) •
[@milton_book8](https://twitter.com/milton_book8) •
[@milton_book9](https://twitter.com/milton_book9) •
[@milton_book10](https://twitter.com/milton_book10) •
[@milton_book11](https://twitter.com/milton_book11) •
[@milton_book12](https://twitter.com/milton_book12)
<br />
**Why build the paradise lost bot flock?**
The paradise lost bot flock was built to help drown out some of the noise on Twitter,
and to provide a juxtaposition of classic 17th century English poetry about the fall
of Satan and Original Sin with the latest goings-on in the Twitter timeline.
<br />
<br />
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">"Whence and what art thou, execrable Shape,</p>— Paradise Lost Book 2 (@milton_book2) <a href="https://twitter.com/milton_book2/status/990385793666506752?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">That dar'st, though grim and terrible, advance</p>— Paradise Lost Book 2 (@milton_book2) <a href="https://twitter.com/milton_book2/status/990386801700360192?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Thy miscreated front athwart my way</p>— Paradise Lost Book 2 (@milton_book2) <a href="https://twitter.com/milton_book2/status/990387809717444608?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">To yonder gates? Through them I mean to pass,</p>— Paradise Lost Book 2 (@milton_book2) <a href="https://twitter.com/milton_book2/status/990388817700970496?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">That be assured, without leave asked of thee.</p>— Paradise Lost Book 2 (@milton_book2) <a href="https://twitter.com/milton_book2/status/990389825701216256?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<br />
<br />
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">From Chaos, and the inroad of Darkness old,</p>— Paradise Lost Book 3 (@milton_book3) <a href="https://twitter.com/milton_book3/status/990301854734008320?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Satan alighted walks: A globe far off</p>— Paradise Lost Book 3 (@milton_book3) <a href="https://twitter.com/milton_book3/status/990302862793060352?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">It seemed, now seems a boundless continent</p>— Paradise Lost Book 3 (@milton_book3) <a href="https://twitter.com/milton_book3/status/990303870709460992?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<br />
<br />
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">His arrows, from the fourfold-visaged Four</p>— Paradise Lost Book 6 (@milton_book6) <a href="https://twitter.com/milton_book6/status/990262814097866752?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Distinct with eyes, and from the living wheels</p>— Paradise Lost Book 6 (@milton_book6) <a href="https://twitter.com/milton_book6/status/990263822110736384?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Distinct alike with multitude of eyes;</p>— Paradise Lost Book 6 (@milton_book6) <a href="https://twitter.com/milton_book6/status/990264830194941952?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">One Spirit in them ruled; and every eye</p>— Paradise Lost Book 6 (@milton_book6) <a href="https://twitter.com/milton_book6/status/990265838245572609?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Glared lightning, and shot forth pernicious fire</p>— Paradise Lost Book 6 (@milton_book6) <a href="https://twitter.com/milton_book6/status/990266846220771328?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<br />
<br />
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">From Man or Angel the great Architect</p>— Paradise Lost Book 8 (@milton_book8) <a href="https://twitter.com/milton_book8/status/990400620476612608?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Did wisely to conceal, and not divulge</p>— Paradise Lost Book 8 (@milton_book8) <a href="https://twitter.com/milton_book8/status/990401628430745600?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">His secrets to be scanned by them who ought</p>— Paradise Lost Book 8 (@milton_book8) <a href="https://twitter.com/milton_book8/status/990402636468781056?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Rather admire; or, if they list to try</p>— Paradise Lost Book 8 (@milton_book8) <a href="https://twitter.com/milton_book8/status/990403644548792320?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Conjecture, he his fabrick of the Heavens</p>— Paradise Lost Book 8 (@milton_book8) <a href="https://twitter.com/milton_book8/status/990404652494606336?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">Hath left to their disputes, perhaps to move</p>— Paradise Lost Book 8 (@milton_book8) <a href="https://twitter.com/milton_book8/status/990405660473868289?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">His laughter at their quaint opinions wide</p>— Paradise Lost Book 8 (@milton_book8) <a href="https://twitter.com/milton_book8/status/990406668449021952?ref_src=twsrc%5Etfw">April 29, 2018</a></blockquote>
<br />
<br />
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">From standing lake to tripping ebb, that stole</p>— Paradise Lost Book11 (@milton_book11) <a href="https://twitter.com/milton_book11/status/990243018551709696?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">With soft foot towards the deep; who now had stopt</p>— Paradise Lost Book11 (@milton_book11) <a href="https://twitter.com/milton_book11/status/990244027554398208?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
<blockquote class="twitter-tweet playfair" data-lang="en"><p lang="en" dir="ltr">His sluces, as the Heaven his windows shut.</p>— Paradise Lost Book11 (@milton_book11) <a href="https://twitter.com/milton_book11/status/990245035663806464?ref_src=twsrc%5Etfw">April 28, 2018</a></blockquote>
"""
# Convert the Markdown ABOUT_TEXT above into the HTML injected by the theme.
ABOUT_DESCRIPTION = about_md.convert(ABOUT_TEXT)
# -----------
def make_pages():
    """Assemble the HTML for the links section: three repo/site links plus
    the twitter accounts of all 12 bots, one bootstrap button each."""

    def _button(text, link, icon):
        # One bootstrap button with a font-awesome icon.
        return ('<p><a class="btn btn-default btn-lg" href="%s">' % link
                + '<i class="fa fa-fw fa-2x fa-%s"></i> %s' % (icon, text)
                + "</a></p>\n")

    # (button text, href url, fa-icon) triples for the web links section.
    web_links = [
        ("git.charlesreid1.com/bots/b-milton", "https://git.charlesreid1.com/bots/b-milton", "code-fork"),
        ("github.com/charlesreid1/milton", "https://github.com/charlesreid1/milton", "github"),
        ("pages.charlesreid1.com/b-milton", "https://pages.charlesreid1.com/b-milton", "globe"),
    ]

    parts = ["<h3>Paradise Lost Bot Flock On The Web</h3>"]
    parts.extend(_button(text, link, icon) for text, link, icon in web_links)
    parts.append("\n")

    parts.append("<h3>Paradise Lost Bot Flock On Twitter</h3>")
    for book in range(1, 13):
        handle = "milton_book%s" % book
        parts.append(_button("@" + handle, "https://twitter.com/%s" % handle, "twitter"))
    parts.append("\n")

    return "".join(parts)
LINKS_TITLE = ""
# Pre-render the links section HTML once, at config load time.
LINKS_DESCRIPTION = make_pages()

# ---

CONTACT_TITLE = "Contact charlesreid1"
CONTACT_DESCRIPTION = """<p>@charlesreid1 is a full-time data engineer and part-time bot-wrangler working on
the intersection of cloud computing and genomics at UC Santa Cruz.</p>
<p>Get in touch:</p>
<p><a href="mailto:twitter@charlesreid1.com">twitter (at) charlesreid1.com</a></p>
"""

# --------------8<---------------------

DISPLAY_PAGES_ON_MENU = False

# All Atom/RSS feed generation is disabled.
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

DEFAULT_PAGINATION = False
| charlesreid1/milton | pelican/pelicanconf.py | Python | mit | 11,825 |
# -*- coding: utf-8 -*-
from knowledgebase.db.base import Vertex as BaseVertex
from knowledgebase.db.base import Edge as BaseEdge
from knowledgebase.db.base import ElementView as BaseElementView
from knowledgebase.db.base import Graph as BaseGraph
from bson.objectid import ObjectId
from pymongo import MongoClient
from copy import deepcopy
VERTEX_COLLECTION = 'kb_vertices'
EDGE_COLLECTION = 'kb_edges'
class Vertex(BaseVertex):
    """MongoDB-backed vertex: documents live in the kb_vertices collection,
    with the Mongo ObjectId exposed to callers as the string field 'eid'."""

    # MongoDB collection that stores vertex documents.
    __collection__ = VERTEX_COLLECTION

    @classmethod
    def get(cls, graph, eid):
        """Return the vertex with id ``eid``, or None if it does not exist."""
        elt = graph.db[VERTEX_COLLECTION].find_one({'_id': ObjectId(eid)})
        if elt is not None:
            # Expose the Mongo ObjectId as a plain string under 'eid'.
            elt['eid'] = str(elt.pop('_id'))
            elt = cls(graph, data=elt)
        return elt

    @classmethod
    def get_all(cls, graph):
        """Yield every vertex stored in the collection."""
        elts = graph.db[VERTEX_COLLECTION].find()
        for elt in elts:
            elt['eid'] = str(elt.pop('_id'))
            elt = cls(graph, data=elt)
            yield elt

    def save(self):
        """Insert this vertex, or replace the stored document if it has an id.

        On insert, the generated ObjectId is written back to ``self.eid``.
        """
        doc = self.data()
        if self.eid is not None:
            _id = ObjectId(doc.pop('eid'))
            self._graph.db[VERTEX_COLLECTION].replace_one(
                {'_id': _id},
                doc
            )
        else:
            ret = self._graph.db[VERTEX_COLLECTION].insert_one(doc)
            self.eid = str(ret.inserted_id)

    def outE(self, **properties):
        """Yield outgoing edges (this vertex as 'source') matching ``properties``."""
        properties['source'] = ObjectId(self.eid)
        elts = self._graph.db[EDGE_COLLECTION].find(properties)
        for elt in elts:
            elt['eid'] = str(elt.pop('_id'))
            elt = self._graph.edge_class(self._graph, data=elt)
            yield elt

    def inE(self, **properties):
        """Yield incoming edges (this vertex as 'target') matching ``properties``."""
        properties['target'] = ObjectId(self.eid)
        elts = self._graph.db[EDGE_COLLECTION].find(properties)
        for elt in elts:
            elt['eid'] = str(elt.pop('_id'))
            elt = self._graph.edge_class(self._graph, data=elt)
            yield elt

    def bothE(self, **properties):
        """Yield edges in either direction matching ``properties``."""
        # Deep-copy so the two direction-specific filters do not share
        # (or mutate) any nested values from the caller's keyword args.
        src_properties = deepcopy(properties)
        src_properties['source'] = ObjectId(self.eid)
        tgt_properties = deepcopy(properties)
        tgt_properties['target'] = ObjectId(self.eid)
        elts = self._graph.db[EDGE_COLLECTION].find({
            '$or': [
                src_properties,
                tgt_properties
            ]
        })
        for elt in elts:
            elt['eid'] = str(elt.pop('_id'))
            elt = self._graph.edge_class(self._graph, data=elt)
            yield elt
class Edge(BaseEdge):
    """MongoDB-backed edge: documents live in the kb_edges collection and
    reference their endpoints through the 'source' and 'target' fields."""

    # MongoDB collection that stores edge documents.
    __collection__ = EDGE_COLLECTION

    @classmethod
    def get(cls, graph, eid):
        """Return the edge with id ``eid``, or None if it does not exist."""
        elt = graph.db[EDGE_COLLECTION].find_one({'_id': ObjectId(eid)})
        if elt is not None:
            # Expose the Mongo ObjectId as a plain string under 'eid'.
            elt['eid'] = str(elt.pop('_id'))
            elt = cls(graph, data=elt)
        return elt

    @classmethod
    def get_all(cls, graph):
        """Yield every edge stored in the collection."""
        elts = graph.db[EDGE_COLLECTION].find()
        for elt in elts:
            elt['eid'] = str(elt.pop('_id'))
            elt = cls(graph, data=elt)
            yield elt

    def save(self):
        """Insert this edge, or replace the stored document if it has an id.

        On insert, the generated ObjectId is written back to ``self.eid``.
        """
        doc = self.data()
        if self.eid is not None:
            _id = ObjectId(doc.pop('eid'))
            self._graph.db[EDGE_COLLECTION].replace_one(
                {'_id': _id},
                doc
            )
        else:
            ret = self._graph.db[EDGE_COLLECTION].insert_one(doc)
            self.eid = str(ret.inserted_id)

    def outV(self):
        """Return the vertex referenced by this edge's 'target' field.

        NOTE(review): outV resolves 'target' and inV resolves 'source',
        which is inverted relative to Gremlin's outV/inV convention --
        confirm against BaseEdge's intended contract.
        """
        elt = self._graph.db[VERTEX_COLLECTION].find_one({
            '_id': ObjectId(self.target)
        })
        if elt is not None:
            elt['eid'] = str(elt.pop('_id'))
            elt = self._graph.vertex_class(self._graph, data=elt)
        return elt

    def inV(self):
        """Return the vertex referenced by this edge's 'source' field.

        See the NOTE(review) on outV about the direction convention.
        """
        elt = self._graph.db[VERTEX_COLLECTION].find_one({
            '_id': ObjectId(self.source)
        })
        if elt is not None:
            elt['eid'] = str(elt.pop('_id'))
            elt = self._graph.vertex_class(self._graph, data=elt)
        return elt

    def bothV(self):
        """Yield both endpoint vertices of this edge."""
        elts = self._graph.db[VERTEX_COLLECTION].find({
            '_id': {'$in': [
                ObjectId(self.source),
                ObjectId(self.target)
            ]}
        })
        for elt in elts:
            elt['eid'] = str(elt.pop('_id'))
            elt = self._graph.vertex_class(self._graph, data=elt)
            yield elt
class ElementView(BaseElementView):
    """Collection-level query/update/delete helper for ``self.cls`` elements."""

    def find(self, **properties):
        """Yield every element of this view whose document matches *properties*."""
        for document in self.graph.db[self.cls.__collection__].find(properties):
            document['eid'] = str(document.pop('_id'))
            yield self.cls(self.graph, data=document)

    def update(self, eid, **data):
        """Apply a partial ``$set`` update to the element identified by *eid*."""
        self.graph.db[self.cls.__collection__].update_one(
            {'_id': ObjectId(eid)},
            {'$set': data}
        )

    def delete(self, eid):
        """Remove the element identified by *eid* from its collection.

        BUG FIX: pymongo collections expose ``delete_one`` (3.x API used
        throughout this module: ``insert_one``/``replace_one``/``update_one``)
        but no ``remove_one`` — the original call raised AttributeError.
        """
        self.graph.db[self.cls.__collection__].delete_one(
            {'_id': ObjectId(eid)}
        )
class Graph(BaseGraph):
    """MongoDB-backed graph: binds the concrete element classes and owns
    the client connection."""

    # Concrete element implementations consumed by the BaseGraph machinery.
    elementview_class = ElementView
    vertex_class = Vertex
    edge_class = Edge

    def __init__(self, *args, **kwargs):
        # BaseGraph.__init__ is expected to populate self.conf (uri,
        # username, password) from *args/**kwargs.
        super(Graph, self).__init__(*args, **kwargs)
        # The database must be named in the URI for get_default_database().
        self.conn = MongoClient(self.conf.uri)
        self.db = self.conn.get_default_database()
        # NOTE(review): Database.authenticate() was removed in pymongo 4;
        # this assumes pymongo < 4 — confirm the pinned dependency version.
        if self.conf.username and self.conf.password:
            self.db.authenticate(self.conf.username, self.conf.password)

    def close(self):
        # Release the underlying MongoDB connection pool.
        self.conn.close()
| linkdd/knowledgebase | knowledgebase/db/mongo.py | Python | mit | 5,433 |
# -*- coding: utf-8 -*-
from datetime import datetime
from numpy import mean
from random import randint
from time import time as timestamp
from src.shell.callstack import CallStack
from src.shell.parser.type import TypeLogParser
from src.shell.parser.memalloc import MemallocParser
from src.shell.utils import list_split
from .addrtable import AddrTable
from .memory import Memory
class MemComb(object):
    """
    Retrieve allocators

    Replays a memory-access/call log (MemallocParser) against function
    prototypes (TypeLogParser) to guess which function is the program's
    top-level allocator and which one is its liberator (free).
    Python 2 code.
    """
    def __init__(self, mem_log_file, type_log_file, pgm,
            cli_ignore=None, cli_libmatch=None, coupleres_log_file=None, verbose=True):
        super(MemComb, self).__init__()
        # Parsed call/memory trace and inferred prototypes of the target.
        self.__parser = MemallocParser(mem_log_file, cli_ignore, cli_libmatch)
        self.__protos = TypeLogParser(type_log_file)
        if coupleres_log_file is not None:
            self.__couples_file = coupleres_log_file
        else:
            self.__couples_file = None
        # Name of the analysed program (used to filter trace entries).
        self.__pgm = pgm
        # Cache for candidates_from_couples(), keyed by allocator id.
        self.__free_candidates = dict()
        self.verbose = verbose

    def log(self, msg):
        # Console logging gated by the verbose flag.
        if self.verbose:
            print "[*] " + msg

    def __compute_callers(self):
        """Count, per function id, how many distinct callers it has.

        Functions called through .plt.got are forced to 3 so they are
        treated as multi-caller (library entry points).
        """
        call_stack = CallStack(self.__pgm)
        callers = dict()
        for block in self.__parser.get():
            # if block.is_in():
            #     if not libraries and block.is_from_main():
            #         callers.setdefault(block.id, 0)
            #         callers[block.id] += 1
            if block.is_in():
                caller = call_stack.top_id()
                call_stack.push(block)
            else:
                call_stack.pop()
                caller = call_stack.top_id()
            callers.setdefault(block.id, list())
            if caller is not None and caller not in callers[block.id]:
                callers[block.id].append(caller)
        for k, v in callers.items():
            if ".plt.got" in [x.split(":")[-1] for x in v]:
                callers[k] = 3
            else:
                callers[k] = len(v)
        return callers

    def __alloc(self, nb_callers, libraries, ignore=None):
        """
        Try to retrieve the top-level allocator

        Ranks functions by the number of previously-unseen addresses they
        return; functions taking an ADDR parameter are excluded (an
        allocator produces addresses, it does not consume them).
        """
        # Number of new addresses outputted by each function
        nb_new_addr = dict()
        # Addresses seen so far
        addr_seen = AddrTable()
        # Call stack
        call_stack = CallStack(self.__pgm)
        # For each block of data in the log file
        ii = 0
        for block in self.__parser.get():
            # IN PARAMETER
            if block.is_in():
                call_stack.push(block)
            # OUT PARAMETER
            else:
                call_stack.expect(block.id, block.date)
                # if block.id == "libc.so.6:529408:" and call_stack.depth <= 1:
                #     print call_stack.stack
                # Outside library mode, only top-level returns are counted.
                if call_stack.depth > 0 and not libraries:
                    call_stack.pop()
                    continue
                if block.is_addr():
                    if ignore is not None and block.id in ignore:
                        call_stack.pop()
                        continue
                    if block.id not in nb_new_addr.keys():
                        # [new addresses, total address outputs]
                        nb_new_addr[block.id] = [0, 0]
                    if not addr_seen.contains(block.val):
                        if not libraries or nb_callers[block.id] > 1:
                            if 'ADDR' not in self.__protos.get_proto(block.id)[1:]:
                                nb_new_addr[block.id][0] += 1
                        addr_seen.add(block.val)
                    nb_new_addr[block.id][1] += 1
                call_stack.pop()
            ii += 1
        # Best candidate first (most new addresses produced).
        ll = sorted(nb_new_addr.items(), key=lambda a: -a[1][0])
        candidates = list()
        for f in ll:
            proto = self.__protos.get_proto(f[0])
            if 'ADDR' not in proto[1:]:
                candidates.append(f[0])
        if len(candidates) > 0:
            return candidates[0]
        return None

    def __wrappers(self, ALLOC):
        """Detect functions that forward ALLOC's return value (wrappers)
        and print them as a tree rooted at ALLOC."""
        # Number of new addresses outputted by each function
        wrappers = dict()
        # Last output value
        last_addr = None
        #
        prev = None
        # TTL
        ttl = 0
        # Call stack
        call_stack = CallStack(self.__pgm)
        # For each block of data in the log file
        for block in self.__parser.get():
            # IN PARAMETER
            if block.is_in():
                call_stack.push(block)
            # OUT PARAMETER
            else:
                call_stack.expect(block.id, block.date)
                if block.is_addr():
                    if block.id in wrappers.keys():
                        wrappers[block.id][1] += 1
                    if block.id == ALLOC:
                        last_addr = block.val
                        depth = 1
                        prev = ALLOC
                    elif last_addr is not None:
                        # Same value propagated upward => wrapper candidate.
                        if block.val == last_addr:
                            if block.id not in wrappers.keys():
                                # [forwards, outputs, depths, predecessors]
                                wrappers[block.id] = [0, 1, list(), list()]
                            wrappers[block.id][0] += 1
                            wrappers[block.id][2].append(depth)
                            wrappers[block.id][3].append(prev)
                            depth += 1
                            prev = block.id
                        # else:
                        #     last_addr = None
                        #     ttl = 0
                        #     prev = None
                call_stack.pop()
                if call_stack.is_empty():
                    last_addr = None
        # (id, forward ratio, mean depth, most common predecessor),
        # keeping only functions forwarding the value > 50% of the time.
        wrappers = map(lambda a: (a[0], float(a[1][0])/float(a[1][1]), mean(a[1][2]), max(set(a[1][3]), key=a[1][3].count)), sorted(wrappers.items(), key=lambda a: a[1][0]))
        wrappers = sorted(filter(lambda a: a[1] > 0.5, wrappers), key=lambda a:a[2])
        WTREE = Wrapper(ALLOC, 1)
        for wrapper in wrappers:
            wrap = WTREE.get(wrapper[3], wrapper[2] - 1)
            if wrap is not None:
                wrap.add_child(Wrapper(wrapper[0], int(wrapper[1])))
            else:
                print "Elaged: {0}".format(wrapper[0])
        print WTREE.to_str(0)

    def candidates_from_couples(self, alloc_id):
        """Return (and cache) the set of functions paired with *alloc_id*
        in the couples result file, i.e. liberator candidates."""
        if self.__free_candidates.get(alloc_id) is None:
            # Iterate over the coupleres file to find functions that are
            # candidates to be the liberator.
            free_candidates = dict()
            with open(self.__couples_file, 'r') as f:
                line = f.readline().split(':')
                while line != ['']:
                    if line[1] == alloc_id:
                        free_candidates[line[4]] = True
                    line = f.readline().split(':')
            self.__free_candidates[alloc_id] = free_candidates
        return self.__free_candidates[alloc_id]

    def block_not_in_couple(self, alloc, block):
        """
        Check that the block doesn't correspond to a couple between its
        function and the guessed allocator
        """
        function_id = block.id.split(':')[1]
        alloc_id = alloc.split(':')[1]
        r = self.candidates_from_couples(alloc_id).get(function_id) is None
        return r and (function_id != alloc_id)

    def __free(self, ALLOC, libraries):
        """Rank liberator candidates: for each address produced by ALLOC,
        credit the last function seen touching it before the next ALLOC.

        Returns a list of [function_id, parameter_pos] pairs, best first.
        """
        nb_alloc = 0
        # Number of new addresses outputted by each function
        nb_new_addr = dict()
        # Addresses seen so far
        addr_alloc = AddrTable(dic=True)
        # Call stack
        call_stack = CallStack(self.__pgm)
        # Number of calls
        nb_calls = dict()
        for block in self.__parser.get():
            if (self.__couples_file is not None
                    and self.block_not_in_couple(ALLOC, block)):
                continue
            if block.is_out():
                nb_calls.setdefault(block.id, 0)
                nb_calls[block.id] += 1
            if block.id.split(":")[0] != self.__pgm and not libraries:
                continue
            if block.is_addr():
                # Record, per address, the sequence of functions touching it.
                if block.is_in() and block.id != ALLOC:
                    if not addr_alloc.contains(block.val):
                        addr_alloc.add(block.val)
                    block_id = "{0}|{1}".format(block.id, block.pos)
                    addr_alloc.add_dic(block.val, block_id)
                elif block.is_out() and block.id == ALLOC:
                    if not addr_alloc.contains(block.val):
                        addr_alloc.add(block.val)
                    addr_alloc.add_dic(block.val, block.id)
        for addr, call in addr_alloc.items():
            if len(call) == 0 or call.count(ALLOC) == 0:
                continue
            nb_alloc += call.count(ALLOC)
            # Last function seen in each ALLOC-delimited segment.
            candidates = map(lambda a: a[-1], list_split(call, ALLOC))
            for free in candidates:
                # while call.count(ALLOC) > 0:
                #     if call.index(ALLOC) == 0:
                #         call.pop(0)
                #         if len(call) > 0:
                #             free = call[-1]
                #         else:
                #             continue
                #     else:
                #         free = call[call.index(ALLOC) - 1]
                #     call = call[call.index(ALLOC)+1:]
                if free not in nb_new_addr.keys():
                    nb_new_addr[free] = 0
                nb_new_addr[free] += 1
                # call.count(free)
        free = sorted(nb_new_addr.items(), key=lambda a:-a[1])
        return map(lambda a: a[0].split("|"), free)
        # NOTE(review): everything below is unreachable dead code.
        # free[0][0].split("|")
        return free[0], free[1]

    def compute_blocks(self, ALLOC, FREE, POS):
        """Replay the trace with (ALLOC, FREE) as allocator/liberator and
        return (error counters, call counters) from the Memory simulator."""
        mem = Memory(debug=False)
        for i, block in enumerate(self.__parser.get()):
            if (self.__couples_file is not None
                    and self.block_not_in_couple(ALLOC, block)):
                continue
            if block.id == ALLOC and block.is_out():
                mem.alloc(block.val, 1)
            elif block.id == FREE and block.is_in():
                mem.free(block.val)
            # if i % 5000 == 0:
            #     print "[errors] ALLOC: {0} | FREE: {1}".format(mem.errors[0], mem.errors[1])
            #     print "[allocs] CURR: {0} | TOTAL: {1}".format(*mem.allocated)
            #     print "[nbcall] ALLOC: {0} | FREE: {1}".format(*mem.nb_calls)
        # size_stack = [(0, -1)]
        # for block in self.__parser.get():
        #     if block.id.split(":")[0] != self.__pgm:
        #         continue
        #     if block.id == ALLOC:
        #         # if block.is_in() and block.is_num() and size_stack[-1][1] != block.date:
        #         #     size_stack.append((block.val, block.date))
        #         if block.is_out():
        #             # if len(size_stack) <= 1:
        #             #     raise Exception("ALLOC stack inconsistancy at date {0}".format(block.date))
        #             size, date = size_stack.pop(-1)
        #             mem.alloc(block.val, size)
        #     else:
        #         is_free = False
        #         for free, pos in zip(FREE, POS):
        #             if block.id == free and block.pos == int(pos):
        #                 if block.is_in():
        #                     mem.free(block.val)
        #                     is_free = True
        #         if not is_free and block.is_addr() and block.is_in() and not mem.is_allocated(block.val):
        #             print "UAF", block.id, block.val, block.date
        self.log("[errors] ALLOC: {0} | FREE: {1}".format(mem.errors[0], mem.errors[1]))
        self.log("[allocs] CURR: {0} | TOTAL: {1}".format(*mem.allocated))
        self.log("[nbcall] ALLOC: {0} | FREE: {1}".format(*mem.nb_calls))
        return mem.errors, mem.nb_calls

    def __log_res(self, logfile, ALLOC, FREE, consistency, time):
        # Append one colon-separated result record to *logfile*.
        with open(logfile, "a") as f:
            f.write("{}:{}:{}:{}:{}:{}:{}:{}:{}.{}:{}.{}\n".format(
                self.__pgm,
                ALLOC,
                FREE,
                consistency[1][0],
                consistency[1][1],
                consistency[0][0],
                consistency[0][1],
                self.__parser.time,
                time[0].seconds,
                time[0].microseconds,
                time[1].seconds,
                time[1].microseconds,
            ))

    def run(self, libraries=False, wrappers=False, ignore=None,
            cli_ignore=None, get=False, log=None, test_consistency=False):
        """Full pipeline: find ALLOC, then FREE, then check consistency.

        With test_consistency, iterates over successive ALLOC candidates
        and logs results for each; otherwise reports the best pair.
        """
        time = list()
        if libraries:
            nb_callers = self.__compute_callers()
        else:
            nb_callers = None
        FREE = None
        consistency = [[0,0],[0,0]]
        start = datetime.now()
        ALLOC = self.__alloc(nb_callers, libraries)
        stop = datetime.now()
        time.append(stop - start)
        if test_consistency:
            ignore = list()
            while ALLOC is not None:
                start = datetime.now()
                FREES = self.__free(ALLOC, libraries)
                stop = datetime.now()
                time.append(stop - start)
                # Only the three best liberator candidates are evaluated.
                for FREE, POS in FREES[:3]:
                    # self.log("checking consistancy of blocks for ({},{})...".format(ALLOC, FREE))
                    consistency = self.compute_blocks(ALLOC, FREE, POS)
                    self.__log_res(log, ALLOC, FREE, consistency, time)
                ignore.append(ALLOC)
                ALLOC = self.__alloc(nb_callers, libraries, ignore=ignore)
            return
        else:
            if ALLOC is None:
                self.log("allocator not found")
                FREE = None
            else:
                ALLOC_IMAGE, ALLOC_ADDR, ALLOC_NAME = ALLOC.split(":")
                ALLOC_ADDR = hex(int(ALLOC.split(":")[1]))
                self.log("allocator found - {0}:{1}:{2}".format(ALLOC_IMAGE, ALLOC_ADDR, ALLOC_NAME))
                start = datetime.now()
                FREES = self.__free(ALLOC, libraries)
                stop = datetime.now()
                time.append(stop - start)
                try:
                    FREE_IMAGE, FREE_ADDR, FREE_NAME = FREES[0][0].split(":")
                except IndexError:
                    # No liberator candidate at all.
                    if self.__couples_file is not None:
                        self.log("Liberator not found in couples! Aborting.")
                        if log:
                            self.__log_res(log, ALLOC, FREE, consistency, time)
                        return
                    else:
                        self.log("Liberator not found! Aborting.")
                        if log:
                            self.__log_res(log, ALLOC, FREE, consistency, time)
                        return
                FREE_ADDR = hex(int(FREE_ADDR))
                self.log("liberator found - {0}:{1}:{2}".format(FREE_IMAGE, FREE_ADDR, FREE_NAME))
                self.log("checking consistancy of blocks...")
                with open("log/{}_memcomb_{}.log".format(self.__pgm, int(timestamp())), "w") as f:
                    f.write(ALLOC + '\n')
                    f.write(FREES[0][0])
                consistency = self.compute_blocks(ALLOC, *FREES[0])
                FREE = FREES[0][0]
                # if wrappers:
                #     # Detecting suballocators
                #     SUBALLOC = self.__wrappers(ALLOC)
        # Pad timings so __log_res can always index time[0] and time[1].
        while len(time) < 2:
            time.append(datetime.now() - datetime.now())
        if log:
            self.__log_res(log, ALLOC, FREE, consistency, time)
        if get:
            return ALLOC, FREE, consistency, time, self.__parser.time
| Frky/scat | src/shell/memory/memcomb.py | Python | mit | 15,616 |
# coding=utf-8
import json
from pprint import pprint
from flask import Response
import requests
from urllib.parse import quote_plus, unquote_plus
from .base_class import ZmirrorTestBase
from .utils import *
class TestCustomResponseRewriter(ZmirrorTestBase):
    """testing using https://httpbin.org/"""

    class C(ZmirrorTestBase.C):
        # Mirror configuration: requests to my_host_name are proxied to
        # target_domain; external_domains are served under /extdomains/.
        my_host_name = 'b.test.com'
        my_host_scheme = 'https://'
        target_domain = 'httpbin.org'
        target_scheme = 'https://'
        external_domains = ('eu.httpbin.org',)
        force_https_domains = 'ALL'
        enable_automatic_domains_whitelist = False
        # verbose_level = 4
        possible_charsets = None

        # Enables the custom text rewriter hook under test.
        custom_text_rewriter_enable = True

    def test_homepage(self):
        """https://httpbin.org/"""
        self.rv = self.client.get(self.url("/"), environ_base=env())  # type: Response
        self.assertIn(b'httpbin', self.rv.data, msg=self.dump())

    def test_relative_redirect_to(self):
        """https://httpbin.org/redirect-to?url=http%3A%2F%2Fexample.com%2F"""
        self.rv = self.client.get(
            self.url("/redirect-to"),
            query_string="url=http%3A%2F%2Fexample.com%2F",
            environ_base=env(),
            headers=headers(),
        )  # type: Response
        # A domain outside the mirror must be left pointing at the real site.
        self.assertIn("example.com", self.rv.location, msg=self.dump())

    def test_relative_redirect_to_2(self):
        """https://httpbin.org/redirect-to?url=http%3A%2F%2Feu.httpbin.org%2F"""
        self.rv = self.client.get(
            self.url("/redirect-to"),
            query_string="url=http%3A%2F%2Feu.httpbin.org%2F",
            environ_base=env(),
            headers=headers(),
        )  # type: Response
        # A configured external domain must be rewritten onto the mirror's
        # /extdomains/ path.
        self.assertEqual(self.url("/extdomains/eu.httpbin.org/"), self.rv.location, msg=self.dump())
| Aploium/MagicWebsiteMirror | tests/test_custom_response_text_rewrite.py | Python | mit | 1,808 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from .guiconfig import collectView
class BaseToolButton(QtWidgets.QPushButton):
    """Flat, checkable tool button of fixed 80x60 size used by the bar."""

    def __init__(self, text, parent=None):
        super(BaseToolButton, self).__init__()
        # NOTE(review): this attribute shadows QWidget.parent(); kept for
        # compatibility with existing callers.
        self.parent = parent
        self.setText(text)
        self.setFixedSize(80, 60)
        self.setCheckable(True)
        self.setFlat(True)
class NavgationBar(QtWidgets.QFrame):
    """Horizontal bar of mutually-exclusive (radio-like) tool buttons."""

    viewID = "NavgationBar"

    @collectView
    def __init__(self, buttonIds, parent=None):
        super(NavgationBar, self).__init__(parent)
        self.parent = parent
        self.setObjectName("NavgationBar")
        self.buttonIds = buttonIds
        self.initData()
        self.initUI()

    def initData(self):
        # Maps each button id to its BaseToolButton instance.
        self.buttons = {}

    def initUI(self):
        self.setFixedHeight(66)
        layout = QtWidgets.QHBoxLayout()
        for identifier in self.buttonIds:
            widget = BaseToolButton(identifier)
            # Also exposed as an attribute, e.g. self.HomeButton.
            setattr(self, "%sButton" % identifier, widget)
            widget.setObjectName(identifier)
            layout.addWidget(widget)
            self.buttons[identifier] = widget
        layout.addStretch()
        layout.setContentsMargins(10, 0, 0, 0)
        layout.setSpacing(1)
        self.setLayout(layout)
        for widget in self.buttons.values():
            widget.clicked.connect(self.checkedButton)

    def checkedButton(self):
        # Enforce exclusivity: only the clicked button stays checked.
        source = self.sender()
        source.setChecked(True)
        for widget in self.buttons.values():
            if widget is not source:
                widget.setChecked(False)
| dragondjf/CloudSetuper | setuper desktop app/gui/mainwindow/navgationbar.py | Python | mit | 1,788 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import copy
from optparse import OptionParser
import os
import pickle
import re
import sys
try:
from xml.etree.ElementTree import parse
except ImportError:
from elementtree.ElementTree import parse
# Make sure we're using Babel source, and not some previously installed version
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..'))
from babel import dates, numbers
from babel.localedata import Alias
# CLDR day abbreviation -> Babel weekday index (Monday == 0).
weekdays = {'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4, 'sat': 5,
            'sun': 6}
# Compatibility shim for Python < 2.5, which lacks the ``any`` builtin.
try:
    any
except NameError:
    def any(iterable):
        # NOTE(review): returns a (possibly empty) list rather than a bool;
        # its truthiness matches ``any``, so boolean-context uses behave
        # the same.
        return filter(None, list(iterable))
def _text(elem):
buf = [elem.text or '']
for child in elem:
buf.append(_text(child))
buf.append(elem.tail or '')
return u''.join(filter(None, buf)).strip()
# A plain alias path segment is a bare identifier...
NAME_RE = re.compile(r"^\w+$")
# ...or an element with a type predicate, e.g. "monthWidth[@type='wide']".
TYPE_ATTR_RE = re.compile(r"^\w+\[@type='(.*?)'\]$")

# CLDR element names -> keys used in Babel's locale data dictionaries.
NAME_MAP = {
    'dateFormats': 'date_formats',
    'dateTimeFormats': 'datetime_formats',
    'eraAbbr': 'abbreviated',
    'eraNames': 'wide',
    'eraNarrow': 'narrow',
    'timeFormats': 'time_formats'
}
def _translate_alias(ctxt, path):
    """Resolve a CLDR alias *path* relative to the context key list *ctxt*.

    ``..`` segments pop the current key; ``tag[@type='x']`` segments push
    ``x``; plain tags push their NAME_MAP translation (or themselves).
    """
    keys = list(ctxt)
    for segment in path.split('/'):
        if segment == '..':
            keys.pop()
            continue
        typed = TYPE_ATTR_RE.match(segment)
        if typed is not None:
            keys.append(typed.group(1))
        else:
            assert NAME_RE.match(segment)
            keys.append(NAME_MAP.get(segment, segment))
    return keys
def main():
    """Convert a CLDR source tree into Babel's pickled locale data.

    Reads supplemental data (time zones, metazones, territory
    containment), then each locale XML under main/, and writes
    global.dat plus one localedata/<locale>.dat per input file.
    Python 2 script.
    """
    parser = OptionParser(usage='%prog path/to/cldr')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('incorrect number of arguments')

    srcdir = args[0]
    destdir = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
                           '..', 'babel')

    sup = parse(os.path.join(srcdir, 'supplemental', 'supplementalData.xml'))

    # Import global data from the supplemental files
    global_data = {}

    territory_zones = global_data.setdefault('territory_zones', {})
    zone_aliases = global_data.setdefault('zone_aliases', {})
    zone_territories = global_data.setdefault('zone_territories', {})
    for elem in sup.findall('//timezoneData/zoneFormatting/zoneItem'):
        tzid = elem.attrib['type']
        territory_zones.setdefault(elem.attrib['territory'], []).append(tzid)
        zone_territories[tzid] = elem.attrib['territory']
        if 'aliases' in elem.attrib:
            for alias in elem.attrib['aliases'].split():
                zone_aliases[alias] = tzid

    # Import Metazone mapping
    meta_zones = global_data.setdefault('meta_zones', {})
    tzsup = parse(os.path.join(srcdir, 'supplemental', 'metazoneInfo.xml'))
    for elem in tzsup.findall('//timezone'):
        for child in elem.findall('usesMetazone'):
            if 'to' not in child.attrib: # FIXME: support old mappings
                meta_zones[elem.attrib['type']] = child.attrib['mzone']

    outfile = open(os.path.join(destdir, 'global.dat'), 'wb')
    try:
        pickle.dump(global_data, outfile, 2)
    finally:
        outfile.close()

    # build a territory containment mapping for inheritance
    regions = {}
    for elem in sup.findall('//territoryContainment/group'):
        regions[elem.attrib['type']] = elem.attrib['contains'].split()

    # Resolve territory containment
    territory_containment = {}
    region_items = regions.items()
    region_items.sort()
    for group, territory_list in region_items:
        for territory in territory_list:
            containers = territory_containment.setdefault(territory, set([]))
            if group in territory_containment:
                containers |= territory_containment[group]
            containers.add(group)

    # root.xml must be processed first; the rest shortest-name first so
    # parent locales are handled before their children.
    filenames = os.listdir(os.path.join(srcdir, 'main'))
    filenames.remove('root.xml')
    filenames.sort(lambda a,b: len(a)-len(b))
    filenames.insert(0, 'root.xml')

    for filename in filenames:
        stem, ext = os.path.splitext(filename)
        if ext != '.xml':
            continue

        print>>sys.stderr, 'Processing input file %r' % filename
        tree = parse(os.path.join(srcdir, 'main', filename))
        data = {}

        language = None
        elem = tree.find('//identity/language')
        if elem is not None:
            language = elem.attrib['type']
        print>>sys.stderr, ' Language: %r' % language

        territory = None
        elem = tree.find('//identity/territory')
        if elem is not None:
            territory = elem.attrib['type']
        else:
            territory = '001' # world
        print>>sys.stderr, ' Territory: %r' % territory
        regions = territory_containment.get(territory, [])
        print>>sys.stderr, ' Regions: %r' % regions

        # <localeDisplayNames>
        # Draft/alt entries never overwrite an already-seen value.
        territories = data.setdefault('territories', {})
        for elem in tree.findall('//territories/territory'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in territories:
                continue
            territories[elem.attrib['type']] = _text(elem)

        languages = data.setdefault('languages', {})
        for elem in tree.findall('//languages/language'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in languages:
                continue
            languages[elem.attrib['type']] = _text(elem)

        variants = data.setdefault('variants', {})
        for elem in tree.findall('//variants/variant'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in variants:
                continue
            variants[elem.attrib['type']] = _text(elem)

        scripts = data.setdefault('scripts', {})
        for elem in tree.findall('//scripts/script'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib['type'] in scripts:
                continue
            scripts[elem.attrib['type']] = _text(elem)

        # <dates>
        # NOTE(review): the loop variable ``territories`` below reuses the
        # display-names dict name above — confirm nothing later relies on it.
        week_data = data.setdefault('week_data', {})
        supelem = sup.find('//weekData')

        for elem in supelem.findall('minDays'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories for r in regions]):
                week_data['min_days'] = int(elem.attrib['count'])

        for elem in supelem.findall('firstDay'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories for r in regions]):
                week_data['first_day'] = weekdays[elem.attrib['day']]

        for elem in supelem.findall('weekendStart'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories for r in regions]):
                week_data['weekend_start'] = weekdays[elem.attrib['day']]

        for elem in supelem.findall('weekendEnd'):
            territories = elem.attrib['territories'].split()
            if territory in territories or any([r in territories for r in regions]):
                week_data['weekend_end'] = weekdays[elem.attrib['day']]

        zone_formats = data.setdefault('zone_formats', {})
        for elem in tree.findall('//timeZoneNames/gmtFormat'):
            if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
                zone_formats['gmt'] = unicode(elem.text).replace('{0}', '%s')
                break
        for elem in tree.findall('//timeZoneNames/regionFormat'):
            if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
                zone_formats['region'] = unicode(elem.text).replace('{0}', '%s')
                break
        for elem in tree.findall('//timeZoneNames/fallbackFormat'):
            if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
                zone_formats['fallback'] = unicode(elem.text) \
                    .replace('{0}', '%(0)s').replace('{1}', '%(1)s')
                break

        time_zones = data.setdefault('time_zones', {})
        for elem in tree.findall('//timeZoneNames/zone'):
            info = {}
            city = elem.findtext('exemplarCity')
            if city:
                info['city'] = unicode(city)
            for child in elem.findall('long/*'):
                info.setdefault('long', {})[child.tag] = unicode(child.text)
            for child in elem.findall('short/*'):
                info.setdefault('short', {})[child.tag] = unicode(child.text)
            time_zones[elem.attrib['type']] = info

        meta_zones = data.setdefault('meta_zones', {})
        for elem in tree.findall('//timeZoneNames/metazone'):
            info = {}
            city = elem.findtext('exemplarCity')
            if city:
                info['city'] = unicode(city)
            for child in elem.findall('long/*'):
                info.setdefault('long', {})[child.tag] = unicode(child.text)
            for child in elem.findall('short/*'):
                info.setdefault('short', {})[child.tag] = unicode(child.text)
            info['common'] = elem.findtext('commonlyUsed') == 'true'
            meta_zones[elem.attrib['type']] = info

        for calendar in tree.findall('//calendars/calendar'):
            if calendar.attrib['type'] != 'gregorian':
                # TODO: support other calendar types
                continue

            months = data.setdefault('months', {})
            for ctxt in calendar.findall('months/monthContext'):
                ctxt_type = ctxt.attrib['type']
                ctxts = months.setdefault(ctxt_type, {})
                for width in ctxt.findall('monthWidth'):
                    width_type = width.attrib['type']
                    widths = ctxts.setdefault(width_type, {})
                    for elem in width.getiterator():
                        if elem.tag == 'month':
                            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                    and int(elem.attrib['type']) in widths:
                                continue
                            widths[int(elem.attrib.get('type'))] = unicode(elem.text)
                        elif elem.tag == 'alias':
                            ctxts[width_type] = Alias(
                                _translate_alias(['months', ctxt_type, width_type],
                                                 elem.attrib['path'])
                            )

            days = data.setdefault('days', {})
            for ctxt in calendar.findall('days/dayContext'):
                ctxt_type = ctxt.attrib['type']
                ctxts = days.setdefault(ctxt_type, {})
                for width in ctxt.findall('dayWidth'):
                    width_type = width.attrib['type']
                    widths = ctxts.setdefault(width_type, {})
                    for elem in width.getiterator():
                        if elem.tag == 'day':
                            dtype = weekdays[elem.attrib['type']]
                            # NOTE(review): sibling loops test "'alt' in";
                            # this one tests "'alt' not in" — looks like a
                            # typo upstream, preserved as-is.
                            if ('draft' in elem.attrib or 'alt' not in elem.attrib) \
                                    and dtype in widths:
                                continue
                            widths[dtype] = unicode(elem.text)
                        elif elem.tag == 'alias':
                            ctxts[width_type] = Alias(
                                _translate_alias(['days', ctxt_type, width_type],
                                                 elem.attrib['path'])
                            )

            quarters = data.setdefault('quarters', {})
            for ctxt in calendar.findall('quarters/quarterContext'):
                ctxt_type = ctxt.attrib['type']
                ctxts = quarters.setdefault(ctxt.attrib['type'], {})
                for width in ctxt.findall('quarterWidth'):
                    width_type = width.attrib['type']
                    widths = ctxts.setdefault(width_type, {})
                    for elem in width.getiterator():
                        if elem.tag == 'quarter':
                            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                    and int(elem.attrib['type']) in widths:
                                continue
                            widths[int(elem.attrib['type'])] = unicode(elem.text)
                        elif elem.tag == 'alias':
                            ctxts[width_type] = Alias(
                                _translate_alias(['quarters', ctxt_type, width_type],
                                                 elem.attrib['path'])
                            )

            eras = data.setdefault('eras', {})
            for width in calendar.findall('eras/*'):
                width_type = NAME_MAP[width.tag]
                widths = eras.setdefault(width_type, {})
                for elem in width.getiterator():
                    if elem.tag == 'era':
                        if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                and int(elem.attrib['type']) in widths:
                            continue
                        widths[int(elem.attrib.get('type'))] = unicode(elem.text)
                    elif elem.tag == 'alias':
                        eras[width_type] = Alias(
                            _translate_alias(['eras', width_type],
                                             elem.attrib['path'])
                        )

            # AM/PM
            periods = data.setdefault('periods', {})
            for elem in calendar.findall('am'):
                if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                        and elem.tag in periods:
                    continue
                periods[elem.tag] = unicode(elem.text)
            for elem in calendar.findall('pm'):
                if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                        and elem.tag in periods:
                    continue
                periods[elem.tag] = unicode(elem.text)

            date_formats = data.setdefault('date_formats', {})
            for format in calendar.findall('dateFormats'):
                for elem in format.getiterator():
                    if elem.tag == 'dateFormatLength':
                        if 'draft' in elem.attrib and \
                                elem.attrib.get('type') in date_formats:
                            continue
                        try:
                            date_formats[elem.attrib.get('type')] = \
                                dates.parse_pattern(unicode(elem.findtext('dateFormat/pattern')))
                        except ValueError, e:
                            print>>sys.stderr, 'ERROR: %s' % e
                    elif elem.tag == 'alias':
                        date_formats = Alias(_translate_alias(
                            ['date_formats'], elem.attrib['path'])
                        )

            time_formats = data.setdefault('time_formats', {})
            for format in calendar.findall('timeFormats'):
                for elem in format.getiterator():
                    if elem.tag == 'timeFormatLength':
                        if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                and elem.attrib.get('type') in time_formats:
                            continue
                        try:
                            time_formats[elem.attrib.get('type')] = \
                                dates.parse_pattern(unicode(elem.findtext('timeFormat/pattern')))
                        except ValueError, e:
                            print>>sys.stderr, 'ERROR: %s' % e
                    elif elem.tag == 'alias':
                        time_formats = Alias(_translate_alias(
                            ['time_formats'], elem.attrib['path'])
                        )

            datetime_formats = data.setdefault('datetime_formats', {})
            for format in calendar.findall('dateTimeFormats'):
                for elem in format.getiterator():
                    if elem.tag == 'dateTimeFormatLength':
                        if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                                and elem.attrib.get('type') in datetime_formats:
                            continue
                        try:
                            datetime_formats[elem.attrib.get('type')] = \
                                unicode(elem.findtext('dateTimeFormat/pattern'))
                        except ValueError, e:
                            print>>sys.stderr, 'ERROR: %s' % e
                    elif elem.tag == 'alias':
                        datetime_formats = Alias(_translate_alias(
                            ['datetime_formats'], elem.attrib['path'])
                        )

        # <numbers>
        number_symbols = data.setdefault('number_symbols', {})
        for elem in tree.findall('//numbers/symbols/*'):
            number_symbols[elem.tag] = unicode(elem.text)

        decimal_formats = data.setdefault('decimal_formats', {})
        for elem in tree.findall('//decimalFormats/decimalFormatLength'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in decimal_formats:
                continue
            pattern = unicode(elem.findtext('decimalFormat/pattern'))
            decimal_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)

        scientific_formats = data.setdefault('scientific_formats', {})
        for elem in tree.findall('//scientificFormats/scientificFormatLength'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in scientific_formats:
                continue
            pattern = unicode(elem.findtext('scientificFormat/pattern'))
            scientific_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)

        currency_formats = data.setdefault('currency_formats', {})
        for elem in tree.findall('//currencyFormats/currencyFormatLength'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in currency_formats:
                continue
            pattern = unicode(elem.findtext('currencyFormat/pattern'))
            currency_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)

        percent_formats = data.setdefault('percent_formats', {})
        for elem in tree.findall('//percentFormats/percentFormatLength'):
            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
                    and elem.attrib.get('type') in percent_formats:
                continue
            pattern = unicode(elem.findtext('percentFormat/pattern'))
            percent_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)

        currency_names = data.setdefault('currency_names', {})
        currency_symbols = data.setdefault('currency_symbols', {})
        for elem in tree.findall('//currencies/currency'):
            code = elem.attrib['type']
            # TODO: support plural rules for currency name selection
            for name in elem.findall('displayName'):
                if ('draft' in name.attrib or 'count' in name.attrib) \
                        and code in currency_names:
                    continue
                currency_names[code] = unicode(name.text)
            # TODO: support choice patterns for currency symbol selection
            symbol = elem.find('symbol')
            if symbol is not None and 'draft' not in symbol.attrib \
                    and 'choice' not in symbol.attrib:
                currency_symbols[code] = unicode(symbol.text)

        outfile = open(os.path.join(destdir, 'localedata', stem + '.dat'), 'wb')
        try:
            pickle.dump(data, outfile, 2)
        finally:
            outfile.close()


if __name__ == '__main__':
    main()
| mastizada/kuma | vendor/packages/Babel/scripts/import_cldr.py | Python | mpl-2.0 | 20,534 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
import sys
import json
import math
def explode(coords):
    """Explode a GeoJSON geometry's coordinates object and yield coordinate tuples.
    As long as the input is conforming, the type of the geometry doesn't matter.

    A list whose first element is numeric is treated as one coordinate
    pair and yielded whole; otherwise each nested element is recursed into.
    """
    for e in coords:
        # FIX: the tuple was (float, int, int) — a 2to3 artifact of the
        # original (float, int, long); the duplicate int is redundant.
        if isinstance(e, (float, int)):
            yield coords
            break
        else:
            for f in explode(e):
                yield f
def bbox(f):
    """Return (min_x, min_y, max_x, max_y) of a GeoJSON feature *f*."""
    xs, ys = zip(*list(explode(f['geometry']['coordinates'])))
    return min(xs), min(ys), max(xs), max(ys)
def to_bbox_polygon(bounding_box):
    """Build a closed GeoJSON Polygon from a (min_x, min_y, max_x, max_y) box.

    The ring repeats the first point at the end, as GeoJSON requires.
    """
    x0, y0, x1, y1 = bounding_box
    ring = [[x0, y0], [x0, y1], [x1, y1], [x1, y0], [x0, y0]]
    return {'type': 'Polygon', 'coordinates': [ring]}
MAX = 18  # maximum number of states written into one output chunk file
def process_us_states(geoJSON_file):
    """Read a US-states GeoJSON file and write state bounding boxes in batches.

    Every MAX states, a file named 'US_BY_STATE_<k>.json' is written holding a
    comma-joined list of the batch's bounding boxes ("min_x,min_y,max_x,max_y").
    """
    with open(geoJSON_file) as gf:
        us = json.load(gf)
    logger.info(len(us['features']))
    name = []
    locations = []
    cnt = 0
    for state in us['features']:
        #logger.info(state['properties'])
        #min_x, min_y, max_x, max_y = bbox(state)
        name.append(state['properties']['NAME'])
        # bbox() returns (min_x, min_y, max_x, max_y); serialize as one string
        locations.append(','.join(['%s'%(x) for x in list(bbox(state))]))
        cnt += 1
        if (cnt % MAX == 0):
            # flush a full batch of MAX states into its own numbered file
            n = 'US_BY_STATE_%d.json'%(math.ceil(cnt/MAX))
            with open(n, 'w') as wf:
                json.dump({'name': n, 'locations': ','.join(locations)}, wf)
            name = []
            locations = []
            #logger.info(locations)
            #logger.info(json.dumps(to_bbox_polygon(bbox(state))))
    else:
        # for/else: runs after the loop finishes; writes the final partial batch
        if (len(name) > 0):
            n = 'US_BY_STATE_%d.json'%(math.ceil(cnt/MAX))
            with open(n, 'w') as wf:
                json.dump({'name': n, 'locations': ','.join(locations)}, wf)
def process_us_counties(geoJSON_file):
    """Load a US-counties GeoJSON file and collect Florida's counties.

    Currently only logs the feature count and the number of counties whose
    STATE FIPS code is '12' (Florida, per the `florida` accumulator).
    """
    with open(geoJSON_file, 'rb') as gf:
        # decode with errors ignored: the Census GeoJSON contains stray bytes
        us = json.loads(gf.read().decode('utf-8','ignore'))
    logger.info(len(us['features']))
    name = []
    locations = []
    cnt = 0
    florida = []
    for county in us['features']:
        if (county['properties']['STATE'] == '12'):
            florida.append(county)
    logger.info(len(florida))
def center(geolocations):
    """
    Provide a relatively accurate center lat, lon returned as a list pair, given
    a list of list pairs.
    ex: in: geolocations = ((lat1,lon1), (lat2,lon2),)
        out: (center_lat, center_lon)
    """
    from math import cos, sin, atan2, sqrt, pi
    # Average the points as unit vectors in 3-D Cartesian space, then convert
    # the mean vector back to spherical (lat, lon) coordinates.
    sum_x = sum_y = sum_z = 0
    for latitude, longitude in geolocations:
        phi = float(latitude) * pi / 180
        lam = float(longitude) * pi / 180
        sum_x += cos(phi) * cos(lam)
        sum_y += cos(phi) * sin(lam)
        sum_z += sin(phi)
    count = len(geolocations)
    mean_x = float(sum_x / count)
    mean_y = float(sum_y / count)
    mean_z = float(sum_z / count)
    return (atan2(mean_z, sqrt(mean_x * mean_x + mean_y * mean_y)) * 180 / pi,
            atan2(mean_y, mean_x) * 180 / pi)
def radius(geolocations):
    """Half the Vincenty distance, in miles, between a pair of (lat, lon) points.

    Approximates the radius of a circle spanning the two corners.
    """
    from geopy.distance import vincenty
    start, end = geolocations
    return vincenty(start, end).miles / 2.0
#x is lon, y is lat NOTE
def find_county_by_name(name, state, geoJSON_file):
    """Locate a county by (partial) name within a state and log its geometry.

    `state` is a two-digit state FIPS code string (e.g. '04' for Arizona).
    Logs the bounding box, center and radius of the first match, then stops
    the interpreter via quit().

    Fix: removed the unused local `import pandas as pd`.
    """
    with open(geoJSON_file, 'rb') as gf:
        us = json.loads(gf.read().decode('utf-8','ignore'))
    for county in us['features']:
        if (county['properties']['STATE'] == state and name.lower() in county['properties']['NAME'].strip().lower()):
            min_x, min_y, max_x, max_y = bbox(county)
            logger.info(bbox(county))
            #logger.info(county)
            # bbox order is (lon, lat); center()/radius() expect (lat, lon) pairs
            center_y, center_x = center(((min_y, min_x), (max_y, max_x),))
            r = radius(((min_y, min_x), (max_y, max_x)))
            logger.info(county)
            logger.info('%s,%s,%sm'%(center_x, center_y, r))
            quit()
if __name__=="__main__":
    # Log the interpreter version, then batch the state bounding boxes.
    logger.info(sys.version)
    process_us_states('gz_2010_us_040_00_20m.json')
    #process_us_counties('gz_2010_us_050_00_20m.json')
    # California 06
    # Texas 48
    # New Mexico 35
    # Arizona 04
    #find_county_by_name('Cochise', '04', 'gz_2010_us_050_00_20m.json')
| bianjiang/tweetf0rm | test_data/geo/process_geojson.py | Python | mit | 4,591 |
import time
from binascii import hexlify, unhexlify
from copy import copy
from tempfile import TemporaryDirectory
import pytest
from ledger.compact_merkle_tree import CompactMerkleTree
from ledger.merkle_verifier import MerkleVerifier
from ledger.stores.hash_store import HashStore
from ledger.tree_hasher import TreeHasher
from ledger.stores.memory_hash_store import MemoryHashStore
from ledger.stores.file_hash_store import FileHashStore
from ledger.test.helper import checkConsistency
from ledger.util import STH
"""
1: 221
[221]
/
221
2: e8b
[e8b]
/ \
221 fa6
3: e8b, 906
fe6
/ \
[e8b] [906]
/ \
221 fa6
4: 4c4
[4c4]
/ \
e8b 9c7
/ \ / \
221 fa6 906 11e
5: 4c4, 533
e10
/ \
[4c4] [533]
/ \
e8b 9c7
/ \ / \
221 fa6 906 11e
6: 4c4, 2b1
ecc
/ \
[4c4] [2b1]
/ \ / \
e8b 9c7 533 3bf
/ \ / \
221 fa6 906 11e
7: 4c4, 2b1, 797
74f
/ \
[4c4] 429
/ \ / \
e8b 9c7 [2b1] [797]
/ \ / \ / \
221 fa6 906 11e 533 3bf
8: 50f
[50f]
/ \
4c4 fed
/ \ / \
e8b 9c7 2b1 800
/ \ / \ / \ / \
221 fa6 906 11e 533 3bf 797 754
"""
"""
hexlify(c(
c(
c(
l(d[0]), l(d[1])
),
c(
l(d[2]), l(d[3])
)
),
c(
c(
l(d[4]),l(d[5])
),
l(d[6])
)
))
"""
@pytest.yield_fixture(scope="module", params=['File', 'Memory'])
def hashStore(request, tdir):
    """Module-scoped fixture yielding each hash-store backend in turn.

    Parametrized so dependent tests run once against a FileHashStore (rooted
    at the tdir fixture) and once against a MemoryHashStore.
    """
    if request.param == 'File':
        fhs = FileHashStore(tdir)
        yield fhs
    elif request.param == 'Memory':
        yield MemoryHashStore()
@pytest.fixture()
def hasher():
    """Fresh TreeHasher instance per test."""
    return TreeHasher()
@pytest.fixture()
def verifier(hasher):
    """MerkleVerifier wired to the shared hasher fixture."""
    return MerkleVerifier(hasher=hasher)
@pytest.fixture()
def hasherAndTree(hasher):
    """Return (hasher, CompactMerkleTree) backed by a file hash store.

    NOTE(review): TemporaryDirectory().name drops the TemporaryDirectory
    object immediately, so its finalizer may remove the directory while the
    store is still in use -- confirm this is intended.
    """
    tdir = TemporaryDirectory().name
    store = FileHashStore(tdir)
    m = CompactMerkleTree(hasher=hasher, hashStore=store)
    return hasher, m
@pytest.fixture()
def addTxns(hasherAndTree):
    """Append 1000 serialized serial numbers to the tree.

    Returns (txn_count, auditPaths) where auditPaths[i] holds the hex-encoded
    audit path produced when leaf i+1 was appended.
    """
    h, m = hasherAndTree
    txn_count = 1000
    auditPaths = []
    for d in range(txn_count):
        serNo = d+1
        data = str(serNo).encode()
        auditPaths.append([hexlify(h) for h in m.append(data)])
    return txn_count, auditPaths
@pytest.fixture()
def storeHashes(hasherAndTree, addTxns, hashStore):
    """Hash store of the already-populated tree (addTxns has run first).

    The hashStore fixture is requested only for its parametrization; the
    store returned is the tree's own.
    """
    h, m = hasherAndTree
    mhs = m.hashStore
    return mhs
'''
14
pwr = 3
c = 8
14,8
pwr = 2
c = 4 + 8 = 12
12,2
14, 12
pwr = 1
c = 2 + 12 = 14
14,1
'''
def show(h, m, data):
    """Print a human-readable snapshot of the tree after appending *data*."""
    print("-" * 60)
    leaf_digest = hexlify(h.hash_leaf(data))[:3]
    print("appended : {}".format(data))
    print("hash : {}".format(leaf_digest))
    print("tree size : {}".format(m.tree_size))
    print("root hash : {}".format(m.root_hash_hex[:3]))
    for idx, node_hash in enumerate(m.hashes):
        label = "Hashes" if idx == 0 else " "
        print("{} : {}".format(label, hexlify(node_hash)[:3]))
def testCompactMerkleTree2(hasherAndTree, verifier):
    """Smoke test: appending a few hex-encoded leaves must not raise."""
    h, m = hasherAndTree
    v = verifier
    for serNo in range(1, 4):
        data = hexlify(str(serNo).encode())
        m.append(data)
def testCompactMerkleTree(hasherAndTree, verifier):
    """End-to-end tree test: inclusion proofs, consistency proofs, copy/save/load.

    For each appended leaf, checks the audit path against inclusion_proof(),
    recomputes the root from the audit path and verifies leaf inclusion; then
    checks tree consistency between successive sizes and that save/load/copy
    preserve the root hash and internal hashes.
    """
    h, m = hasherAndTree
    printEvery = 1000
    count = 1000
    for d in range(count):
        data = str(d + 1).encode()
        data_hex = hexlify(data)
        audit_path = m.append(data)
        audit_path_hex = [hexlify(h) for h in audit_path]
        incl_proof = m.inclusion_proof(d, d+1)
        # audit path returned by append must equal the separately computed proof
        assert audit_path == incl_proof
        if d % printEvery == 0:
            show(h, m, data_hex)
            print("audit path is {}".format(audit_path_hex))
            print("audit path length is {}".format(verifier.audit_path_length(
                d, d+1)))
            print("audit path calculated length is {}".format(
                len(audit_path)))
        calculated_root_hash = verifier._calculate_root_hash_from_audit_path(
            h.hash_leaf(data), d, audit_path[:], d+1)
        if d % printEvery == 0:
            print("calculated root hash is {}".format(calculated_root_hash))
        sth = STH(d+1, m.root_hash)
        # raises on failure, so no assert needed
        verifier.verify_leaf_inclusion(data, d, audit_path, sth)
    checkConsistency(m, verifier=verifier)
    for d in range(1, count):
        verifier.verify_tree_consistency(d, d + 1,
                                         m.merkle_tree_hash(0, d),
                                         m.merkle_tree_hash(0, d + 1),
                                         m.consistency_proof(d, d + 1))
    # save() into a fresh tree must reproduce the state exactly
    newTree = CompactMerkleTree(hasher=h)
    m.save(newTree)
    assert m.root_hash == newTree.root_hash
    assert m.hashes == newTree.hashes
    # load() from the original must also reproduce the state
    newTree = CompactMerkleTree(hasher=h)
    newTree.load(m)
    assert m.root_hash == newTree.root_hash
    assert m.hashes == newTree.hashes
    # shallow copy must agree as well
    newTree = copy(m)
    assert m.root_hash == newTree.root_hash
    assert m.hashes == newTree.hashes
def testEfficientHashStore(hasherAndTree, addTxns, storeHashes):
    """Check the hash store holds exactly the expected leaves and nodes.

    Reads every leaf up to txnCount, verifies reading one past the end raises
    IndexError, then walks node entries until the store runs out.
    """
    h, m = hasherAndTree
    mhs = storeHashes  # type: HashStore
    txnCount, auditPaths = addTxns
    for leaf_ptr in range(1, txnCount + 1):
        print("leaf hash: {}".format(hexlify(mhs.readLeaf(leaf_ptr))))
    # make sure that there are not more leafs than we expect
    try:
        mhs.readLeaf(txnCount + 1)
        assert False
    except Exception as ex:
        assert isinstance(ex, IndexError)
    node_ptr = 0
    while True:
        node_ptr += 1
        try:
            # start, height, node_hash = mhs.readNode(node_ptr)
            node_hash = mhs.readNode(node_ptr)
        except IndexError:
            # past the last stored node: done
            break
        print("node hash: {}".format(hexlify(node_hash)))
        # TODO: The api has changed for FileHashStore and OrientDBStore,
        # HashStore should implement methods for calculating start and
        # height of a node
        # end = start - pow(2, height) + 1
        # print("node hash start-end: {}-{}".format(start, end))
        # print("node hash height: {}".format(height))
        # print("node hash end: {}".format(end)s)
        # _, _, nhByTree = mhs.readNodeByTree(start, height)
        # assert nhByTree == node_hash
def testLocate(hasherAndTree, addTxns, storeHashes):
    """Rebuild audit paths from the hash store and cross-check against append().

    For the first 50 leaves: reconstructs the audit path from getPath(),
    compares it to the path recorded at append time, recomputes the root hash
    both ways, and verifies leaf-hash inclusion with each.
    """
    h, m = hasherAndTree
    mhs = storeHashes
    txnCount, auditPaths = addTxns
    verifier = MerkleVerifier()
    startingTime = time.perf_counter()
    for d in range(50):
        print()
        pos = d+1
        print("Audit Path for Serial No: {}".format(pos))
        leafs, nodes = mhs.getPath(pos)
        calculatedAuditPath = []
        for i, leaf_pos in enumerate(leafs):
            hexLeafData = hexlify(mhs.readLeaf(leaf_pos))
            print("leaf: {}".format(hexLeafData))
            calculatedAuditPath.append(hexLeafData)
        for node_pos in nodes:
            node = mhs.readNode(node_pos)
            hexNodeData = hexlify(node)
            print("node: {}".format(hexNodeData))
            calculatedAuditPath.append(hexNodeData)
        print("{} -> leafs: {}, nodes: {}".format(pos, leafs, nodes))
        print("Audit path built using formula {}".format(calculatedAuditPath))
        print("Audit path received while appending leaf {}".format(auditPaths[d]))
        # Testing equality of audit path calculated using formula and audit path
        # received while inserting leaf into the tree
        assert calculatedAuditPath == auditPaths[d]
        auditPathLength = verifier.audit_path_length(d, d+1)
        assert auditPathLength == len(calculatedAuditPath)
        # Testing root hash generation
        leafHash = storeHashes.readLeaf(d + 1)
        rootHashFrmCalc = hexlify(verifier._calculate_root_hash_from_audit_path(
            leafHash, d, [unhexlify(h) for h in calculatedAuditPath], d+1))
        rootHash = hexlify(verifier._calculate_root_hash_from_audit_path(
            leafHash, d, [unhexlify(h) for h in auditPaths[d]], d + 1))
        assert rootHash == rootHashFrmCalc
        print("Root hash from audit path built using formula {}".
              format(calculatedAuditPath))
        print("Root hash from audit path received while appending leaf {}".
              format(auditPaths[d]))
        print("Leaf hash length is {}".format(len(leafHash)))
        print("Root hash length is {}".format(len(rootHash)))
        # Testing verification, do not need `assert` since
        # `verify_leaf_hash_inclusion` will throw an exception
        sthFrmCalc = STH(d + 1, unhexlify(rootHashFrmCalc))
        verifier.verify_leaf_hash_inclusion(
            leafHash, d,
            [unhexlify(h) for h in calculatedAuditPath],
            sthFrmCalc)
        sth = STH(d + 1, unhexlify(rootHash))
        verifier.verify_leaf_hash_inclusion(
            leafHash, d,
            [unhexlify(h) for h in auditPaths[d]], sth)
    print(time.perf_counter()-startingTime)
| evernym/ledger | ledger/test/test_merkle_proof.py | Python | apache-2.0 | 9,276 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_policy
short_description: Manage IAM policies for users, groups, and roles
description:
- Allows uploading or removing IAM policies for IAM users, groups or roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: [ "user", "group", "role"]
iam_name:
description:
- Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
required: true
policy_name:
description:
- The name label for the policy to create or remove.
required: true
policy_document:
description:
- The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
required: false
policy_json:
description:
- A properly json formatted policy as string (mutually exclusive with C(policy_document),
see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
required: false
state:
description:
- Whether to create or delete the IAM policy.
required: true
default: null
choices: [ "present", "absent"]
  skip_duplicates:
    description:
      - By default the module looks for any policies that match the document you pass in; if there is a match, it will not make a new policy object with
        the same rules. You can override this by specifying false, which would allow for two policy objects with different names but the same rules.
    required: false
    default: "true"
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a policy with the name of 'Admin' to the group 'administrators'
tasks:
- name: Assign a policy called Admin to the administrators group
iam_policy:
iam_type: group
iam_name: administrators
policy_name: Admin
state: present
policy_document: admin_policy.json
# Advanced example, create two new groups and add a READ-ONLY policy to both
# groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name: Apply READ-ONLY policy to new groups that have been recently created
iam_policy:
iam_type: group
iam_name: "{{ item.created_group.group_name }}"
policy_name: "READ-ONLY"
policy_document: readonlypolicy.json
state: present
with_items: "{{ new_groups.results }}"
# Create a new S3 policy with prefix per user
tasks:
- name: Create S3 policy from template
iam_policy:
iam_type: user
iam_name: "{{ item.user }}"
policy_name: "s3_limited_access_{{ item.prefix }}"
state: present
policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
with_items:
- user: s3_user
prefix: s3_user_prefix
'''
import json
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info, boto_exception
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import urllib
def user_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create or delete an inline policy on an IAM user.

    Returns (changed, name, updated_policy_names). Exits the module early
    (via module.exit_json) when deleting a policy that is already absent;
    any other boto error fails the module.
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            '''
            urllib is needed here because boto returns url encoded strings instead
            '''
            if urllib.parse.unquote(iam.get_user_policy(name, pol).
                                    get_user_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_user_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_user_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
        updated_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies
def role_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create or delete an inline policy on an IAM role.

    Returns (changed, name, updated_policy_names). If the role itself does
    not exist, exits the module with changed=False; deleting an already
    absent policy also exits early.
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as e:
        if e.error_code == "NoSuchEntity":
            # Role doesn't exist so it's safe to assume the policy doesn't either
            module.exit_json(changed=False, msg="No such role, policy will be skipped.")
        else:
            module.fail_json(msg=e.message)
    try:
        matching_policies = []
        for pol in current_policies:
            # boto returns url-encoded policy documents; unquote before comparing
            if urllib.parse.unquote(iam.get_role_policy(name, pol).
                                    get_role_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_role_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_role_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
                else:
                    module.fail_json(msg=err.message)
        updated_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies
def group_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create or delete an inline policy on an IAM group.

    Returns (changed, name, updated_policy_names, msg); msg reports when the
    supplied document already exists under another policy name.
    """
    policy_match = False
    changed = False
    msg=''
    try:
        current_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            # boto returns url-encoded policy documents; unquote before comparing
            if urllib.parse.unquote(iam.get_group_policy(name, pol).
                                    get_group_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
                msg=("The policy document you specified already exists "
                     "under the name %s." % pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_group_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_group_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
        updated_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies, msg
def main():
    """Entry point: parse module arguments and dispatch the IAM policy action.

    Fixes: the original re-read 'state' after lowering it, discarding the
    .lower() result; and called json_data.close() inside a `with` block,
    which is redundant.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        iam_type=dict(
            default=None, required=True, choices=['user', 'group', 'role']),
        state=dict(
            default=None, required=True, choices=['present', 'absent']),
        iam_name=dict(default=None, required=False),
        policy_name=dict(default=None, required=True),
        policy_document=dict(default=None, required=False),
        policy_json=dict(type='json', default=None, required=False),
        skip_duplicates=dict(type='bool', default=True, required=False)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state').lower()
    iam_type = module.params.get('iam_type').lower()
    name = module.params.get('iam_name')
    policy_name = module.params.get('policy_name')
    skip = module.params.get('skip_duplicates')

    # policy_document and policy_json are mutually exclusive document sources
    if module.params.get('policy_document') is not None and module.params.get('policy_json') is not None:
        module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')

    if module.params.get('policy_document') is not None:
        # `with` closes the file automatically; no explicit close() needed
        with open(module.params.get('policy_document'), 'r') as json_data:
            pdoc = json.dumps(json.load(json_data))
    elif module.params.get('policy_json') is not None:
        pdoc = module.params.get('policy_json')
        # if its a string, assume it is already JSON
        if not isinstance(pdoc, string_types):
            try:
                pdoc = json.dumps(pdoc)
            except Exception as e:
                module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
    else:
        pdoc = None

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    changed = False

    if iam_type == 'user':
        changed, user_name, current_policies = user_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, user_name=name, policies=current_policies)
    elif iam_type == 'role':
        changed, role_name, current_policies = role_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, role_name=name, policies=current_policies)
    elif iam_type == 'group':
        changed, group_name, current_policies, msg = group_action(module, iam, name,
                                                                  policy_name, skip, pdoc,
                                                                  state)
        module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/iam_policy.py | Python | bsd-3-clause | 14,055 |
#-------------------------------------------------------------------------------
#License GPL v3.0
#Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
#Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
#Universidad del Pais Vasco UPV/EHU
#Use this at your own risk!
#-------------------------------------------------------------------------------
#from IPython.core.debugger import Tracer; debug_here = Tracer()
import os
import sys
import numpy as np
import nibabel as nib
import aizkolari_utils as au
def get_stats_fnames (groupnames, outdir=''):
    """Build per-group file names for the sums/mean/var/std statistic volumes.

    Parameters
    ----------
    groupnames: str or sequence of str
    outdir: optional directory prefix for the returned paths

    Returns [statfnames, mnames]: an (ngroups x 4) array of paths and the
    list of measure name suffixes, in the same column order.
    """
    if np.ndim(groupnames) == 0:
        groupnames = [groupnames]
    if outdir:
        # bug fix: was 'outdir += outdir + os.path.sep', which duplicated the
        # directory path in every generated file name
        outdir += os.path.sep
    mnames = [au.sums_str(), au.mean_str(), au.var_str(), au.std_str()]
    ngroups = len(groupnames)
    statfnames = np.zeros ([ngroups, len(mnames)], dtype=np.dtype('a2000'))
    for g in np.arange(ngroups):
        group = groupnames[g]
        for m in np.arange(len(mnames)):
            measure = mnames[m]
            statfnames[g,m] = outdir + group + '_' + measure + au.ext_str()
    return [statfnames, mnames]
#-------------------------------------------------------------------------------
def merge_stats_slices (datadir, group):
    """Merge a group's per-slice statistic volumes into whole-brain volumes.

    Deletes the per-slice 4D group files, then merges each statistic
    (sums/mean/var/std) across slices. Returns the merged file paths.
    """
    slice_str = au.slice_str()
    groupfregex = group + 's_' + slice_str + '_????' + '.'
    #This is a 4D volume with all subjects, it can be a big file, so I'm not creating it
    #merge_slices (datadir, groupfregex, group + 's')
    au.imrm(datadir + os.path.sep + groupfregex)
    [statfnames, mnames] = get_stats_fnames (group, outdir='')
    # get_stats_fnames was called with a single group: take its first row
    statfnames = statfnames[0]
    out = []
    for i in np.arange(len(statfnames)):
        fname = statfnames[i]
        m = mnames[i]
        regex = group + 's_' + slice_str + '_????' + '_' + m
        o = merge_slices (datadir, regex , fname, datadir, cleanup=False)
        # remove the per-slice statistic files once merged
        au.imrm(datadir + os.path.sep + regex)
        out.append(o)
    return out
#-------------------------------------------------------------------------------
def merge_slices (datadir, fileregex, outfname, outdir='', cleanup=True):
    """Concatenate slice volumes matching fileregex along z via `fslmerge -z`.

    Returns the output file path, or '' when no input files matched.
    NOTE(review): the fslmerge command line is built by string concatenation
    and run through os.system; paths with shell metacharacters would break or
    be interpreted by the shell.
    """
    if not outdir:
        outdir = datadir
    au.log.info ('Merging the ' + fileregex + ' files in ' + outdir)
    fregex = datadir + os.path.sep + fileregex
    imglob = ''
    imglob = au.exec_comm(['imglob', fregex])
    imglob = imglob.strip()
    outdata = ''
    if imglob:
        if os.path.isabs (outfname): outdata = outfname
        else: outdata = outdir + os.path.sep + outfname
        os.system('fslmerge -z ' + outdata + ' ' + imglob)
        if cleanup:
            au.imrm(fregex)
    else:
        au.log.error ('aizkolari_postproc: Error: could not find ' + fregex + ' in ' + datadir)
    return outdata
#-------------------------------------------------------------------------------
def group_stats (datadir, groupname, groupsize, outdir=''):
    """Compute sums/mean/var/std volumes for every slice file of a group.

    Counts the group's slice files in datadir and runs volstats on each.
    """
    lst = os.listdir(datadir)
    n = au.count_match(lst, groupname + 's_' + au.slice_regex() + au.ext_str())
    if not outdir:
        outdir = datadir
    au.log.info ('Calculating stats from group ' + groupname + ' in ' + outdir)
    for i in range(n):
        slino = au.zeropad(i)
        dataf = datadir + os.path.sep + groupname + 's_' + au.slice_str() + '_' + slino + au.ext_str()
        volstats (dataf, groupname, groupsize, outdir)
#-------------------------------------------------------------------------------
def volstats (invol, groupname, groupsize, outdir=''):
    """Write sums/mean/var/std volumes (reduced over axis 3) of a 4D image.

    Files are only written when missing; returns the four output paths.
    NOTE(review): groupname and groupsize are accepted but unused in this body.
    """
    slicesdir = os.path.dirname(invol)
    if not outdir:
        outdir = slicesdir
    base = os.path.basename(au.remove_ext(invol))
    outmeanf = outdir + os.path.sep + base + '_' + au.mean_str()
    outvarf = outdir + os.path.sep + base + '_' + au.var_str()
    outstdf = outdir + os.path.sep + base + '_' + au.std_str()
    outsumsf = outdir + os.path.sep + base + '_' + au.sums_str()
    vol = nib.load(invol).get_data()
    aff = nib.load(invol).get_affine()
    if not os.path.exists(outmeanf):
        mean = np.mean(vol, axis=3)
        au.save_nibabel(outmeanf, mean, aff)
    if not os.path.exists(outstdf):
        std = np.std(vol, axis=3)
        au.save_nibabel(outstdf, std, aff)
    if not os.path.exists(outvarf):
        var = np.var(vol, axis=3)
        au.save_nibabel(outvarf, var, aff)
    if not os.path.exists(outsumsf):
        sums = np.sum(vol, axis=3)
        au.save_nibabel(outsumsf, sums, aff)
    return [outsumsf,outmeanf,outvarf,outstdf]
#-------------------------------------------------------------------------------
def remove_subject_from_stats (meanfname, varfname, samplesize, subjvolfname, newmeanfname, newvarfname, newstdfname=''):
    """Recompute group mean/variance (and optionally std) with one subject removed.

    Reads the group's mean and variance volumes and the subject's volume,
    removes the subject's contribution analytically (without reloading all
    subjects), and writes the updated volumes.

    NOTE(review): expressions like n/(n-1) assume true division; if this runs
    under Python 2 with an integer samplesize they would truncate -- confirm
    samplesize/interpreter assumptions.
    """
    meanfname = au.add_extension_if_needed(meanfname, au.ext_str())
    varfname = au.add_extension_if_needed(varfname, au.ext_str())
    subjvolfname = au.add_extension_if_needed(subjvolfname, au.ext_str())
    newmeanfname = au.add_extension_if_needed(newmeanfname, au.ext_str())
    newvarfname = au.add_extension_if_needed(newvarfname, au.ext_str())
    if newstdfname:
        newstdfname = au.add_extension_if_needed(newstdfname, au.ext_str())
    #load data
    n = samplesize
    meanv = nib.load(meanfname).get_data()
    varv = nib.load( varfname).get_data()
    subjv = nib.load(subjvolfname).get_data()
    aff = nib.load(meanfname).get_affine()
    #calculate new mean: ((oldmean*N) - x)/(N-1)
    newmean = meanv.copy()
    newmean = ((newmean * n) - subjv)/(n-1)
    newmean = np.nan_to_num(newmean)
    #calculate new variance:
    # oldvar = (n/(n-1)) * (sumsquare/n - oldmu^2)
    # s = ((oldvar * (n/(n-1)) ) + oldmu^2) * n
    # newvar = ((n-1)/(n-2)) * (((s - x^2)/(n-1)) - newmu^2)
    s = varv.copy()
    s = ((s * (n/(n-1)) ) + np.square(meanv)) * n
    newvar = ((n-1)/(n-2)) * (((s - np.square(subjv))/(n-1)) - np.square(newmean))
    newvar = np.nan_to_num(newvar)
    #save nifti files
    au.save_nibabel (newmeanfname, newmean, aff)
    au.save_nibabel (newvarfname , newvar, aff)
    #calculate new standard deviation: sqrt(newvar)
    if newstdfname:
        newstd = np.sqrt(newvar)
        newstd = np.nan_to_num(newstd)
        au.save_nibabel (newstdfname, newstd, aff)
#(distance_func, mdir, classnames, gsize, chkf, foldno, expname, absval, leave, exsubf, exclas)
#-------------------------------------------------------------------------------
def group_distance (measure_function, datadir, groups, groupsizes, chkf, absolute=False, outdir='', foldno='', expname='', exclude_idx=-1, exclude_subj='', exclude_subjclass=''):
    """Run a pairwise distance measure between every pair of groups.

    For each (g1, g2) pair, feeds the groups' mean/var/std volume paths into
    measure_function. Completed steps are tracked in the checklist file chkf
    and skipped on re-runs. If exclude_idx > -1, the excluded subject is
    first removed from its group's statistics (once; see subject_excluded).
    Returns the last output file name produced by measure_function.

    NOTE(review): measure_function.func_name is Python 2 syntax; foldno and
    expname are accepted but unused in this body.
    """
    olddir = os.getcwd()
    if not outdir:
        outdir = datadir
    ngroups = len(groups)
    #matrix of strings of 2000 characters maximum, to save filepaths
    gfnames = np.zeros ([ngroups,3], dtype=np.dtype('a2000'))
    subject_excluded = False
    for g1 in range(ngroups):
        g1name = groups[g1]
        #mean1fname
        gfnames[g1,0] = datadir + os.path.sep + g1name + '_' + au.mean_str()
        #var1fname
        gfnames[g1,1] = datadir + os.path.sep + g1name + '_' + au.var_str()
        #std1fname
        gfnames[g1,2] = datadir + os.path.sep + g1name + '_' + au.std_str()
        for g2 in range(g1+1, ngroups):
            g2name = groups[g2]
            gfnames[g2,0] = datadir + os.path.sep + g2name + '_' + au.mean_str()
            gfnames[g2,1] = datadir + os.path.sep + g2name + '_' + au.var_str()
            gfnames[g2,2] = datadir + os.path.sep + g2name + '_' + au.std_str()
            experiment = g1name + '_vs_' + g2name
            #check if exclude_subjclass is any of both current groups
            eg = -1
            if exclude_idx > -1:
                if exclude_subjclass == g1name: eg = g2
                elif exclude_subjclass == g2name: eg = g1
            step = au.measure_str() + ' ' + measure_function.func_name + ' ' + experiment + ' ' + datadir
            #remove subject from stats
            if eg > -1:
                exclude_str = '_' + au.excluded_str() + str(exclude_idx)
                step += exclude_str
                experiment += exclude_str
                if not au.is_done(chkf, step):
                    if not subject_excluded:
                        newmeanfname = gfnames[eg,0] + exclude_str
                        newvarfname = gfnames[eg,1] + exclude_str
                        newstdfname = gfnames[eg,2] + exclude_str
                        rstep = au.remove_str() + ' ' + au.subject_str() + ' ' + str(exclude_subj) + ' ' + au.fromstats_str() + ' ' + datadir
                        if not au.is_done(chkf, rstep):
                            #(meanfname, varfname, samplesize, subjvolfname, newmeanfname, newvarfname, newstdfname='')
                            remove_subject_from_stats (gfnames[eg,0], gfnames[eg,1], groupsizes[eg][1], exclude_subj, newmeanfname, newvarfname, newstdfname)
                            au.checklist_add (chkf, rstep)
                        # point the group's stat paths at the subject-excluded versions
                        gfnames[eg,0] += exclude_str
                        gfnames[eg,1] += exclude_str
                        gfnames[eg,2] += exclude_str
                        groupsizes[eg][1] -= 1
                        subject_excluded = True
            #calculating distance
            if not au.is_done(chkf, step):
                mean1fname = au.add_extension_if_needed (gfnames[g1,0], au.ext_str())
                mean2fname = au.add_extension_if_needed (gfnames[g2,0], au.ext_str())
                var1fname = au.add_extension_if_needed (gfnames[g1,1], au.ext_str())
                var2fname = au.add_extension_if_needed (gfnames[g2,1], au.ext_str())
                std1fname = au.add_extension_if_needed (gfnames[g1,2], au.ext_str())
                std2fname = au.add_extension_if_needed (gfnames[g2,2], au.ext_str())
                outfname = measure_function (mean1fname, mean2fname, var1fname, var2fname, std1fname, std2fname, groupsizes[g1][1], groupsizes[g2][1], experiment, outdir, exclude_idx)
                if absolute:
                    change_to_absolute_values (outfname)
                au.checklist_add (chkf, step)
    return outfname
#-------------------------------------------------------------------------------
def change_to_absolute_values (niifname, outfname=''):
    """Replace a NIfTI volume's data by its element-wise absolute value.

    Writes to outfname, or in place when outfname is empty.

    Fixes: narrowed the bare `except:` to Exception, and the log call -- the
    original passed sys.exc_info()[0] as a stray logging argument with no
    %s placeholder, so the exception type was never rendered.
    """
    niifname = au.add_extension_if_needed(niifname, au.ext_str())
    if not outfname:
        outfname = niifname
    try:
        #load data
        vol = nib.load(niifname).get_data()
        aff = nib.load(niifname).get_affine()
        vol = np.abs(vol)
        #save nifti file
        au.save_nibabel (outfname, vol, aff)
    except Exception:
        au.log.error ("Change_to_absolute_values:: Unexpected error: %s", sys.exc_info()[0])
        raise
| alexsavio/aizkolari | aizkolari_postproc.py | Python | bsd-3-clause | 10,748 |
from datetime import timedelta, datetime
from django.core.management.base import BaseCommand, CommandError
from dasdocc.aggregator.models import Feed
try: from settings import TRASH_EXPIRATION
except ImportError: from dasdocc.aggregator.aggregator_settings import TRASH_EXPIRATION
class Command(BaseCommand):
    """Management command that purges trashed feeds past their expiration."""
    help = 'trashes all invalid feeds'

    def handle(self, *args, **options):
        """Delete every trashed Feed older than TRASH_EXPIRATION seconds."""
        cutoff = datetime.now() - timedelta(seconds=TRASH_EXPIRATION)
        Feed.trashed.filter(trashed_at__lte=cutoff).delete()
| JohnRandom/django-aggregator | dasdocc/aggregator/management/commands/trashfeeds.py | Python | bsd-3-clause | 549 |
from __future__ import print_function
import filecmp
import glob
import itertools
import os
import sys
import sysconfig
import tempfile
import unittest
# Locations of the project source and test trees, resolved relative to this
# file so the suite can run from any working directory.
project_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
src_dir = os.path.join(project_dir, 'python')
test_dir = os.path.join(project_dir, 'tests')
python_exe = sys.executable or 'python'
bro_path = os.path.join(src_dir, 'bro.py')
# Command-line prefix used to invoke the bro.py tool under test.
BRO_ARGS = [python_exe, bro_path]
# Get the platform/version-specific build folder.
# By default, the distutils build base is in the same location as setup.py.
platform_lib_name = 'lib.{platform}-{version[0]}.{version[1]}'.format(
    platform=sysconfig.get_platform(), version=sys.version_info)
build_dir = os.path.join(project_dir, 'bin', platform_lib_name)
# Prepend the build folder to sys.path and the PYTHONPATH environment variable.
if build_dir not in sys.path:
    sys.path.insert(0, build_dir)
# Environment for subprocesses: they must also see the build folder.
TEST_ENV = os.environ.copy()
if 'PYTHONPATH' not in TEST_ENV:
    TEST_ENV['PYTHONPATH'] = build_dir
else:
    TEST_ENV['PYTHONPATH'] = build_dir + os.pathsep + TEST_ENV['PYTHONPATH']
TESTDATA_DIR = os.path.join(test_dir, 'testdata')
TESTDATA_FILES = [
    'empty',  # Empty file
    '10x10y',  # Small text
    'alice29.txt',  # Large text
    'random_org_10k.bin',  # Small data
    'mapsdatazrh',  # Large data
]
TESTDATA_PATHS = [os.path.join(TESTDATA_DIR, f) for f in TESTDATA_FILES]
TESTDATA_PATHS_FOR_DECOMPRESSION = glob.glob(
    os.path.join(TESTDATA_DIR, '*.compressed'))
# Shared scratch directory; per-test artifacts are removed in TestCase.tearDown.
TEMP_DIR = tempfile.mkdtemp()
def get_temp_compressed_name(filename):
    """Return the TEMP_DIR path used for the compressed copy of *filename*."""
    base = os.path.basename(filename + '.bro')
    return os.path.join(TEMP_DIR, base)
def get_temp_uncompressed_name(filename):
    """Return the TEMP_DIR path used for the round-tripped copy of *filename*."""
    base = os.path.basename(filename + '.unbro')
    return os.path.join(TEMP_DIR, base)
def bind_method_args(method, *args, **kwargs):
    """Freeze *args*/*kwargs* onto *method*, leaving only ``self`` free."""
    def bound(self):
        return method(self, *args, **kwargs)
    return bound
def generate_test_methods(test_case_class,
                          for_decompression=False,
                          variants=None):
    """Attach one concrete ``test_*`` method per (template, data file, options).

    Scans *test_case_class* for methods whose names start with ``_test`` and
    registers a bound test for every data file (and, when *variants* is
    given, every combination of its values).  This makes identifying
    problems with specific compression scenarios easier.
    """
    data_files = (TESTDATA_PATHS_FOR_DECOMPRESSION if for_decompression
                  else TESTDATA_PATHS)
    if variants:
        # Cartesian product over all variant values, keyed by option name.
        per_option = [[(key, value) for value in values]
                      for key, values in variants.items()]
        option_sets = []
        for combo in itertools.product(*per_option):
            label = '_'.join(str(part) for part in itertools.chain(*combo))
            option_sets.append((label, dict(combo)))
    else:
        option_sets = [('', {})]
    templates = [m for m in dir(test_case_class) if m.startswith('_test')]
    for template_name in templates:
        for data_file in data_files:
            for label, options in option_sets:
                stem = os.path.splitext(os.path.basename(data_file))[0]
                test_name = 'test_{method}_{options}_{file}'.format(
                    method=template_name, options=label, file=stem)
                template = getattr(test_case_class, template_name)
                setattr(test_case_class, test_name,
                        bind_method_args(template, data_file, **options))
class TestCase(unittest.TestCase):
    """Base test case that removes generated temp files after each test."""

    def tearDown(self):
        # Best-effort cleanup: a missing artifact is not an error.
        for path in TESTDATA_PATHS:
            for temp_name in (get_temp_compressed_name(path),
                              get_temp_uncompressed_name(path)):
                try:
                    os.unlink(temp_name)
                except OSError:
                    pass

    def assertFilesMatch(self, first, second):
        """Assert that two files have byte-identical content."""
        identical = filecmp.cmp(first, second, shallow=False)
        self.assertTrue(identical,
                        'File {} differs from {}'.format(first, second))
| youtube/cobalt | third_party/brotli/python/tests/_test_utils.py | Python | bsd-3-clause | 3,695 |
import os
import glob
import shutil
from nose.tools import (assert_equal, assert_is_not_none)
from qipipe.staging.map_ctp import CTPPatientIdMap
from ...helpers.logging import logger
COLLECTION = 'Sarcoma'
"""The test collection."""
SUBJECTS = ["Sarcoma%03d" % i for i in range(8, 12)]
"""The test subjects."""
# Raw string keeps the regex escapes (\d, \s, \w) out of Python's
# string-literal escape processing; the non-raw form triggers invalid
# escape-sequence warnings on modern Python while producing the same value.
PAT = r"ptid/(Sarcoma\d{3})\s*=\s*QIN-\w+-\d{2}-(\d{4})"
"""The CTP map pattern specified by the QIN TCIA curator."""
class TestMapCTP(object):
    """Map CTP unit tests."""

    def test_map_ctp(self):
        logger(__name__).debug("Testing Map CTP on %s..." % SUBJECTS)
        id_map = CTPPatientIdMap()
        id_map.add_subjects(COLLECTION, *SUBJECTS)
        for subject in SUBJECTS:
            mapped = id_map.get(subject)
            assert_is_not_none(mapped, "Subject was not mapped: %s" % subject)
            # The QIN subject number must survive the mapping to the CTP id.
            expected = int(subject[-2:])
            actual = int(mapped[-4:])
            assert_equal(actual, expected,
                         "Patient number incorrect; expected: %d found: %d"
                         % (expected, actual))
if __name__ == "__main__":
    # Allow running this test module directly through the nose runner.
    import nose
    nose.main(defaultTest=__name__)
| ohsu-qin/qipipe | test/unit/staging/test_map_ctp.py | Python | bsd-2-clause | 1,122 |
from database.models.ApiUser import ApiUser
from flask_mail import Message
from flask_app.flask_app import config, mail
from flask_app.flask_app import db
from flask import Blueprint, request, abort, Response, render_template, jsonify
import json
import datetime
from utils.email_utils import EmailValidator
# Blueprint grouping the registration/handshake security endpoints.
security_bp = Blueprint('security', __name__)
@security_bp.route("/register", methods=['POST'])
def register():
    """Register a new API user and e-mail them their API key.

    Expects a JSON body with an ``email`` field.  Responds 201 on success,
    409 when the address is already registered or fails validation, and
    500 when the confirmation e-mail cannot be sent.
    """
    email = json.loads(request.data)['email']
    email_validator = EmailValidator()
    email_validator.validate(email)
    # Reject duplicates with an existence check; there is no need to
    # materialize every matching row just to count them.
    if ApiUser.query.filter_by(email=email).first() is not None:
        abort(409)
    if not email_validator.is_valid:
        abort(409)
    # Only create the user once the address passed validation; the row is
    # committed together with a successful e-mail send below.
    user = ApiUser(email)
    db.session.add(user)
    msg = Message(subject="StarTrek Corpora registration",
                  sender=config.MAIL_OPTIONS['MAIL_USERNAME'],
                  recipients=[email])
    msg.html = render_template("registration.html", api_key=user.key)
    try:
        mail.send(msg)
        db.session.commit()
        return Response(status=201, mimetype='application/json')
    except Exception:
        # Narrowed from a bare ``except`` so interpreter-exit signals
        # propagate; any send/commit failure is reported as a server error.
        abort(500)
@security_bp.route("/handshake", methods=['POST'])
def handshake():
    """Confirm an API key sent in the ``Authorization`` header.

    Returns 200 with the account details for a known key, 404 with a JSON
    error body for an unknown key, and 401 when the header is missing.
    """
    passed_key = request.headers.get('Authorization')
    if not passed_key:
        abort(401)
    current_user = ApiUser.query.filter_by(key=passed_key).first()
    if current_user:
        # A successful handshake marks the account as confirmed
        # (`confirmed or True` in the old code was always True anyway).
        current_user.confirmed = True
        db.session.commit()
        return jsonify({
            "timestamps": datetime.datetime.now(),
            "id": current_user.id,
            "response": {
                "apiKey": current_user.key,
                "associatedEmail": current_user.email},
            "error": None
        }
        )
    # jsonify() already builds a Response; wrapping its result in another
    # Response (as the previous code did) would serialize the Response
    # object itself instead of the JSON payload.
    response = jsonify({
        "timestamps": datetime.datetime.now(),
        "id": None,
        "response": {
            "apiKey": None,
            "associatedEmail": None},
        "error": {
            "code": "NoSuchUserError",
            "message": "The key you sent was not recognized"
        }
    }
    )
    response.status_code = 404
    return response
| MaximeGir/StarTrekCorpora | api/blueprints/security_api_bp.py | Python | mit | 2,329 |
#-*- coding: utf-8 -*-
"""OAuth 2.0 Django Models"""
import time
from hashlib import sha512
from uuid import uuid4
from django.db import models
from django.contrib.auth.models import User
from .consts import CLIENT_KEY_LENGTH, CLIENT_SECRET_LENGTH
from .consts import SCOPE_LENGTH
from .consts import ACCESS_TOKEN_LENGTH, REFRESH_TOKEN_LENGTH
from .consts import ACCESS_TOKEN_EXPIRATION, MAC_KEY_LENGTH, REFRESHABLE
from .consts import CODE_KEY_LENGTH, CODE_EXPIRATION
from djangotoolbox.fields import ListField
class TimestampGenerator(object):
    """Callable that returns the current UNIX time as an integer.

    **Kwargs:**

    * *seconds:* A integer indicating how many seconds in the future the
      timestamp should be. *Default 0*

    *Returns int*
    """
    def __init__(self, seconds=0):
        self.seconds = seconds

    def __call__(self):
        now = int(time.time())
        return now + self.seconds
class KeyGenerator(object):
    """Callable Key Generator that returns a random keystring.

    **Args:**

    * *length:* A integer indicating how long the key should be.

    *Returns str*
    """
    def __init__(self, length):
        self.length = length

    def __call__(self):
        # uuid4().hex is text; hash its ASCII bytes so this works on both
        # Python 2 and Python 3 (hashlib rejects text input on Python 3),
        # producing byte-identical keys on either interpreter.
        return sha512(uuid4().hex.encode('ascii')).hexdigest()[0:self.length]
class Client(models.Model):
    """Stores client authentication data.

    **Args:**

    * *name:* A string representing the client name.
    * *user:* A django.contrib.auth.models.User object representing the client
      owner.

    **Kwargs:**

    * *description:* A string representing the client description.
      *Default None*
    * *key:* A string representing the client key. *Default 30 character
      random string*
    * *secret:* A string representing the client secret. *Default 30 character
      random string*
    * *redirect_uri:* A string representing the client redirect_uri.
      *Default None*
    """
    name = models.CharField(max_length=256)
    user = models.ForeignKey(User)
    description = models.TextField(null=True, blank=True)
    # KeyGenerator instances are callable, so Django evaluates the default
    # per row, giving every client a fresh random key/secret.
    key = models.CharField(
        unique=True,
        max_length=CLIENT_KEY_LENGTH,
        default=KeyGenerator(CLIENT_KEY_LENGTH),
        db_index=True)
    secret = models.CharField(
        unique=True,
        max_length=CLIENT_SECRET_LENGTH,
        default=KeyGenerator(CLIENT_SECRET_LENGTH))
    redirect_uri = models.URLField(null=True)
class AccessRange(models.Model):
    """Stores access range data, also known as scope.

    **Args:**

    * *key:* A string representing the access range scope. Used in access
      token requests.

    **Kwargs:**

    * *description:* A string representing the access range description.
      *Default None*
    """
    # The scope identifier clients send in token requests.
    key = models.CharField(unique=True, max_length=SCOPE_LENGTH, db_index=True)
    description = models.TextField(blank=True)
class AccessToken(models.Model):
    """Stores access token data.

    **Args:**

    * *client:* A oauth2app.models.Client object
    * *user:* A django.contrib.auth.models.User object

    **Kwargs:**

    * *token:* A string representing the access key token. *Default 10
      character random string*
    * *refresh_token:* A string representing the access key token. *Default 10
      character random string*
    * *mac_key:* A string representing the MAC key. *Default None*
    * *expire:* A positive integer timestamp representing the access token's
      expiration time.
    * *scope:* A list of oauth2app.models.AccessRange objects. *Default None*
    * *refreshable:* A boolean that indicates whether this access token is
      refreshable. *Default False*
    """
    client = models.ForeignKey(Client)
    user = models.ForeignKey(User)
    # Callable defaults (KeyGenerator/TimestampGenerator instances) are
    # invoked by Django for every new row.
    token = models.CharField(
        unique=True,
        max_length=ACCESS_TOKEN_LENGTH,
        default=KeyGenerator(ACCESS_TOKEN_LENGTH),
        db_index=True)
    refresh_token = models.CharField(
        unique=True,
        blank=True,
        null=True,
        max_length=REFRESH_TOKEN_LENGTH,
        default=KeyGenerator(REFRESH_TOKEN_LENGTH),
        db_index=True)
    mac_key = models.CharField(
        blank=True,
        null=True,
        max_length=MAC_KEY_LENGTH,
        default=None)
    # UNIX timestamps: `issue` is set at creation, `expire` is issue + TTL.
    issue = models.PositiveIntegerField(
        editable=False,
        default=TimestampGenerator())
    expire = models.PositiveIntegerField(
        default=TimestampGenerator(ACCESS_TOKEN_EXPIRATION))
    scope = ListField()
    refreshable = models.BooleanField(default=REFRESHABLE)
class Code(models.Model):
    """Stores authorization code data.

    **Args:**

    * *client:* A oauth2app.models.Client object
    * *user:* A django.contrib.auth.models.User object

    **Kwargs:**

    * *key:* A string representing the authorization code. *Default 30
      character random string*
    * *expire:* A positive integer timestamp representing the access token's
      expiration time.
    * *redirect_uri:* A string representing the redirect_uri provided by the
      requesting client when the code was issued. *Default None*
    * *scope:* A list of oauth2app.models.AccessRange objects. *Default None*
    """
    client = models.ForeignKey(Client)
    user = models.ForeignKey(User)
    key = models.CharField(
        unique=True,
        max_length=CODE_KEY_LENGTH,
        default=KeyGenerator(CODE_KEY_LENGTH),
        db_index=True)
    # UNIX timestamps: authorization codes are short-lived (CODE_EXPIRATION).
    issue = models.PositiveIntegerField(
        editable=False,
        default=TimestampGenerator())
    expire = models.PositiveIntegerField(
        default=TimestampGenerator(CODE_EXPIRATION))
    redirect_uri = models.URLField(null=True)
    scope = ListField()
class MACNonce(models.Model):
    """Stores Nonce strings for use with MAC Authentication.

    **Args:**

    * *access_token:* A oauth2app.models.AccessToken object
    * *nonce:* A unique nonce string.
    """
    access_token = models.ForeignKey(AccessToken)
    # Indexed for fast replay-detection lookups.
    nonce = models.CharField(max_length=30, db_index=True)
| xrage/oauth2app-mongoDb | oauth2app/models.py | Python | mit | 5,945 |
#! /usr/bin/python2.7
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision: 85213 $"
# $Source$
version = "0.9.0"
__author__ = "Lars Gustäbel (lars@gustaebel.de)"
__date__ = "$Date: 2010-10-04 10:37:53 -0500 (Mon, 04 Oct 2010) $"
__cvsid__ = "$Id: tarfile.py 85213 2010-10-04 15:37:53Z lars.gustaebel $"
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
import operator
from duplicity import cached_ops
grp = pwd = cached_ops
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0"                      # the null character
BLOCKSIZE = 512                 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20     # length of records
GNU_MAGIC = "ustar \0"          # magic gnu tar string
POSIX_MAGIC = "ustar\x0000"     # magic posix tar string
LENGTH_NAME = 100               # maximum length of a filename
LENGTH_LINK = 100               # maximum length of a linkname
LENGTH_PREFIX = 155             # maximum length of the prefix field
REGTYPE = "0"                   # regular file
AREGTYPE = "\0"                 # regular file
LNKTYPE = "1"                   # link (inside tarfile)
SYMTYPE = "2"                   # symbolic link
CHRTYPE = "3"                   # character special device
BLKTYPE = "4"                   # block special device
DIRTYPE = "5"                   # directory
FIFOTYPE = "6"                  # fifo special device
CONTTYPE = "7"                  # contiguous file
GNUTYPE_LONGNAME = "L"          # GNU tar longname
GNUTYPE_LONGLINK = "K"          # GNU tar longlink
GNUTYPE_SPARSE = "S"            # GNU tar sparse file
XHDTYPE = "x"                   # POSIX.1-2001 extended header
XGLTYPE = "g"                   # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X"           # Solaris extended header
USTAR_FORMAT = 0                # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1                  # GNU tar format
PAX_FORMAT = 2                  # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
                   SYMTYPE, DIRTYPE, FIFOTYPE,
                   CONTTYPE, CHRTYPE, BLKTYPE,
                   GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
                   GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
                 CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
             GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
              "uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
    "atime": float,
    "ctime": float,
    "mtime": float,
    "uid": int,
    "gid": int,
    "size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
# (Python 2 octal literal syntax: 0NNN.)
#---------------------------------------------------------
S_IFLNK = 0120000        # symbolic link
S_IFREG = 0100000        # regular file
S_IFBLK = 0060000        # block device
S_IFDIR = 0040000        # directory
S_IFCHR = 0020000        # character device
S_IFIFO = 0010000        # fifo
TSUID = 04000          # set UID on execution
TSGID = 02000          # set GID on execution
TSVTX = 01000          # reserved
TUREAD = 0400           # read by owner
TUWRITE = 0200           # write by owner
TUEXEC = 0100           # execute/search by owner
TGREAD = 0040           # read by group
TGWRITE = 0020           # write by group
TGEXEC = 0010           # execute/search by group
TOREAD = 0004           # read by other
TOWRITE = 0002           # write by other
TOEXEC = 0001           # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
# Encoding used for header fields; fall back to the default codec when the
# filesystem encoding is unknown.
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
    ENCODING = sys.getdefaultencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length):
    """Convert a python string to a null-terminated string buffer.

    The result is exactly *length* bytes: the string is truncated when too
    long and NUL-padded when too short.
    """
    padding = NUL * (length - len(s))
    return s[:length] + padding
def nts(s):
    """Convert a null-terminated string field to a python string.

    Everything up to (but excluding) the first NUL byte is the value; a
    field without a NUL is returned unchanged.
    """
    end = s.find("\0")
    if end < 0:
        return s
    return s[:end]
def nti(s):
    """Convert a number field to a python number.
    """
    # There are two possible encodings for a number field, see
    # itn() below.
    if s[0] != chr(0200):
        # Octal digits, possibly NUL/space terminated; an empty field
        # counts as zero.
        try:
            n = int(nts(s) or "0", 8)
        except ValueError:
            raise InvalidHeaderError("invalid header")
    else:
        # GNU base-256 representation: big-endian payload following the
        # 0o200 marker byte.
        n = 0L
        for i in xrange(len(s) - 1):
            n <<= 8
            n += ord(s[i + 1])
    return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
    """Convert a python number to a number field.

    Raises ValueError when the value cannot be represented in *digits*
    bytes for the requested *format*.
    """
    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte, this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0200 byte indicates this particular
    # encoding, the following digits-1 bytes are a big-endian
    # representation. This allows values up to (256**(digits-1))-1.
    if 0 <= n < 8 ** (digits - 1):
        s = "%0*o" % (digits - 1, n) + NUL
    else:
        if format != GNU_FORMAT or n >= 256 ** (digits - 1):
            raise ValueError("overflow in number field")
        if n < 0:
            # XXX We mimic GNU tar's behaviour with negative numbers,
            # this could raise OverflowError.
            n = struct.unpack("L", struct.pack("l", n))[0]
        # Emit the big-endian base-256 payload, low byte first built
        # by prepending, then the 0o200 marker.
        s = ""
        for i in xrange(digits - 1):
            s = chr(n & 0377) + s
            n >>= 8
        s = chr(0200) + s
    return s
def uts(s, encoding, errors):
    """Convert a unicode object to a string.

    When *errors* is the special value "utf-8", characters the target
    encoding cannot represent are replaced by their UTF-8 encoding
    instead of raising (similar to the -o invalid=UTF-8 option in
    POSIX.1-2001).
    """
    if errors != "utf-8":
        return s.encode(encoding, errors)
    try:
        return s.encode(encoding, "strict")
    except UnicodeEncodeError:
        chunks = []
        for ch in s:
            try:
                chunks.append(ch.encode(encoding, "strict"))
            except UnicodeEncodeError:
                chunks.append(ch.encode("utf8"))
        return "".join(chunks)
def calc_chksums(buf):
    """Calculate the checksum for a member's header by summing up all
       characters except for the chksum field which is treated as if
       it was filled with spaces. According to the GNU tar sources,
       some tars (Sun and NeXT) calculate chksum with signed char,
       which will be different if there are chars in the buffer with
       the high bit set. So we calculate two checksums, unsigned and
       signed.
    """
    # Bytes 148..156 hold the chksum field itself; 8 spaces sum to 256.
    head = buf[:148]
    tail = buf[156:512]
    unsigned_chksum = 256 + sum(struct.unpack("148B", head) + struct.unpack("356B", tail))
    signed_chksum = 256 + sum(struct.unpack("148b", head) + struct.unpack("356b", tail))
    return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
    """Copy length bytes from fileobj src to fileobj dst.
       If length is None, copy the entire content.

       Raises IOError when src is exhausted before *length* bytes
       could be copied.
    """
    if length == 0:
        return
    if length is None:
        shutil.copyfileobj(src, dst)
        return

    # Copy in fixed-size chunks so arbitrarily large members never have
    # to be held in memory at once.
    BUFSIZE = 16 * 1024
    blocks, remainder = divmod(length, BUFSIZE)
    for b in xrange(blocks):
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise IOError("end of file reached")
        dst.write(buf)

    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise IOError("end of file reached")
        dst.write(buf)
    return
# Lookup table for filemode(): one tuple of (bit-pattern, character)
# alternatives per position of the "-rwxrwxrwx"-style permission string;
# within a position the first matching pattern wins.
filemode_table = (
    ((S_IFLNK, "l"),
     (S_IFREG, "-"),
     (S_IFBLK, "b"),
     (S_IFDIR, "d"),
     (S_IFCHR, "c"),
     (S_IFIFO, "p")),

    ((TUREAD, "r"),),
    ((TUWRITE, "w"),),
    ((TUEXEC|TSUID, "s"),
     (TSUID, "S"),
     (TUEXEC, "x")),

    ((TGREAD, "r"),),
    ((TGWRITE, "w"),),
    ((TGEXEC|TSGID, "s"),
     (TSGID, "S"),
     (TGEXEC, "x")),

    ((TOREAD, "r"),),
    ((TOWRITE, "w"),),
    ((TOEXEC|TSVTX, "t"),
     (TSVTX, "T"),
     (TOEXEC, "x"))
)
def filemode(mode):
    """Convert a file's mode to a string of the form
       -rwxrwxrwx.
       Used by TarFile.list()
    """
    def _column(table):
        # First matching bit pattern in the column wins; no match => "-".
        for bit, char in table:
            if mode & bit == bit:
                return char
        return "-"
    return "".join(_column(table) for table in filemode_table)
# Exception hierarchy: TarError is the root; HeaderError and its subclasses
# distinguish the ways a 512-byte header block can be unusable.
class TarError(Exception):
    """Base exception."""
    pass
class ExtractError(TarError):
    """General exception for extract errors."""
    pass
class ReadError(TarError):
    """Exception for unreadble tar archives."""
    pass
class CompressionError(TarError):
    """Exception for unavailable compression methods."""
    pass
class StreamError(TarError):
    """Exception for unsupported operations on stream-like TarFiles."""
    pass
class HeaderError(TarError):
    """Base exception for header errors."""
    pass
class EmptyHeaderError(HeaderError):
    """Exception for empty headers."""
    pass
class TruncatedHeaderError(HeaderError):
    """Exception for truncated headers."""
    pass
class EOFHeaderError(HeaderError):
    """Exception for end of file headers."""
    pass
class InvalidHeaderError(HeaderError):
    """Exception for invalid headers."""
    pass
class SubsequentHeaderError(HeaderError):
    """Exception for missing and invalid extended headers."""
    pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
    """Low-level file object. Supports reading and writing.
       It is used instead of a regular file object for streaming
       access.
    """

    def __init__(self, name, mode):
        # Translate the one-letter mode into os.open() flags.
        mode = {
            "r": os.O_RDONLY,
            "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
        }[mode]
        if hasattr(os, "O_BINARY"):
            mode |= os.O_BINARY
        # 0666 (Python 2 octal literal) is further restricted by the umask.
        self.fd = os.open(name, mode, 0666)

    def close(self):
        os.close(self.fd)

    def read(self, size):
        return os.read(self.fd, size)

    def write(self, s):
        os.write(self.fd, s)
class _Stream:
    """Class that serves as an adapter between TarFile and
       a stream-like object.  The stream-like object only
       needs to have a read() or write() method and is accessed
       blockwise.  Use of gzip or bzip2 compression is possible.
       A stream-like object could be for example: sys.stdin,
       sys.stdout, a socket, a tape device etc.

       _Stream is intended to be used only internally.
    """

    def __init__(self, name, mode, comptype, fileobj, bufsize):
        """Construct a _Stream object.
        """
        self._extfileobj = True
        if fileobj is None:
            fileobj = _LowLevelFile(name, mode)
            self._extfileobj = False

        if comptype == '*':
            # Enable transparent compression detection for the
            # stream interface
            fileobj = _StreamProxy(fileobj)
            comptype = fileobj.getcomptype()

        self.name = name or ""
        self.mode = mode
        self.comptype = comptype
        self.fileobj = fileobj
        self.bufsize = bufsize
        self.buf = ""
        self.pos = 0L
        self.closed = False

        if comptype == "gz":
            try:
                import zlib
            except ImportError:
                raise CompressionError("zlib module is not available")
            self.zlib = zlib
            self.crc = zlib.crc32("") & 0xffffffffL
            if mode == "r":
                self._init_read_gz()
            else:
                self._init_write_gz()

        if comptype == "bz2":
            try:
                import bz2
            except ImportError:
                raise CompressionError("bz2 module is not available")
            if mode == "r":
                self.dbuf = ""
                self.cmp = bz2.BZ2Decompressor()
            else:
                self.cmp = bz2.BZ2Compressor()

    def __del__(self):
        if hasattr(self, "closed") and not self.closed:
            self.close()

    def _init_write_gz(self):
        """Initialize for writing with gzip compression.
        """
        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
                                         -self.zlib.MAX_WBITS,
                                         self.zlib.DEF_MEM_LEVEL,
                                         0)
        # Emit the gzip header by hand (magic, deflate method, FNAME flag,
        # timestamp) because the raw deflate stream above carries none.
        timestamp = struct.pack("<L", long(time.time()))
        self.__write("\037\213\010\010%s\002\377" % timestamp)
        if self.name.endswith(".gz"):
            self.name = self.name[:-3]
        self.__write(self.name + NUL)

    def write(self, s):
        """Write string s to the stream.
        """
        if self.comptype == "gz":
            self.crc = self.zlib.crc32(s, self.crc) & 0xffffffffL
        self.pos += len(s)
        if self.comptype != "tar":
            s = self.cmp.compress(s)
        self.__write(s)

    def __write(self, s):
        """Write string s to the stream if a whole new block
           is ready to be written.
        """
        self.buf += s
        while len(self.buf) > self.bufsize:
            self.fileobj.write(self.buf[:self.bufsize])
            self.buf = self.buf[self.bufsize:]

    def close(self):
        """Close the _Stream object. No operation should be
           done on it afterwards.
        """
        if self.closed:
            return

        if self.mode == "w" and self.comptype != "tar":
            self.buf += self.cmp.flush()

        if self.mode == "w" and self.buf:
            self.fileobj.write(self.buf)
            self.buf = ""
            if self.comptype == "gz":
                # The native zlib crc is an unsigned 32-bit integer, but
                # the Python wrapper implicitly casts that to a signed C
                # long.  So, on a 32-bit box self.crc may "look negative",
                # while the same crc on a 64-bit box may "look positive".
                # To avoid irksome warnings from the `struct` module, force
                # it to look positive on all boxes.
                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
                self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))

        if not self._extfileobj:
            self.fileobj.close()

        self.closed = True

    def _init_read_gz(self):
        """Initialize for reading a gzip compressed fileobj.
        """
        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
        self.dbuf = ""

        # taken from gzip.GzipFile with some alterations
        if self.__read(2) != "\037\213":
            raise ReadError("not a gzip file")
        if self.__read(1) != "\010":
            raise CompressionError("unsupported compression method")

        # Skip the optional gzip header fields flagged in the FLG byte:
        # extra field (4), original name (8), comment (16), header CRC (2).
        flag = ord(self.__read(1))
        self.__read(6)

        if flag & 4:
            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
            self.read(xlen)
        if flag & 8:
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 16:
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 2:
            self.__read(2)

    def tell(self):
        """Return the stream's file pointer position.
        """
        return self.pos

    def seek(self, pos=0):
        """Set the stream's file pointer to pos. Negative seeking
           is forbidden.
        """
        if pos - self.pos >= 0:
            # Forward seek is emulated by reading and discarding.
            blocks, remainder = divmod(pos - self.pos, self.bufsize)
            for i in xrange(blocks):
                self.read(self.bufsize)
            self.read(remainder)
        else:
            raise StreamError("seeking backwards is not allowed")
        return self.pos

    def read(self, size=None):
        """Return the next size number of bytes from the stream.
           If size is not defined, return all bytes of the stream
           up to EOF.
        """
        if size is None:
            t = []
            while True:
                buf = self._read(self.bufsize)
                if not buf:
                    break
                t.append(buf)
            buf = "".join(t)
        else:
            buf = self._read(size)
        self.pos += len(buf)
        return buf

    def _read(self, size):
        """Return size bytes from the stream.
        """
        if self.comptype == "tar":
            return self.__read(size)

        # Decompress raw blocks into self.dbuf until enough plaintext
        # is available to satisfy the request.
        c = len(self.dbuf)
        t = [self.dbuf]
        while c < size:
            buf = self.__read(self.bufsize)
            if not buf:
                break
            try:
                buf = self.cmp.decompress(buf)
            except IOError:
                raise ReadError("invalid compressed data")
            t.append(buf)
            c += len(buf)
        t = "".join(t)
        self.dbuf = t[size:]
        return t[:size]

    def __read(self, size):
        """Return size bytes from stream. If internal buffer is empty,
           read another block from the stream.
        """
        c = len(self.buf)
        t = [self.buf]
        while c < size:
            buf = self.fileobj.read(self.bufsize)
            if not buf:
                break
            t.append(buf)
            c += len(buf)
        t = "".join(t)
        self.buf = t[size:]
        return t[:size]
# class _Stream
class _StreamProxy(object):
    """Small proxy class that enables transparent compression
    detection for the Stream interface (mode 'r|*').
    """

    def __init__(self, fileobj):
        self.fileobj = fileobj
        # Buffer one block up front so the magic bytes can be inspected.
        self.buf = self.fileobj.read(BLOCKSIZE)

    def read(self, size):
        # The first call returns the buffered block; the method then
        # replaces itself with the underlying file's read, so the buffer
        # is served exactly once.
        self.read = self.fileobj.read
        return self.buf

    def getcomptype(self):
        """Guess the compression scheme from the buffered magic bytes."""
        for magic, comptype in (("\037\213\010", "gz"), ("BZh91", "bz2")):
            if self.buf.startswith(magic):
                return comptype
        return "tar"

    def close(self):
        self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
    """Small proxy class that enables external file object
       support for "r:bz2" and "w:bz2" modes. This is actually
       a workaround for a limitation in bz2 module's BZ2File
       class which (unlike gzip.GzipFile) has no support for
       a file object argument.
    """

    blocksize = 16 * 1024

    def __init__(self, fileobj, mode):
        self.fileobj = fileobj
        self.mode = mode
        self.name = getattr(self.fileobj, "name", None)
        self.init()

    def init(self):
        # (Re)create the (de)compressor; called again by seek() to
        # restart decompression from the beginning of the file.
        import bz2
        self.pos = 0
        if self.mode == "r":
            self.bz2obj = bz2.BZ2Decompressor()
            self.fileobj.seek(0)
            self.buf = ""
        else:
            self.bz2obj = bz2.BZ2Compressor()

    def read(self, size):
        # Decompress raw chunks until at least `size` bytes are buffered.
        b = [self.buf]
        x = len(self.buf)
        while x < size:
            raw = self.fileobj.read(self.blocksize)
            if not raw:
                break
            data = self.bz2obj.decompress(raw)
            b.append(data)
            x += len(data)
        self.buf = "".join(b)

        buf = self.buf[:size]
        self.buf = self.buf[size:]
        self.pos += len(buf)
        return buf

    def seek(self, pos):
        # Backward seek restarts decompression from scratch; forward seek
        # simply reads and discards.
        if pos < self.pos:
            self.init()
        self.read(pos - self.pos)

    def tell(self):
        return self.pos

    def write(self, data):
        self.pos += len(data)
        raw = self.bz2obj.compress(data)
        self.fileobj.write(raw)

    def close(self):
        if self.mode == "w":
            raw = self.bz2obj.flush()
            self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, sparse=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.sparse = sparse
self.position = 0
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
if self.sparse is None:
return self.readnormal(size)
else:
return self.readsparse(size)
def readnormal(self, size):
"""Read operation for regular files.
"""
self.fileobj.seek(self.offset + self.position)
self.position += size
return self.fileobj.read(size)
def readsparse(self, size):
"""Read operation for sparse files.
"""
data = []
while size > 0:
buf = self.readsparsesection(size)
if not buf:
break
size -= len(buf)
data.append(buf)
return "".join(data)
def readsparsesection(self, size):
"""Read a single section of a sparse file.
"""
section = self.sparse.find(self.position)
if section is None:
return ""
size = min(size, section.offset + section.size - self.position)
if isinstance(section, _data):
realpos = section.realpos + self.position - section.offset
self.fileobj.seek(self.offset + realpos)
self.position += size
return self.fileobj.read(size)
else:
self.position += size
return NUL * size
#class _FileInFile
class ExFileObject(object):
    """File-like object for reading an archive member.
       Is returned by TarFile.extractfile().
    """
    blocksize = 1024

    def __init__(self, tarfile, tarinfo):
        self.fileobj = _FileInFile(tarfile.fileobj,
                                   tarinfo.offset_data,
                                   tarinfo.size,
                                   getattr(tarinfo, "sparse", None))
        self.name = tarinfo.name
        self.mode = "r"
        self.closed = False
        self.size = tarinfo.size
        self.position = 0
        # Read-ahead buffer used by readline()/seek().
        self.buffer = ""

    def read(self, size=None):
        """Read at most size bytes from the file. If size is not
           present or None, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        buf = ""
        # Serve from the read-ahead buffer first, then from the file.
        if self.buffer:
            if size is None:
                buf = self.buffer
                self.buffer = ""
            else:
                buf = self.buffer[:size]
                self.buffer = self.buffer[size:]

        if size is None:
            buf += self.fileobj.read()
        else:
            buf += self.fileobj.read(size - len(buf))

        self.position += len(buf)
        return buf

    def readline(self, size=-1):
        """Read one entire line from the file. If size is present
           and non-negative, return a string with at most that
           size, which may be an incomplete line.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        if "\n" in self.buffer:
            pos = self.buffer.find("\n") + 1
        else:
            # Keep buffering blocks until a newline (or EOF) shows up.
            buffers = [self.buffer]
            while True:
                buf = self.fileobj.read(self.blocksize)
                buffers.append(buf)
                if not buf or "\n" in buf:
                    self.buffer = "".join(buffers)
                    pos = self.buffer.find("\n") + 1
                    if pos == 0:
                        # no newline found.
                        pos = len(self.buffer)
                    break

        if size != -1:
            pos = min(size, pos)

        buf = self.buffer[:pos]
        self.buffer = self.buffer[pos:]
        self.position += len(buf)
        return buf

    def readlines(self):
        """Return a list with all remaining lines.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result

    def tell(self):
        """Return the current file position.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        return self.position

    def seek(self, pos, whence=0):
        """Seek to a position in the file.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        # whence: 0 = absolute, 1 = relative, 2 = from EOF; the resulting
        # position is clamped to [0, size].
        if whence == 0:
            self.position = min(max(pos, 0), self.size)
        elif whence == 1:
            if pos < 0:
                self.position = max(self.position + pos, 0)
            else:
                self.position = min(self.position + pos, self.size)
        elif whence == 2:
            self.position = max(min(self.size + pos, self.size), 0)
        else:
            raise ValueError("Invalid argument")

        # Seeking invalidates the read-ahead buffer.
        self.buffer = ""
        self.fileobj.seek(self.position)

    def close(self):
        """Close the file object.
        """
        self.closed = True

    def __iter__(self):
        """Get an iterator over the file's lines.
        """
        while True:
            line = self.readline()
            if not line:
                break
            yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
    """Informational class which holds the details about an
       archive member given by a tar header block.
       TarInfo objects are returned by TarFile.getmember(),
       TarFile.getmembers() and TarFile.gettarinfo() and are
       usually created internally.
    """

    def __init__(self, name=""):
        """Construct a TarInfo object. name is the optional name
           of the member.
        """
        self.name = name        # member name
        self.mode = 0644        # file permissions
        self.uid = 0            # user id
        self.gid = 0            # group id
        self.size = 0           # file size
        self.mtime = 0          # modification time
        self.chksum = 0         # header checksum
        self.type = REGTYPE     # member type
        self.linkname = ""      # link name
        self.uname = ""         # user name
        self.gname = ""         # group name
        self.devmajor = 0       # device major number
        self.devminor = 0       # device minor number

        self.offset = 0         # the tar header starts here
        self.offset_data = 0    # the file's data starts here

        self.pax_headers = {}   # pax header information

    # In pax headers the "name" and "linkname" field are called
    # "path" and "linkpath".
    def _getpath(self):
        return self.name
    def _setpath(self, name):
        self.name = name
    path = property(_getpath, _setpath)

    def _getlinkpath(self):
        return self.linkname
    def _setlinkpath(self, linkname):
        self.linkname = linkname
    linkpath = property(_getlinkpath, _setlinkpath)

    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))

    def get_info(self, encoding, errors):
        """Return the TarInfo's attributes as a dictionary.
           Strings are encoded from unicode with the given encoding
           and error handler.
        """
        info = {
            "name":     self.name,
            "mode":     self.mode & 07777,
            "uid":      self.uid,
            "gid":      self.gid,
            "size":     self.size,
            "mtime":    self.mtime,
            "chksum":   self.chksum,
            "type":     self.type,
            "linkname": self.linkname,
            "uname":    self.uname,
            "gname":    self.gname,
            "devmajor": self.devmajor,
            "devminor": self.devminor
        }

        # Directory names always carry a trailing slash in the header.
        if info["type"] == DIRTYPE and not info["name"].endswith("/"):
            info["name"] += "/"

        for key in ("name", "linkname", "uname", "gname"):
            if type(info[key]) is unicode:
                info[key] = info[key].encode(encoding, errors)

        return info

    def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
        """Return a tar header as a string of 512 byte blocks.
        """
        info = self.get_info(encoding, errors)

        if format == USTAR_FORMAT:
            return self.create_ustar_header(info)
        elif format == GNU_FORMAT:
            return self.create_gnu_header(info)
        elif format == PAX_FORMAT:
            return self.create_pax_header(info, encoding, errors)
        else:
            raise ValueError("invalid format")

    def create_ustar_header(self, info):
        """Return the object as a ustar header block.
           Raises ValueError if a field does not fit the fixed-width
           ustar layout.
        """
        info["magic"] = POSIX_MAGIC

        if len(info["linkname"]) > LENGTH_LINK:
            raise ValueError("linkname is too long")

        # Overlong names may still fit by moving the leading directory
        # part into the ustar "prefix" field.
        if len(info["name"]) > LENGTH_NAME:
            info["prefix"], info["name"] = self._posix_split_name(info["name"])

        return self._create_header(info, USTAR_FORMAT)

    def create_gnu_header(self, info):
        """Return the object as a GNU header block sequence.
           Overlong names/linknames are emitted as extra LONGNAME /
           LONGLINK pseudo-members preceding the real header.
        """
        info["magic"] = GNU_MAGIC

        buf = ""
        if len(info["linkname"]) > LENGTH_LINK:
            buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)

        if len(info["name"]) > LENGTH_NAME:
            buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)

        return buf + self._create_header(info, GNU_FORMAT)

    def create_pax_header(self, info, encoding, errors):
        """Return the object as a ustar header block. If it cannot be
           represented this way, prepend a pax extended header sequence
           with supplement information.
        """
        info["magic"] = POSIX_MAGIC
        pax_headers = self.pax_headers.copy()

        # Test string fields for values that exceed the field length or cannot
        # be represented in ASCII encoding.
        for name, hname, length in (
                ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
                ("uname", "uname", 32), ("gname", "gname", 32)):

            if hname in pax_headers:
                # The pax header has priority.
                continue

            val = info[name].decode(encoding, errors)

            # Try to encode the string as ASCII.
            try:
                val.encode("ascii")
            except UnicodeEncodeError:
                pax_headers[hname] = val
                continue

            if len(info[name]) > length:
                pax_headers[hname] = val

        # Test number fields for values that exceed the field limit or values
        # that like to be stored as float.
        for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
            if name in pax_headers:
                # The pax header has priority. Avoid overflow.
                info[name] = 0
                continue

            val = info[name]
            # Octal fields hold digits-1 characters plus a terminator,
            # hence the 8 ** (digits - 1) limit.
            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
                pax_headers[name] = unicode(val)
                info[name] = 0

        # Create a pax extended header if necessary.
        if pax_headers:
            buf = self._create_pax_generic_header(pax_headers)
        else:
            buf = ""

        return buf + self._create_header(info, USTAR_FORMAT)

    @classmethod
    def create_pax_global_header(cls, pax_headers):
        """Return the object as a pax global header block sequence.
        """
        return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)

    def _posix_split_name(self, name):
        """Split a name longer than 100 chars into a prefix
           and a name part.  The split point must fall on a slash.
        """
        prefix = name[:LENGTH_PREFIX + 1]
        # Back up to the last slash that fits into the prefix field.
        while prefix and prefix[-1] != "/":
            prefix = prefix[:-1]

        name = name[len(prefix):]
        prefix = prefix[:-1]    # drop the trailing slash itself

        if not prefix or len(name) > LENGTH_NAME:
            raise ValueError("name is too long")
        return prefix, name

    @staticmethod
    def _create_header(info, format):
        """Return a header block. info is a dictionary with file
           information, format must be one of the *_FORMAT constants.
        """
        parts = [
            stn(info.get("name", ""), 100),
            itn(info.get("mode", 0) & 07777, 8, format),
            itn(info.get("uid", 0), 8, format),
            itn(info.get("gid", 0), 8, format),
            itn(info.get("size", 0), 12, format),
            itn(info.get("mtime", 0), 12, format),
            "        ", # checksum field
            info.get("type", REGTYPE),
            stn(info.get("linkname", ""), 100),
            stn(info.get("magic", POSIX_MAGIC), 8),
            stn(info.get("uname", ""), 32),
            stn(info.get("gname", ""), 32),
            itn(info.get("devmajor", 0), 8, format),
            itn(info.get("devminor", 0), 8, format),
            stn(info.get("prefix", ""), 155)
        ]

        buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
        # The checksum is computed with the checksum field blanked with
        # spaces (as written above) and then patched into the block.
        chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
        buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
        return buf

    @staticmethod
    def _create_payload(payload):
        """Return the string payload filled with zero bytes
           up to the next 512 byte border.
        """
        blocks, remainder = divmod(len(payload), BLOCKSIZE)
        if remainder > 0:
            payload += (BLOCKSIZE - remainder) * NUL
        return payload

    @classmethod
    def _create_gnu_long_header(cls, name, type):
        """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
           for name.
        """
        name += NUL

        info = {}
        info["name"] = "././@LongLink"
        info["type"] = type
        info["size"] = len(name)
        info["magic"] = GNU_MAGIC

        # create extended header + name blocks.
        return cls._create_header(info, USTAR_FORMAT) + \
                cls._create_payload(name)

    @classmethod
    def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
        """Return a POSIX.1-2001 extended or global header sequence
           that contains a list of keyword, value pairs. The values
           must be unicode objects.
        """
        records = []
        for keyword, value in pax_headers.iteritems():
            keyword = keyword.encode("utf8")
            value = value.encode("utf8")
            l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
            # The length field counts itself, so iterate until the
            # total record length is stable.
            n = p = 0
            while True:
                n = l + len(str(p))
                if n == p:
                    break
                p = n
            records.append("%d %s=%s\n" % (p, keyword, value))
        records = "".join(records)

        # We use a hardcoded "././@PaxHeader" name like star does
        # instead of the one that POSIX recommends.
        info = {}
        info["name"] = "././@PaxHeader"
        info["type"] = type
        info["size"] = len(records)
        info["magic"] = POSIX_MAGIC

        # Create pax header + record blocks.
        return cls._create_header(info, USTAR_FORMAT) + \
                cls._create_payload(records)

    @classmethod
    def frombuf(cls, buf):
        """Construct a TarInfo object from a 512 byte string buffer.
           Raises a HeaderError subclass if the buffer is empty,
           truncated, all-zero (end-of-archive) or has a bad checksum.
        """
        if len(buf) == 0:
            raise EmptyHeaderError("empty header")
        if len(buf) != BLOCKSIZE:
            raise TruncatedHeaderError("truncated header")
        if buf.count(NUL) == BLOCKSIZE:
            raise EOFHeaderError("end of file header")

        chksum = nti(buf[148:156])
        # Two checksum variants (signed/unsigned byte sums) are accepted.
        if chksum not in calc_chksums(buf):
            raise InvalidHeaderError("bad checksum")

        # Slice offsets below follow the fixed ustar header field layout.
        obj = cls()
        obj.buf = buf
        obj.name = nts(buf[0:100])
        obj.mode = nti(buf[100:108])
        obj.uid = nti(buf[108:116])
        obj.gid = nti(buf[116:124])
        obj.size = nti(buf[124:136])
        obj.mtime = nti(buf[136:148])
        obj.chksum = chksum
        obj.type = buf[156:157]
        obj.linkname = nts(buf[157:257])
        obj.uname = nts(buf[265:297])
        obj.gname = nts(buf[297:329])
        obj.devmajor = nti(buf[329:337])
        obj.devminor = nti(buf[337:345])
        prefix = nts(buf[345:500])

        # Old V7 tar format represents a directory as a regular
        # file with a trailing slash.
        if obj.type == AREGTYPE and obj.name.endswith("/"):
            obj.type = DIRTYPE

        # Remove redundant slashes from directories.
        if obj.isdir():
            obj.name = obj.name.rstrip("/")

        # Reconstruct a ustar longname.
        if prefix and obj.type not in GNU_TYPES:
            obj.name = prefix + "/" + obj.name
        return obj

    @classmethod
    def fromtarfile(cls, tarfile):
        """Return the next TarInfo object from TarFile object
           tarfile.
        """
        buf = tarfile.fileobj.read(BLOCKSIZE)
        obj = cls.frombuf(buf)
        obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
        return obj._proc_member(tarfile)

    #--------------------------------------------------------------------------
    # The following are methods that are called depending on the type of a
    # member. The entry point is _proc_member() which can be overridden in a
    # subclass to add custom _proc_*() methods. A _proc_*() method MUST
    # implement the following
    # operations:
    # 1. Set self.offset_data to the position where the data blocks begin,
    #    if there is data that follows.
    # 2. Set tarfile.offset to the position where the next member's header will
    #    begin.
    # 3. Return self or another valid TarInfo object.
    def _proc_member(self, tarfile):
        """Choose the right processing method depending on
           the type and call it.
        """
        if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
            return self._proc_gnulong(tarfile)
        elif self.type == GNUTYPE_SPARSE:
            return self._proc_sparse(tarfile)
        elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
            return self._proc_pax(tarfile)
        else:
            return self._proc_builtin(tarfile)

    def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which
           will be treated as a regular file.
        """
        self.offset_data = tarfile.fileobj.tell()
        offset = self.offset_data
        if self.isreg() or self.type not in SUPPORTED_TYPES:
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset

        # Patch the TarInfo object with saved global
        # header information.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)

        return self

    def _proc_gnulong(self, tarfile):
        """Process the blocks that hold a GNU longname
           or longlink member.
        """
        buf = tarfile.fileobj.read(self._block(self.size))

        # Fetch the next header and process it.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        # Patch the TarInfo object from the next header with
        # the longname information.
        next.offset = self.offset
        if self.type == GNUTYPE_LONGNAME:
            next.name = nts(buf)
        elif self.type == GNUTYPE_LONGLINK:
            next.linkname = nts(buf)

        return next

    def _proc_sparse(self, tarfile):
        """Process a GNU sparse header plus extra headers.
           Builds the data/hole map used by _FileInFile.
        """
        buf = self.buf
        sp = _ringbuffer()
        pos = 386
        lastpos = 0L
        realpos = 0L
        # There are 4 possible sparse structs in the
        # first header.
        for i in xrange(4):
            try:
                offset = nti(buf[pos:pos + 12])
                numbytes = nti(buf[pos + 12:pos + 24])
            except ValueError:
                break
            # A gap between the previous extent and this one is a hole.
            if offset > lastpos:
                sp.append(_hole(lastpos, offset - lastpos))
            sp.append(_data(offset, numbytes, realpos))
            realpos += numbytes
            lastpos = offset + numbytes
            pos += 24

        isextended = ord(buf[482])
        origsize = nti(buf[483:495])

        # If the isextended flag is given,
        # there are extra headers to process.
        while isextended == 1:
            buf = tarfile.fileobj.read(BLOCKSIZE)
            pos = 0
            # Each extension block holds up to 21 more sparse structs.
            for i in xrange(21):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                if offset > lastpos:
                    sp.append(_hole(lastpos, offset - lastpos))
                sp.append(_data(offset, numbytes, realpos))
                realpos += numbytes
                lastpos = offset + numbytes
                pos += 24
            isextended = ord(buf[504])

        # A trailing hole pads the member out to its original size.
        if lastpos < origsize:
            sp.append(_hole(lastpos, origsize - lastpos))

        self.sparse = sp

        self.offset_data = tarfile.fileobj.tell()
        tarfile.offset = self.offset_data + self._block(self.size)
        self.size = origsize

        return self

    def _proc_pax(self, tarfile):
        """Process an extended or global header as described in
           POSIX.1-2001.
        """
        # Read the header information.
        buf = tarfile.fileobj.read(self._block(self.size))

        # A pax header stores supplemental information for either
        # the following file (extended) or all following files
        # (global).
        if self.type == XGLTYPE:
            pax_headers = tarfile.pax_headers
        else:
            pax_headers = tarfile.pax_headers.copy()

        # Parse pax header information. A record looks like that:
        # "%d %s=%s\n" % (length, keyword, value). length is the size
        # of the complete record including the length field itself and
        # the newline. keyword and value are both UTF-8 encoded strings.
        regex = re.compile(r"(\d+) ([^=]+)=", re.U)
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break

            length, keyword = match.groups()
            length = int(length)
            value = buf[match.end(2) + 1:match.start(1) + length - 1]

            keyword = keyword.decode("utf8")
            value = value.decode("utf8")

            pax_headers[keyword] = value
            pos += length

        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset

            if "size" in pax_headers:
                # If the extended header replaces the size field,
                # we need to recalculate the offset where the next
                # header starts.
                offset = next.offset_data
                if next.isreg() or next.type not in SUPPORTED_TYPES:
                    offset += next._block(next.size)
                tarfile.offset = offset

        return next

    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous
           pax extended or global header.
        """
        for keyword, value in pax_headers.iteritems():
            if keyword not in PAX_FIELDS:
                continue

            if keyword == "path":
                value = value.rstrip("/")

            if keyword in PAX_NUMBER_FIELDS:
                try:
                    value = PAX_NUMBER_FIELDS[keyword](value)
                except ValueError:
                    # Unparsable numeric field; fall back to zero.
                    value = 0
            else:
                value = uts(value, encoding, errors)

            setattr(self, keyword, value)

        self.pax_headers = pax_headers.copy()

    def _block(self, count):
        """Round up a byte count by BLOCKSIZE and return it,
           e.g. _block(834) => 1024.
        """
        blocks, remainder = divmod(count, BLOCKSIZE)
        if remainder:
            blocks += 1
        return blocks * BLOCKSIZE

    def isreg(self):
        """Return True if the member is a regular file."""
        return self.type in REGULAR_TYPES
    def isfile(self):
        """Alias for isreg()."""
        return self.isreg()
    def isdir(self):
        """Return True if the member is a directory."""
        return self.type == DIRTYPE
    def issym(self):
        """Return True if the member is a symbolic link."""
        return self.type == SYMTYPE
    def islnk(self):
        """Return True if the member is a hard link."""
        return self.type == LNKTYPE
    def ischr(self):
        """Return True if the member is a character device."""
        return self.type == CHRTYPE
    def isblk(self):
        """Return True if the member is a block device."""
        return self.type == BLKTYPE
    def isfifo(self):
        """Return True if the member is a FIFO."""
        return self.type == FIFOTYPE
    def issparse(self):
        """Return True if the member is a GNU sparse file."""
        return self.type == GNUTYPE_SPARSE
    def isdev(self):
        """Return True if the member is a device of any kind."""
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
    """The TarFile Class provides an interface to tar archives.
    """

    debug = 0                   # May be set from 0 (no msgs) to 3 (all msgs)

    dereference = False         # If true, add content of linked file to the
                                # tar file, else the link.

    ignore_zeros = False        # If true, skips empty or invalid blocks and
                                # continues processing.

    errorlevel = 1              # If 0, fatal errors only appear in debug
                                # messages (if debug >= 0). If > 0, errors
                                # are passed to the caller as exceptions.

    format = DEFAULT_FORMAT     # The format to use when creating an archive.

    encoding = ENCODING         # Encoding for 8-bit character strings.

    errors = None               # Error handler for unicode conversion.

    tarinfo = TarInfo           # The default TarInfo class to use.

    fileobject = ExFileObject   # The default ExFileObject class to use
                                # (returned by extractfile()).
def __init__(self, name=None, mode="r", fileobj=None, format=None,
        tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
        errors=None, pax_headers=None, debug=None, errorlevel=None):
    """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
       read from an existing archive, 'a' to append data to an existing
       file or 'w' to create a new file overwriting an existing one. `mode'
       defaults to 'r'.
       If `fileobj' is given, it is used for reading or writing data. If it
       can be determined, `mode' is overridden by `fileobj's mode.
       `fileobj' is not closed, when TarFile is closed.
    """
    if len(mode) > 1 or mode not in "raw":
        raise ValueError("mode must be 'r', 'a' or 'w'")
    self.mode = mode
    # Map the logical mode onto the low-level binary file mode.
    self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]

    if not fileobj:
        if self.mode == "a" and not os.path.exists(name):
            # Create nonexistent files in append mode.
            self.mode = "w"
            self._mode = "wb"
        fileobj = bltn_open(name, self._mode)
        self._extfileobj = False    # we own fileobj; close() will close it
    else:
        if name is None and hasattr(fileobj, "name"):
            name = fileobj.name
        if hasattr(fileobj, "mode"):
            self._mode = fileobj.mode
        self._extfileobj = True     # caller owns fileobj; never close it
    if name:
        self.name = os.path.abspath(name)
    else:
        self.name = None
    self.fileobj = fileobj

    # Init attributes.
    if format is not None:
        self.format = format
    if tarinfo is not None:
        self.tarinfo = tarinfo
    if dereference is not None:
        self.dereference = dereference
    if ignore_zeros is not None:
        self.ignore_zeros = ignore_zeros
    if encoding is not None:
        self.encoding = encoding

    if errors is not None:
        self.errors = errors
    elif mode == "r":
        # Be lenient when reading existing archives.
        self.errors = "utf-8"
    else:
        self.errors = "strict"

    # pax_headers are only meaningful when writing PAX archives.
    if pax_headers is not None and self.format == PAX_FORMAT:
        self.pax_headers = pax_headers
    else:
        self.pax_headers = {}

    if debug is not None:
        self.debug = debug
    if errorlevel is not None:
        self.errorlevel = errorlevel

    # Init datastructures.
    self.closed = False
    self.members = []       # list of members as TarInfo objects
    self._loaded = False    # flag if all members have been read
    self.offset = self.fileobj.tell()
                            # current position in the archive file
    self.inodes = {}        # dictionary caching the inodes of
                            # archive members already added

    try:
        if self.mode == "r":
            self.firstmember = None
            self.firstmember = self.next()

        if self.mode == "a":
            # Move to the end of the archive,
            # before the first empty block.
            while True:
                self.fileobj.seek(self.offset)
                try:
                    tarinfo = self.tarinfo.fromtarfile(self)
                    self.members.append(tarinfo)
                except EOFHeaderError:
                    self.fileobj.seek(self.offset)
                    break
                except HeaderError, e:
                    raise ReadError(str(e))

        if self.mode in "aw":
            self._loaded = True

            if self.pax_headers:
                buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
                self.fileobj.write(buf)
                self.offset += len(buf)
    except:
        # Deliberate bare except: release the file object we opened
        # ourselves, mark the object closed, then re-raise unchanged.
        if not self._extfileobj:
            self.fileobj.close()
        self.closed = True
        raise
def _getposix(self):
    # Deprecated read accessor: "posix" is true exactly when the
    # ustar format is selected.
    return self.format == USTAR_FORMAT

def _setposix(self, value):
    # Deprecated write accessor kept for backward compatibility; it
    # merely toggles between the ustar and GNU formats.
    import warnings
    warnings.warn("use the format attribute instead", DeprecationWarning,
                  2)
    if not value:
        self.format = GNU_FORMAT
    else:
        self.format = USTAR_FORMAT

posix = property(_getposix, _setposix)
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
    """Open a tar archive for reading, writing or appending. Return
       an appropriate TarFile class.

       mode:
       'r' or 'r:*' open for reading with transparent compression
       'r:'         open for reading exclusively uncompressed
       'r:gz'       open for reading with gzip compression
       'r:bz2'      open for reading with bzip2 compression
       'a' or 'a:'  open for appending, creating the file if necessary
       'w' or 'w:'  open for writing without compression
       'w:gz'       open for writing with gzip compression
       'w:bz2'      open for writing with bzip2 compression

       'r|*'        open a stream of tar blocks with transparent compression
       'r|'         open an uncompressed stream of tar blocks for reading
       'r|gz'       open a gzip compressed stream of tar blocks
       'r|bz2'      open a bzip2 compressed stream of tar blocks
       'w|'         open an uncompressed stream for writing
       'w|gz'       open a gzip compressed stream for writing
       'w|bz2'      open a bzip2 compressed stream for writing
    """

    if not name and not fileobj:
        raise ValueError("nothing to open")

    if mode in ("r", "r:*"):
        # Find out which *open() is appropriate for opening the file.
        for comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
            if fileobj is not None:
                # Remember the position so the next candidate opener
                # starts from the same place after a failed attempt.
                saved_pos = fileobj.tell()
            try:
                return func(name, "r", fileobj, **kwargs)
            except (ReadError, CompressionError), e:
                if fileobj is not None:
                    fileobj.seek(saved_pos)
                continue
        raise ReadError("file could not be opened successfully")

    elif ":" in mode:
        filemode, comptype = mode.split(":", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        # Select the *open() function according to
        # given compression.
        if comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
        else:
            raise CompressionError("unknown compression type %r" % comptype)
        return func(name, filemode, fileobj, **kwargs)

    elif "|" in mode:
        filemode, comptype = mode.split("|", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        if filemode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")

        # Stream mode: wrap the file object in a non-seekable _Stream.
        t = cls(name, filemode,
                _Stream(name, filemode, comptype, fileobj, bufsize),
                **kwargs)
        t._extfileobj = False
        return t

    elif mode in "aw":
        return cls.taropen(name, mode, fileobj, **kwargs)

    raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
    """Open uncompressed tar archive name for reading or writing.

       mode must be exactly one of 'r', 'a' or 'w'; anything else
       raises ValueError.
    """
    # The old check `len(mode) > 1 or mode not in "raw"` let the empty
    # string slip through ("" is a substring of "raw"), which then
    # failed later in __init__ with an unhelpful KeyError.  Testing
    # against an explicit tuple rejects "" (and any other invalid
    # value) up front with the documented ValueError.
    if mode not in ("r", "a", "w"):
        raise ValueError("mode must be 'r', 'a' or 'w'")
    return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open gzip compressed tar archive name for reading or writing.
       Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'")

    try:
        import gzip
        # Probe for the class as well; a broken/partial gzip module
        # would otherwise fail later with a confusing AttributeError.
        gzip.GzipFile
    except (ImportError, AttributeError):
        raise CompressionError("gzip module is not available")

    if fileobj is None:
        fileobj = bltn_open(name, mode + "b")

    # NOTE(review): if GzipFile/taropen raises here and we opened
    # fileobj ourselves above, it is not closed before the exception
    # propagates; later CPython versions close it — confirm before
    # changing behavior.
    try:
        t = cls.taropen(name, mode,
                gzip.GzipFile(name, mode, compresslevel, fileobj),
                **kwargs)
    except IOError:
        raise ReadError("not a gzip file")
    # The GzipFile wrapper is ours, so close() must dispose of it.
    t._extfileobj = False
    return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open bzip2 compressed tar archive name for reading or writing.
       Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'.")

    try:
        import bz2
    except ImportError:
        raise CompressionError("bz2 module is not available")

    if fileobj is not None:
        # bz2.BZ2File cannot wrap an arbitrary file object directly,
        # so route it through the _BZ2Proxy adapter.
        fileobj = _BZ2Proxy(fileobj, mode)
    else:
        fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)

    try:
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except (IOError, EOFError):
        raise ReadError("not a bzip2 file")
    # The bz2 wrapper is ours, so close() must dispose of it.
    t._extfileobj = False
    return t
# All *open() methods are registered here.  open() consults this
# mapping to dispatch a compression suffix to its constructor; a
# subclass may extend it to register additional compression types.
OPEN_METH = {
    "tar": "taropen",   # uncompressed tar
    "gz":  "gzopen",    # gzip compressed tar
    "bz2": "bz2open"    # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
    """Close the TarFile. In write-mode, two finishing zero blocks are
       appended to the archive.
    """
    if self.closed:
        return

    if self.mode in "aw":
        # Terminate the archive with two consecutive zero blocks.
        end_marker = NUL * (BLOCKSIZE * 2)
        self.fileobj.write(end_marker)
        self.offset += len(end_marker)
        # Pad the archive out to a full record boundary, the way
        # "tar -b20" does.
        remainder = self.offset % RECORDSIZE
        if remainder > 0:
            self.fileobj.write(NUL * (RECORDSIZE - remainder))

    if not self._extfileobj:
        self.fileobj.close()
    self.closed = True
def getmember(self, name):
    """Return a TarInfo object for member `name'. If `name' can not be
       found in the archive, KeyError is raised. If a member occurs more
       than once in the archive, its last occurrence is assumed to be the
       most up-to-date version.
    """
    member = self._getmember(name)
    if member is not None:
        return member
    raise KeyError("filename %r not found" % name)
def getmembers(self):
    """Return the members of the archive as a list of TarInfo objects. The
       list has the same order as the members in the archive.
    """
    self._check()
    # Scanning the whole archive is deferred until a complete member
    # list is actually requested.
    if not self._loaded:
        self._load()
    return self.members
def getnames(self):
    """Return the members of the archive as a list of their names. It has
       the same order as the list returned by getmembers().
    """
    names = []
    for member in self.getmembers():
        names.append(member.name)
    return names
def gettarinfo(self, name=None, arcname=None, fileobj=None):
    """Create a TarInfo object for either the file `name' or the file
       object `fileobj' (using os.fstat on its file descriptor). You can
       modify some of the TarInfo's attributes before you add it using
       addfile(). If given, `arcname' specifies an alternative name for the
       file in the archive.
       Returns None for file types that cannot be represented in a
       tar archive (e.g. sockets).
    """
    self._check("aw")

    # When fileobj is given, replace name by
    # fileobj's real name.
    if fileobj is not None:
        name = fileobj.name

    # Building the name of the member in the archive.
    # Backward slashes are converted to forward slashes,
    # Absolute paths are turned to relative paths.
    if arcname is None:
        arcname = name
    drv, arcname = os.path.splitdrive(arcname)
    arcname = arcname.replace(os.sep, "/")
    arcname = arcname.lstrip("/")

    # Now, fill the TarInfo object with
    # information specific for the file.
    tarinfo = self.tarinfo()
    tarinfo.tarfile = self

    # Use os.stat or os.lstat, depending on platform
    # and if symlinks shall be resolved.
    if fileobj is None:
        if hasattr(os, "lstat") and not self.dereference:
            statres = os.lstat(name)
        else:
            statres = os.stat(name)
    else:
        statres = os.fstat(fileobj.fileno())
    linkname = ""

    stmd = statres.st_mode
    if stat.S_ISREG(stmd):
        inode = (statres.st_ino, statres.st_dev)
        if not self.dereference and statres.st_nlink > 1 and \
                inode in self.inodes and arcname != self.inodes[inode]:
            # Is it a hardlink to an already
            # archived file?
            type = LNKTYPE
            linkname = self.inodes[inode]
        else:
            # The inode is added only if its valid.
            # For win32 it is always 0.
            type = REGTYPE
            if inode[0]:
                self.inodes[inode] = arcname
    elif stat.S_ISDIR(stmd):
        type = DIRTYPE
    elif stat.S_ISFIFO(stmd):
        type = FIFOTYPE
    elif stat.S_ISLNK(stmd):
        type = SYMTYPE
        linkname = os.readlink(name)
    elif stat.S_ISCHR(stmd):
        type = CHRTYPE
    elif stat.S_ISBLK(stmd):
        type = BLKTYPE
    else:
        # Unsupported file type (e.g. socket): signal the caller.
        return None

    # Fill the TarInfo object with all
    # information we can get.
    tarinfo.name = arcname
    tarinfo.mode = stmd
    tarinfo.uid = statres.st_uid
    tarinfo.gid = statres.st_gid
    if type == REGTYPE:
        tarinfo.size = statres.st_size
    else:
        # Only regular files carry data; everything else has size 0.
        tarinfo.size = 0L
    tarinfo.mtime = statres.st_mtime
    tarinfo.type = type
    tarinfo.linkname = linkname
    # uname/gname lookups are best-effort: a missing passwd/group
    # entry simply leaves the numeric ids in place.
    if pwd:
        try:
            tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
        except KeyError:
            pass
    if grp:
        try:
            tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
        except KeyError:
            pass

    if type in (CHRTYPE, BLKTYPE):
        if hasattr(os, "major") and hasattr(os, "minor"):
            tarinfo.devmajor = os.major(statres.st_rdev)
            tarinfo.devminor = os.minor(statres.st_rdev)
    return tarinfo
def list(self, verbose=True):
    """Print a table of contents to sys.stdout. If `verbose' is False, only
       the names of the members are printed. If it is True, an `ls -l'-like
       output is produced.
    """
    self._check()

    for tarinfo in self:
        if verbose:
            print filemode(tarinfo.mode),
            print "%s/%s" % (tarinfo.uname or tarinfo.uid,
                             tarinfo.gname or tarinfo.gid),
            if tarinfo.ischr() or tarinfo.isblk():
                # Devices show "major,minor" instead of a size.
                print "%10s" % ("%d,%d" \
                                % (tarinfo.devmajor, tarinfo.devminor)),
            else:
                print "%10d" % tarinfo.size,
            print "%d-%02d-%02d %02d:%02d:%02d" \
                  % time.localtime(tarinfo.mtime)[:6],

        if tarinfo.isdir():
            print tarinfo.name + "/",
        else:
            print tarinfo.name,

        if verbose:
            if tarinfo.issym():
                print "->", tarinfo.linkname,
            if tarinfo.islnk():
                print "link to", tarinfo.linkname,
        print
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
    """Add the file `name' to the archive. `name' may be any type of file
       (directory, fifo, symbolic link, etc.). If given, `arcname'
       specifies an alternative name for the file in the archive.
       Directories are added recursively by default. This can be avoided by
       setting `recursive' to False. `exclude' is a function that should
       return True for each filename to be excluded. `filter' is a function
       that expects a TarInfo object argument and returns the changed
       TarInfo object, if it returns None the TarInfo object will be
       excluded from the archive.
    """
    self._check("aw")

    if arcname is None:
        arcname = name

    # Exclude pathnames.
    if exclude is not None:
        import warnings
        warnings.warn("use the filter argument instead",
                      DeprecationWarning, 2)
        if exclude(name):
            self._dbg(2, "tarfile: Excluded %r" % name)
            return

    # Skip if somebody tries to archive the archive...
    if self.name is not None and os.path.abspath(name) == self.name:
        self._dbg(2, "tarfile: Skipped %r" % name)
        return

    self._dbg(1, name)

    # Create a TarInfo object from the file.
    tarinfo = self.gettarinfo(name, arcname)

    if tarinfo is None:
        self._dbg(1, "tarfile: Unsupported type %r" % name)
        return

    # Change or exclude the TarInfo object.
    if filter is not None:
        tarinfo = filter(tarinfo)
        if tarinfo is None:
            self._dbg(2, "tarfile: Excluded %r" % name)
            return

    # Append the tar header and data to the archive.
    if tarinfo.isreg():
        # Fix: close the source file even when addfile() raises, so a
        # failing write does not leak the file descriptor.
        f = bltn_open(name, "rb")
        try:
            self.addfile(tarinfo, f)
        finally:
            f.close()

    elif tarinfo.isdir():
        self.addfile(tarinfo)
        if recursive:
            # Descend into the directory, preserving the archive-name
            # prefix for the children.
            for f in os.listdir(name):
                self.add(os.path.join(name, f), os.path.join(arcname, f),
                         recursive, exclude, filter)

    else:
        self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
    """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
       given, tarinfo.size bytes are read from it and added to the archive.
       You can create TarInfo objects using gettarinfo().
       On Windows platforms, `fileobj' should always be opened with mode
       'rb' to avoid irritation about the file size.
    """
    self._check("aw")

    # Work on a copy so later caller-side mutations of tarinfo do not
    # affect the member we record in self.members.
    tarinfo = copy.copy(tarinfo)

    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)

    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size)
        # Pad the data up to the next 512-byte block boundary and keep
        # self.offset in sync with the bytes actually written.
        blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * BLOCKSIZE

    self.members.append(tarinfo)
def extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
       directory and set owner, modification time and permissions on
       directories afterwards. `path' specifies a different directory
       to extract to. `members' is optional and must be a subset of the
       list returned by getmembers().
    """
    directories = []

    if members is None:
        # Iterating the TarFile itself yields every member lazily.
        members = self

    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 0700
        self.extract(tarinfo, path)

    # Reverse sort directories: children come before their parents, so a
    # parent's metadata is fixed up after its contents were written.
    directories.sort(key=operator.attrgetter('name'))
    directories.reverse()

    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError, e:
            # Honoured only above errorlevel 1; otherwise just logged.
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path=""):
    """Extract a member from the archive to the current working directory,
       using its full name. Its file information is extracted as accurately
       as possible. `member' may be a filename or a TarInfo object. You can
       specify a different directory using `path'.
    """
    self._check("r")

    # Accept either a member name or a TarInfo object.
    if isinstance(member, basestring):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member

    # Prepare the link target for makelink().
    if tarinfo.islnk():
        tarinfo._link_target = os.path.join(path, tarinfo.linkname)

    try:
        self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
    except EnvironmentError, e:
        # OS-level failures are fatal only above errorlevel 0.
        if self.errorlevel > 0:
            raise
        else:
            if e.filename is None:
                self._dbg(1, "tarfile: %s" % e.strerror)
            else:
                self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
    except ExtractError, e:
        # Archive-level problems are fatal only above errorlevel 1.
        if self.errorlevel > 1:
            raise
        else:
            self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
    """Extract a member from the archive as a file object. `member' may be
       a filename or a TarInfo object. If `member' is a regular file, a
       file-like object is returned. If `member' is a link, a file-like
       object is constructed from the link's target. If `member' is none of
       the above, None is returned.
       The file-like object is read-only and provides the following
       methods: read(), readline(), readlines(), seek() and tell()
    """
    self._check("r")

    # Accept either a member name or a TarInfo object.
    if isinstance(member, basestring):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member

    if tarinfo.isreg():
        return self.fileobject(self, tarinfo)

    elif tarinfo.type not in SUPPORTED_TYPES:
        # If a member's type is unknown, it is treated as a
        # regular file.
        return self.fileobject(self, tarinfo)

    elif tarinfo.islnk() or tarinfo.issym():
        if isinstance(self.fileobj, _Stream):
            # A small but ugly workaround for the case that someone tries
            # to extract a (sym)link as a file-object from a non-seekable
            # stream of tar blocks.
            raise StreamError("cannot extract (sym)link as file object")
        else:
            # A (sym)link's file object is its target's file object.
            return self.extractfile(self._find_link_target(tarinfo))
    else:
        # If there's no data associated with the member (directory, chrdev,
        # blkdev, etc.), return None instead of a file object.
        return None
def _extract_member(self, tarinfo, targetpath):
    """Extract the TarInfo object tarinfo to a physical
       file called targetpath.
    """
    # Fetch the TarInfo object for the given name
    # and build the destination pathname, replacing
    # forward slashes to platform specific separators.
    targetpath = targetpath.rstrip("/")
    targetpath = targetpath.replace("/", os.sep)

    # Create all upper directories.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        # Create directories that are not part of the archive with
        # default permissions.
        os.makedirs(upperdirs)

    if tarinfo.islnk() or tarinfo.issym():
        self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
    else:
        self._dbg(1, tarinfo.name)

    # Dispatch on the member type to the matching make*() method below.
    if tarinfo.isreg():
        self.makefile(tarinfo, targetpath)
    elif tarinfo.isdir():
        self.makedir(tarinfo, targetpath)
    elif tarinfo.isfifo():
        self.makefifo(tarinfo, targetpath)
    elif tarinfo.ischr() or tarinfo.isblk():
        self.makedev(tarinfo, targetpath)
    elif tarinfo.islnk() or tarinfo.issym():
        self.makelink(tarinfo, targetpath)
    elif tarinfo.type not in SUPPORTED_TYPES:
        self.makeunknown(tarinfo, targetpath)
    else:
        self.makefile(tarinfo, targetpath)

    # Apply metadata afterwards; a symlink's own mode/mtime cannot be
    # set portably, so only ownership is applied for symlinks.
    self.chown(tarinfo, targetpath)
    if not tarinfo.issym():
        self.chmod(tarinfo, targetpath)
        self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
    """Make a directory called targetpath.
    """
    try:
        # Use a safe mode for the directory, the real mode is set
        # later in _extract_member().
        os.mkdir(targetpath, 0700)
    except EnvironmentError, e:
        # An already existing directory is fine; anything else is not.
        if e.errno != errno.EEXIST:
            raise
def makefile(self, tarinfo, targetpath):
    """Make a file called targetpath.

    Reads the member's data via extractfile() and copies it into a
    newly created file. Both file objects are closed even when
    copyfileobj() raises (e.g. on a truncated archive or a full disk);
    the original version leaked them on error.
    """
    source = self.extractfile(tarinfo)
    try:
        target = bltn_open(targetpath, "wb")
        try:
            copyfileobj(source, target)
        finally:
            target.close()
    finally:
        source.close()
def makeunknown(self, tarinfo, targetpath):
    """Fall back for members whose type field is not supported:
       extract them as if they were regular files and log a warning.
    """
    self.makefile(tarinfo, targetpath)
    self._dbg(1, "tarfile: Unknown file type %r, "
                 "extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
    """Create a FIFO (named pipe) at targetpath."""
    # Not every platform provides os.mkfifo.
    if not hasattr(os, "mkfifo"):
        raise ExtractError("fifo not supported by system")
    os.mkfifo(targetpath)
def makedev(self, tarinfo, targetpath):
    """Create a character or block device node at targetpath."""
    if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
        raise ExtractError("special devices not supported by system")

    # Combine the archived permission bits with the file-type flag.
    if tarinfo.isblk():
        mode = tarinfo.mode | stat.S_IFBLK
    else:
        mode = tarinfo.mode | stat.S_IFCHR

    os.mknod(targetpath, mode,
             os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
    """Make a (symbolic) link called targetpath. If it cannot be created
       (platform limitation), we try to make a copy of the referenced file
       instead of a link.
    """
    if hasattr(os, "symlink") and hasattr(os, "link"):
        # For systems that support symbolic and hard links.
        if tarinfo.issym():
            os.symlink(tarinfo.linkname, targetpath)
        else:
            # See extract().
            if os.path.exists(tarinfo._link_target):
                os.link(tarinfo._link_target, targetpath)
            else:
                # Hard-link target missing on disk: extract the archived
                # target member's content in its place.
                self._extract_member(self._find_link_target(tarinfo), targetpath)
    else:
        # No link support at all: fall back to copying the target member.
        try:
            self._extract_member(self._find_link_target(tarinfo), targetpath)
        except KeyError:
            raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
    """Set owner of targetpath according to tarinfo.
    """
    if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
        # We have to be root to do so.
        # Resolve the group: prefer the symbolic name, fall back to the
        # numeric gid, and finally to our own gid.
        try:
            g = grp.getgrnam(tarinfo.gname)[2]
        except KeyError:
            try:
                g = grp.getgrgid(tarinfo.gid)[2]
            except KeyError:
                g = os.getgid()
        # Resolve the user the same way.
        try:
            u = pwd.getpwnam(tarinfo.uname)[2]
        except KeyError:
            try:
                u = pwd.getpwuid(tarinfo.uid)[2]
            except KeyError:
                u = os.getuid()
        try:
            if tarinfo.issym() and hasattr(os, "lchown"):
                # Change the symlink itself, not its target.
                os.lchown(targetpath, u, g)
            else:
                if sys.platform != "os2emx":
                    os.chown(targetpath, u, g)
        except EnvironmentError, e:
            raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError, e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
    """Set modification time of targetpath according to tarinfo.
    """
    if not hasattr(os, 'utime'):
        return
    try:
        # Both atime and mtime are set to the archived mtime.
        os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
    except EnvironmentError, e:
        raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
    """Return the next member of the archive as a TarInfo object, when
       TarFile is opened for reading. Return None if there is no more
       available.
    """
    self._check("ra")
    if self.firstmember is not None:
        # A member was read ahead (e.g. while resolving a name lookup);
        # hand it out before touching the file again.
        m = self.firstmember
        self.firstmember = None
        return m

    # Read the next block.
    self.fileobj.seek(self.offset)
    tarinfo = None
    while True:
        try:
            tarinfo = self.tarinfo.fromtarfile(self)
        except EOFHeaderError, e:
            if self.ignore_zeros:
                # Treat the zero block as a gap and keep scanning.
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
        except InvalidHeaderError, e:
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
            elif self.offset == 0:
                # A bad header at offset 0 means this is not a tar file.
                raise ReadError(str(e))
        except EmptyHeaderError:
            if self.offset == 0:
                raise ReadError("empty file")
        except TruncatedHeaderError, e:
            if self.offset == 0:
                raise ReadError(str(e))
        except SubsequentHeaderError, e:
            # Damage after a valid member is always an error.
            raise ReadError(str(e))
        break

    if tarinfo is not None:
        self.members.append(tarinfo)
    else:
        # No further members: remember that the archive is exhausted.
        self._loaded = True

    return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
    """Scan the member list from the end towards the beginning and
       return the last archived member matching `name', or None.
       If tarinfo is given, only members archived before it are
       considered.
    """
    # Ensure that all members have been loaded.
    members = self.getmembers()

    # Limit the member search list up to tarinfo.
    if tarinfo is not None:
        members = members[:members.index(tarinfo)]

    if normalize:
        name = os.path.normpath(name)

    for member in reversed(members):
        candidate = os.path.normpath(member.name) if normalize else member.name
        if candidate == name:
            return member
def _load(self):
    """Consume the remainder of the archive so that every readable
       member is present in the members cache.
    """
    while self.next() is not None:
        pass
    self._loaded = True
def _check(self, mode=None):
    """Raise IOError if the TarFile has been closed, or if its mode
       letter is not among the characters in `mode'.
    """
    if self.closed:
        raise IOError("%s is closed" % self.__class__.__name__)
    if mode is None:
        return
    if self.mode not in mode:
        raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
    """Find the target member of a symlink or hardlink member in the
       archive.
    """
    if tarinfo.issym():
        # Always search the entire archive.
        # A symlink target is interpreted relative to the member's
        # own directory.
        linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
        limit = None
    else:
        # Search the archive before the link, because a hard link is
        # just a reference to an already archived file.
        linkname = tarinfo.linkname
        limit = tarinfo

    member = self._getmember(linkname, tarinfo=limit, normalize=True)
    if member is None:
        raise KeyError("linkname %r not found" % linkname)
    return member
def __iter__(self):
    """Iterate over cached members when fully loaded, otherwise read
       lazily through a TarIter.
    """
    if not self._loaded:
        return TarIter(self)
    return iter(self.members)
def _dbg(self, level, msg):
    """Write debugging output to sys.stderr.
    """
    # Shown only when `level' does not exceed the verbosity selected
    # via the TarFile `debug' attribute.
    if level <= self.debug:
        print >> sys.stderr, msg
def __enter__(self):
    """Context-manager entry: verify the archive is still open."""
    self._check()
    return self
def __exit__(self, type, value, traceback):
    """Context-manager exit: finalize cleanly, or abort on error."""
    if type is None:
        # Normal exit: write end-of-archive blocks (in write mode).
        self.close()
    else:
        # An exception occurred. We must not call close() because
        # it would try to write end-of-archive blocks and padding.
        if not self._extfileobj:
            self.fileobj.close()
        self.closed = True
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def next(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
# Helper classes for sparse file support
class _section:
"""Base class for _data and _hole.
"""
def __init__(self, offset, size):
self.offset = offset
self.size = size
def __contains__(self, offset):
return self.offset <= offset < self.offset + self.size
class _data(_section):
    """Represent a data section in a sparse file.
    """
    def __init__(self, offset, size, realpos):
        _section.__init__(self, offset, size)
        # File position in the archive where this section's bytes live.
        self.realpos = realpos
class _hole(_section):
    """Represent a hole section in a sparse file.
    """
    # A hole carries no data; reads inside it yield NUL bytes.
    pass
class _ringbuffer(list):
"""Ringbuffer class which increases performance
over a regular list.
"""
def __init__(self):
self.idx = 0
def find(self, offset):
idx = self.idx
while True:
item = self[idx]
if offset in item:
break
idx += 1
if idx == len(self):
idx = 0
if idx == self.idx:
# End of File
return None
self.idx = idx
return item
#---------------------------------------------
# zipfile compatible TarFile class
#---------------------------------------------
TAR_PLAIN = 0 # zipfile.ZIP_STORED
TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
class TarFileCompat:
    """TarFile class compatible with standard module zipfile's
       ZipFile class.
    """
    def __init__(self, file, mode="r", compression=TAR_PLAIN):
        from warnings import warnpy3k
        warnpy3k("the TarFileCompat class has been removed in Python 3.0",
                 stacklevel=2)
        if compression == TAR_PLAIN:
            self.tarfile = TarFile.taropen(file, mode)
        elif compression == TAR_GZIPPED:
            self.tarfile = TarFile.gzopen(file, mode)
        else:
            raise ValueError("unknown compression constant")
        if mode[0:1] == "r":
            # Decorate the members with zipfile-style attribute names.
            members = self.tarfile.getmembers()
            for m in members:
                m.filename = m.name
                m.file_size = m.size
                m.date_time = time.gmtime(m.mtime)[:6]
    def namelist(self):
        # Names of all regular-file members (mirrors ZipFile.namelist).
        return map(lambda m: m.name, self.infolist())
    def infolist(self):
        # Only regular-file members are exposed, mirroring ZipFile.
        return filter(lambda m: m.type in REGULAR_TYPES,
                      self.tarfile.getmembers())
    def printdir(self):
        self.tarfile.list()
    def testzip(self):
        # Nothing comparable to a per-member CRC check exists here.
        return
    def getinfo(self, name):
        return self.tarfile.getmember(name)
    def read(self, name):
        return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
    def write(self, filename, arcname=None, compress_type=None):
        # compress_type is accepted for ZipFile signature parity only.
        self.tarfile.add(filename, arcname)
    def writestr(self, zinfo, bytes):
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        import calendar
        # Build a TarInfo from the ZipInfo-like object and add the data.
        tinfo = TarInfo(zinfo.filename)
        tinfo.size = len(bytes)
        tinfo.mtime = calendar.timegm(zinfo.date_time)
        self.tarfile.addfile(tinfo, StringIO(bytes))
    def close(self):
        self.tarfile.close()
#class TarFileCompat
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
    """Return True if name points to a tar archive that we
       are able to handle, else return False.
    """
    # Note: `open' here is the module-level TarFile.open alias.
    try:
        archive = open(name)
        archive.close()
    except TarError:
        return False
    return True
# Preserve the builtin open() under a private name before shadowing the
# module-level `open' with the TarFile.open factory below.
bltn_open = open
open = TarFile.open
| krux/duplicity-pkg | duplicity/tarfile.py | Python | gpl-2.0 | 89,049 |
import os
import pytest
@pytest.fixture
def virtualenv_path(host):
    # Molecule playbook vars can't be passed into Testinfra tests, so the
    # virtualenv location is hardcoded relative to the remote user's home.
    home = host.user().home
    return os.path.join(home, '.virtualenvs/girder')
@pytest.fixture
def config_path(host):
    # Location of the Girder config file inside the remote user's home.
    home = host.user().home
    return os.path.join(home, '.girder/girder.cfg')
def test_girder_package(host, virtualenv_path):
    """The girder package must be installed in the virtualenv."""
    pip_path = os.path.join(virtualenv_path, 'bin/pip')
    installed = host.pip_package.get_packages(pip_path=pip_path)
    assert 'girder' in installed
def test_girder_web_build(host, virtualenv_path):
    """The built web-client entry point must exist on disk."""
    built_js = os.path.join(
        virtualenv_path, 'share/girder/static/built/girder_app.min.js')
    assert host.file(built_js).is_file
def test_girder_service(host):
    """The girder service must be enabled at boot and currently running."""
    service = host.service('girder')
    assert service.is_enabled
    assert service.is_running
def test_girder_socket_private(host):
    """Girder must be listening on the loopback interface."""
    assert host.socket('tcp://127.0.0.1:8080').is_listening
def test_girder_socket_public(host):
    """Girder must not be bound to all interfaces."""
    assert not host.socket('tcp://0.0.0.0:8080').is_listening
def test_girder_config_exists(host, config_path):
    """The Girder configuration file must be present."""
    assert host.file(config_path).exists
@pytest.mark.parametrize('option, value', [
    (r'server\.socket_host', r'"127\.0\.0\.1"'),
    (r'tools\.proxy\.on', r'True'),
    (r'uri', r'"mongodb://mongodb:27017/girder"'),
    (r'mode', r'"production"')
])
def test_girder_config_content(host, config_path, option, value):
    """Each expected config option must be set to the expected value."""
    pattern = r'^%s *= *%s$' % (option, value)
    assert host.file(config_path).contains(pattern)
| girder/ansible-role-girder | molecule/default/tests/test_default.py | Python | apache-2.0 | 1,679 |
import io
from typing import Dict, Type, Union
from elftools.elf.elffile import ELFFile
class Binary:
    """Base class for loadable binary images, dispatching on magic bytes."""

    # Registry mapping a 4-byte magic prefix to the handling subclass
    # (populated at the bottom of this module).
    magics: Dict[bytes, Type["Binary"]] = {}

    def __new__(cls, path):
        # Instantiating Binary directly dispatches to the subclass whose
        # magic matches the first four bytes of the file.
        if cls is not Binary:
            return super().__new__(cls)
        with open(path, "rb") as f:
            subclass = cls.magics[f.read(4)]
        return subclass(path)

    def __init__(self, path):
        self.path = path
        with open(path, "rb") as f:
            # NOTE: this stores the handler *class* registered for the
            # file's magic, not the raw magic bytes.
            self.magic = Binary.magics[f.read(4)]

    def arch(self):
        pass

    def maps(self):
        pass

    def threads(self):
        pass
class BinaryException(Exception):
    """Raised when a binary image is malformed or unsupported."""
class CGCElf(Binary):
    """Loader for DARPA CGC challenge binaries (magic b'\\x7fCGC')."""

    @staticmethod
    def _cgc2elf(filename):
        # hack begin so we can use upstream Elftool
        # Read the whole file into memory and patch the first four bytes
        # back to \x7fELF so pyelftools will accept the image.
        with open(filename, "rb") as fd:
            stream = io.BytesIO(fd.read())
            stream.write(b"\x7fELF")
            stream.name = fd.name
            return stream

    def __init__(self, filename):
        super().__init__(filename)
        stream = self._cgc2elf(filename)
        self.elf = ELFFile(stream)
        # CGC binaries are 32-bit x86 executables only.
        self.arch = {"x86": "i386", "x64": "amd64"}[self.elf.get_machine_arch()]

        assert "i386" == self.arch
        assert self.elf.header.e_type in ["ET_EXEC"]

    def maps(self):
        # Yield (vaddr, memsz, perms, stream name, offset, filesz) for
        # every PT_LOAD segment with a non-zero memory size.
        for elf_segment in self.elf.iter_segments():
            if elf_segment.header.p_type not in ["PT_LOAD", "PT_NULL", "PT_PHDR", "PT_CGCPOV2"]:
                raise BinaryException("Not Supported Section")

            if elf_segment.header.p_type != "PT_LOAD" or elf_segment.header.p_memsz == 0:
                continue

            flags = elf_segment.header.p_flags
            # PF_X 0x1 Execute - PF_W 0x2 Write - PF_R 0x4 Read
            perms = [" ", " x", " w ", " wx", "r ", "r x", "rw ", "rwx"][flags & 7]
            if "r" not in perms:
                raise BinaryException("Not readable map from cgc elf not supported")

            # CGCMAP--
            assert elf_segment.header.p_filesz != 0 or elf_segment.header.p_memsz != 0
            yield (
                (
                    elf_segment.header.p_vaddr,
                    elf_segment.header.p_memsz,
                    perms,
                    elf_segment.stream.name,
                    elf_segment.header.p_offset,
                    elf_segment.header.p_filesz,
                )
            )

    def threads(self):
        # A single runnable thread starting at the ELF entry point.
        yield (("Running", {"EIP": self.elf.header.e_entry}))
class Elf(Binary):
    """Loader for standard ELF images (executables, shared objects, cores)."""

    def __init__(self, filename):
        super().__init__(filename)
        self.elf = ELFFile(open(filename, "rb"))
        self.arch = {"x86": "i386", "x64": "amd64"}[self.elf.get_machine_arch()]

        assert self.elf.header.e_type in ["ET_DYN", "ET_EXEC", "ET_CORE"]

        # Get interpreter elf
        self.interpreter = None
        for elf_segment in self.elf.iter_segments():
            if elf_segment.header.p_type != "PT_INTERP":
                continue
            # PT_INTERP data is the interpreter's path, NUL-terminated;
            # strip the trailing NUL and load that file recursively.
            self.interpreter = Elf(elf_segment.data()[:-1])
            break
        if self.interpreter is not None:
            assert self.interpreter.arch == self.arch
            assert self.interpreter.elf.header.e_type in ["ET_DYN", "ET_EXEC"]

    def __del__(self):
        # __init__ may have raised before self.elf was assigned; getattr
        # keeps the destructor from raising a spurious AttributeError.
        elf = getattr(self, "elf", None)
        if elf is not None:
            elf.stream.close()

    def maps(self):
        # Yield (vaddr, memsz, perms, stream name, offset, filesz) for
        # every PT_LOAD segment with a non-zero memory size.
        for elf_segment in self.elf.iter_segments():
            if elf_segment.header.p_type != "PT_LOAD" or elf_segment.header.p_memsz == 0:
                continue

            flags = elf_segment.header.p_flags
            # PF_X 0x1 Execute - PF_W 0x2 Write - PF_R 0x4 Read
            perms = [" ", " x", " w ", " wx", "r ", "r x", "rw ", "rwx"][flags & 7]
            if "r" not in perms:
                raise BinaryException("Not readable map from cgc elf not supported")

            # CGCMAP--
            assert elf_segment.header.p_filesz != 0 or elf_segment.header.p_memsz != 0
            yield (
                (
                    elf_segment.header.p_vaddr,
                    elf_segment.header.p_memsz,
                    perms,
                    elf_segment.stream.name,
                    elf_segment.header.p_offset,
                    elf_segment.header.p_filesz,
                )
            )

    def getInterpreter(self):
        """Return the Elf for the PT_INTERP interpreter, or None."""
        return self.interpreter

    def threads(self):
        # A single runnable thread starting at the ELF entry point.
        yield (("Running", {"EIP": self.elf.header.e_entry}))
Binary.magics = {b"\x7fCGC": CGCElf, b"\x7fELF": Elf}
| trailofbits/manticore | manticore/binary/binary.py | Python | agpl-3.0 | 4,521 |
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
from timelinelib.utilities.encodings import to_unicode
TESTFILE = "testdata.txt"
class describe_to_unicode_function(unittest.TestCase):
    """Specs for timelinelib.utilities.encodings.to_unicode."""

    def test_can_open_url(self):
        # Round-trip: write raw byte values to disk and check that
        # to_unicode() turns whatever is read back into a unicode object.
        self.save_file_data()
        self.assertTrue(isinstance(to_unicode(self.read_file_data()), unicode))
        self.remove_file()

    def test_can_url(self):
        # An already-unicode string stays unicode.
        self.assertTrue(isinstance(to_unicode(u"123abc"), unicode))

    def save_file_data(self):
        # Helper: create TESTFILE containing one character for each value
        # 0..255 that the platform allows to be written.
        f = open(TESTFILE, "w")
        for i in range(256):
            try:
                f.write(chr(i))
            except:
                # NOTE(review): bare except silently skips unwritable
                # characters; confirm which exception it is meant to catch.
                pass
        f.close()

    def read_file_data(self):
        # Helper: return TESTFILE's raw contents.
        f = open(TESTFILE, "r")
        url = f.read()
        f.close()
        return url

    def remove_file(self):
        # Helper: clean up the scratch file.
        os.remove(TESTFILE)
| linostar/timeline-clone | test/specs/utilities/encodings.py | Python | gpl-3.0 | 1,544 |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SQLAlchemy models for application data.
"""
import json
import six
from oslo.config import cfg
from sqlalchemy.ext import declarative
from sqlalchemy.orm import exc
from sqlalchemy import types
from solum.common import exception
from solum import objects
from solum.openstack.common.db import exception as db_exc
from solum.openstack.common.db.sqlalchemy import models
from solum.openstack.common.db.sqlalchemy import session as db_session
from solum.openstack.common.py3kcompat import urlutils
def table_args():
    """Return engine-specific table kwargs, or None for non-MySQL backends."""
    engine_name = urlutils.urlparse(cfg.CONF.database.connection).scheme
    if engine_name != 'mysql':
        return None
    # MySQL tables need an explicit engine and charset.
    return {'mysql_engine': 'InnoDB',
            'mysql_charset': "utf8"}
def model_query(context, model, *args, **kwargs):
    """Query helper.

    :param context: context to query under
    :param session: if present, the session to use
    """
    # Fall back to a fresh session when none was supplied.
    session = kwargs.get('session') or db_session.get_session(
        mysql_traditional_mode=True)
    return session.query(model, *args)
class SolumBase(models.TimestampMixin, models.ModelBase):
    """Shared behaviour (CRUD, lookups, serialization) for Solum models."""

    # Replaced by the declarative base machinery below.
    metadata = None

    @classmethod
    def obj_name(cls):
        # Name under which this model is known to the objects registry.
        return cls.__name__

    def as_dict(self):
        """Return the row as a plain dict of column and extra-key values."""
        d = {}
        for c in self.__table__.columns:
            d[c.name] = self[c.name]
        for k in self._extra_keys:
            d[k] = self[k]
        return d

    @classmethod
    def get_session(cls):
        return db_session.get_session(mysql_traditional_mode=True)

    @classmethod
    def get_by_id(cls, context, item_id):
        """Return the row with primary key `item_id' or raise not-found."""
        try:
            session = SolumBase.get_session()
            return session.query(cls).filter_by(id=item_id).one()
        except exc.NoResultFound:
            cls._raise_not_found(item_id)

    @classmethod
    def get_by_uuid(cls, context, item_uuid):
        """Return the row with the given uuid or raise not-found."""
        try:
            session = SolumBase.get_session()
            return session.query(cls).filter_by(uuid=item_uuid).one()
        except exc.NoResultFound:
            cls._raise_not_found(item_uuid)

    @classmethod
    def _raise_duplicate_object(cls):
        # Prefer the REST resource name in the error when one is declared.
        if hasattr(cls, '__resource__'):
            raise exception.ResourceExists(name=cls.__resource__)
        else:
            raise exception.ObjectNotUnique(name=cls.__tablename__)

    def _non_updatable_fields(self):
        # Fields that update() must never overwrite.
        return set(('uuid', 'id'))

    def _lazyhasattr(self, name):
        # Checks the instance/class dicts directly instead of getattr();
        # presumably avoids triggering descriptor side effects — confirm.
        return any(name in d for d in (self.__dict__,
                                       self.__class__.__dict__))

    def update(self, data):
        """Copy the updatable fields present in `data' onto the object."""
        for field in set(six.iterkeys(data)) - self._non_updatable_fields():
            if self._lazyhasattr(field):
                setattr(self, field, data[field])

    def save(self, context):
        """Persist pending changes by merging into a fresh session."""
        if objects.transition_schema():
            # NOTE(review): appears to mirror writes into the newer schema
            # during an upgrade window — confirm against objects module.
            self.add_forward_schema_changes()

        session = SolumBase.get_session()
        with session.begin():
            session.merge(self)

    def create(self, context):
        """Insert the object, translating duplicate-key DB errors."""
        session = SolumBase.get_session()
        try:
            with session.begin():
                session.add(self)
        except (db_exc.DBDuplicateEntry):
            self.__class__._raise_duplicate_object()

    def destroy(self, context):
        """Delete the row matching this object's id."""
        session = SolumBase.get_session()
        with session.begin():
            session.query(self.__class__).filter_by(
                id=self.id).delete()

    @classmethod
    def _raise_not_found(cls, item_id):
        """Raise a not found exception."""
        if hasattr(cls, '__resource__'):
            raise exception.ResourceNotFound(name=cls.__resource__, id=item_id)
        else:
            raise exception.ObjectNotFound(name=cls.__tablename__, id=item_id)
Base = declarative.declarative_base(cls=SolumBase)
class JSONEncodedDict(types.TypeDecorator):
    """Store an immutable structure as a JSON-encoded string column."""

    impl = types.VARCHAR

    def process_bind_param(self, value, dialect):
        # Serialize on the way into the database; NULL passes through.
        if value is None:
            return None
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        # Deserialize on the way out; NULL columns stay None.
        if value is None:
            return None
        return json.loads(value)
| jamesyli/solum | solum/objects/sqlalchemy/models.py | Python | apache-2.0 | 4,756 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.