| repo_name (string, 5-100) | ref (string, 12-67) | path (string, 4-244) | copies (string, 1-8) | content (string, 0-1.05M, nullable) |
|---|---|---|---|---|
| kongseokhwan/kulcloud-prism-chromecast-agent | refs/heads/master | pychromecast/error.py | 6 |
"""
Errors to be used by PyChromecast.
"""
class PyChromecastError(Exception):
""" Base error for PyChromecast. """
pass
class NoChromecastFoundError(PyChromecastError):
"""
When a command has to auto-discover a Chromecast and cannot find one.
"""
pass
class MultipleChromecastsFoundError(PyChromecastError):
"""
When getting a singular chromecast results in getting multiple chromecasts.
"""
pass
class ChromecastConnectionError(PyChromecastError):
""" When a connection error occurs within PyChromecast. """
pass
class LaunchError(PyChromecastError):
""" When an app fails to launch. """
pass
class PyChromecastStopped(PyChromecastError):
""" Raised when a command is invoked while the Chromecast's socket_client
is stopped.
"""
pass
class NotConnected(PyChromecastError):
"""
Raised when a command is invoked while not connected to a Chromecast.
"""
pass
class UnsupportedNamespace(PyChromecastError):
"""
Raised when trying to send a message with a namespace that is not
supported by the current running app.
"""
pass
class ControllerNotRegistered(PyChromecastError):
"""
Raised when trying to interact with a controller while it is
not registered with a ChromeCast object.
"""
pass
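# --- Illustrative usage sketch (not part of the original module) ---
# All of the errors above derive from PyChromecastError, so callers can catch
# a specific subclass first and fall back to the base class.
if __name__ == "__main__":
    try:
        raise NoChromecastFoundError("no device discovered on the network")
    except NoChromecastFoundError as err:
        print("discovery failed: %s" % err)
    except PyChromecastError:
        print("some other PyChromecast error")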
|
| mahak/neutron | refs/heads/master | neutron/db/dns_db.py | 2 |
# Copyright (c) 2016 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import dns as dns_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib.db import resource_extend
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import dns as dns_exc
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.objects import floatingip as fip_obj
from neutron.objects import network
from neutron.objects import ports as port_obj
from neutron.services.externaldns import driver
LOG = logging.getLogger(__name__)
class DNSActionsData(object):
def __init__(self, current_dns_name=None, current_dns_domain=None,
previous_dns_name=None, previous_dns_domain=None):
self.current_dns_name = current_dns_name
self.current_dns_domain = current_dns_domain
self.previous_dns_name = previous_dns_name
self.previous_dns_domain = previous_dns_domain
@resource_extend.has_resource_extenders
class DNSDbMixin(object):
"""Mixin class to add DNS methods to db_base_plugin_v2."""
_dns_driver = None
@property
def dns_driver(self):
if self._dns_driver:
return self._dns_driver
if not cfg.CONF.external_dns_driver:
return
try:
self._dns_driver = driver.ExternalDNSService.get_instance()
LOG.debug("External DNS driver loaded: %s",
cfg.CONF.external_dns_driver)
return self._dns_driver
except ImportError:
LOG.exception("ImportError exception occurred while loading "
"the external DNS service driver")
raise dns_exc.ExternalDNSDriverNotFound(
driver=cfg.CONF.external_dns_driver)
@staticmethod
@resource_extend.extends([l3_apidef.FLOATINGIPS])
def _extend_floatingip_dict_dns(floatingip_res, floatingip_db):
floatingip_res['dns_domain'] = ''
floatingip_res['dns_name'] = ''
if floatingip_db.dns:
floatingip_res['dns_domain'] = floatingip_db.dns['dns_domain']
floatingip_res['dns_name'] = floatingip_db.dns['dns_name']
return floatingip_res
def _process_dns_floatingip_create_precommit(self, context,
floatingip_data, req_data):
# expects to be called within a plugin's session
dns_domain = req_data.get(dns_apidef.DNSDOMAIN)
if not validators.is_attr_set(dns_domain):
return
if not self.dns_driver:
return
dns_name = req_data[dns_apidef.DNSNAME]
self._validate_floatingip_dns(dns_name, dns_domain)
current_dns_name, current_dns_domain = (
self._get_requested_state_for_external_dns_service_create(
context, floatingip_data, req_data))
dns_actions_data = None
if current_dns_name and current_dns_domain:
fip_obj.FloatingIPDNS(
context,
floatingip_id=floatingip_data['id'],
dns_name=req_data[dns_apidef.DNSNAME],
dns_domain=req_data[dns_apidef.DNSDOMAIN],
published_dns_name=current_dns_name,
published_dns_domain=current_dns_domain).create()
dns_actions_data = DNSActionsData(
current_dns_name=current_dns_name,
current_dns_domain=current_dns_domain)
floatingip_data['dns_name'] = dns_name
floatingip_data['dns_domain'] = dns_domain
return dns_actions_data
def _process_dns_floatingip_create_postcommit(self, context,
floatingip_data,
dns_actions_data):
if not dns_actions_data:
return
self._add_ips_to_external_dns_service(
context, dns_actions_data.current_dns_domain,
dns_actions_data.current_dns_name,
[floatingip_data['floating_ip_address']])
def _process_dns_floatingip_update_precommit(self, context,
floatingip_data):
# expects to be called within a plugin's session
if not extensions.is_extension_supported(
self._core_plugin, dns_apidef.ALIAS):
return
if not self.dns_driver:
return
dns_data_db = fip_obj.FloatingIPDNS.get_object(
context, floatingip_id=floatingip_data['id'])
if dns_data_db and dns_data_db['dns_name']:
# dns_name and dns_domain are assigned to the floating IP; it doesn't
# matter whether they are also defined for the internal port
return
current_dns_name, current_dns_domain = (
self._get_requested_state_for_external_dns_service_update(
context, floatingip_data))
if dns_data_db:
if (dns_data_db['published_dns_name'] != current_dns_name or
dns_data_db['published_dns_domain'] != current_dns_domain):
dns_actions_data = DNSActionsData(
previous_dns_name=dns_data_db['published_dns_name'],
previous_dns_domain=dns_data_db['published_dns_domain'])
if current_dns_name and current_dns_domain:
dns_data_db['published_dns_name'] = current_dns_name
dns_data_db['published_dns_domain'] = current_dns_domain
dns_actions_data.current_dns_name = current_dns_name
dns_actions_data.current_dns_domain = current_dns_domain
else:
dns_data_db.delete()
return dns_actions_data
else:
return
if current_dns_name and current_dns_domain:
fip_obj.FloatingIPDNS(
context,
floatingip_id=floatingip_data['id'],
dns_name='',
dns_domain='',
published_dns_name=current_dns_name,
published_dns_domain=current_dns_domain).create()
return DNSActionsData(current_dns_name=current_dns_name,
current_dns_domain=current_dns_domain)
def _process_dns_floatingip_update_postcommit(self, context,
floatingip_data,
dns_actions_data):
if not dns_actions_data:
return
if dns_actions_data.previous_dns_name:
self._delete_floatingip_from_external_dns_service(
context, dns_actions_data.previous_dns_domain,
dns_actions_data.previous_dns_name,
[floatingip_data['floating_ip_address']])
if dns_actions_data.current_dns_name:
self._add_ips_to_external_dns_service(
context, dns_actions_data.current_dns_domain,
dns_actions_data.current_dns_name,
[floatingip_data['floating_ip_address']])
def _process_dns_floatingip_delete(self, context, floatingip_data):
if not extensions.is_extension_supported(
self._core_plugin, dns_apidef.ALIAS):
return
dns_data_db = fip_obj.FloatingIPDNS.get_object(
context, floatingip_id=floatingip_data['id'])
if dns_data_db:
self._delete_floatingip_from_external_dns_service(
context, dns_data_db['published_dns_domain'],
dns_data_db['published_dns_name'],
[floatingip_data['floating_ip_address']])
def _validate_floatingip_dns(self, dns_name, dns_domain):
if dns_domain and not dns_name:
msg = _("dns_domain cannot be specified without a dns_name")
raise n_exc.BadRequest(resource='floatingip', msg=msg)
if dns_name and not dns_domain:
msg = _("dns_name cannot be specified without a dns_domain")
raise n_exc.BadRequest(resource='floatingip', msg=msg)
def _get_internal_port_dns_data(self, context, floatingip_data):
port_dns = port_obj.PortDNS.get_object(
context, port_id=floatingip_data['port_id'])
if not (port_dns and port_dns['dns_name']):
return None, None
net_dns = network.NetworkDNSDomain.get_net_dns_from_port(
context=context, port_id=floatingip_data['port_id'])
if not net_dns:
return port_dns['dns_name'], None
return port_dns['dns_name'], net_dns['dns_domain']
def _delete_floatingip_from_external_dns_service(self, context, dns_domain,
dns_name, records):
ips = [str(r) for r in records]
try:
self.dns_driver.delete_record_set(context, dns_domain, dns_name,
ips)
except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e:
LOG.exception("Error deleting Floating IP data from external "
"DNS service. Name: '%(name)s'. Domain: "
"'%(domain)s'. IP addresses '%(ips)s'. DNS "
"service driver message '%(message)s'",
{"name": dns_name,
"domain": dns_domain,
"message": e.msg,
"ips": ', '.join(ips)})
def _get_requested_state_for_external_dns_service_create(self, context,
floatingip_data,
req_data):
fip_dns_name = req_data[dns_apidef.DNSNAME]
if fip_dns_name:
return fip_dns_name, req_data[dns_apidef.DNSDOMAIN]
if floatingip_data['port_id']:
return self._get_internal_port_dns_data(context, floatingip_data)
return None, None
def _get_requested_state_for_external_dns_service_update(self, context,
floatingip_data):
if floatingip_data['port_id']:
return self._get_internal_port_dns_data(context, floatingip_data)
return None, None
def _add_ips_to_external_dns_service(self, context, dns_domain, dns_name,
records):
ips = [str(r) for r in records]
try:
self.dns_driver.create_record_set(context, dns_domain, dns_name,
ips)
except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e:
LOG.exception("Error publishing floating IP data in external "
"DNS service. Name: '%(name)s'. Domain: "
"'%(domain)s'. DNS service driver message "
"'%(message)s'",
{"name": dns_name,
"domain": dns_domain,
"message": e.msg})
|
| ericchang/incubator-toree | refs/heads/master | etc/pip_install/toree/_version.py | 1 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is populated when doing a make release. It should be empty by default.
|
| scrollback/kuma | refs/heads/master | vendor/packages/pylint/test/input/func_method_could_be_function.py | 6 |
# pylint: disable-msg=R0903,R0922,W0232
"""test detection of method which could be a function"""
__revision__ = None
class Toto(object):
"""bla bal abl"""
def __init__(self):
self.aaa = 2
def regular_method(self):
"""this method is a real method since it access to self"""
self.function_method()
def function_method(self):
"""this method isn' a real method since it doesn't need self"""
print 'hello'
class Base:
"""an abstract class"""
def __init__(self):
self.aaa = 2
def check(self, arg):
"""an abstract method, could not be a function"""
raise NotImplementedError
class Sub(Base):
"""a concret class"""
def check(self, arg):
"""a concret method, could not be a function since it need
polymorphism benefits
"""
return arg == 0
class Super:
"""same as before without abstract"""
x = 1
def method(self):
"""regular"""
print self.x
class Sub1(Super):
"""override method with need for self"""
def method(self):
"""no i can not be a function"""
print 42
def __len__(self):
"""no i can not be a function"""
print 42
def __cmp__(self, other):
"""no i can not be a function"""
print 42
|
| nuagenetworks/vspk-python | refs/heads/master | vspk/v5_0/fetchers/nuwirelessports_fetcher.py | 2 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUWirelessPortsFetcher(NURESTFetcher):
""" Represents a NUWirelessPorts fetcher
Notes:
This fetcher enables fetching NUWirelessPort objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUWirelessPort class that is managed.
Returns:
.NUWirelessPort: the managed class
"""
from .. import NUWirelessPort
return NUWirelessPort
|
| DataDog/gunicorn | refs/heads/master | examples/frameworks/django/djangotest/testing/views.py | 7 |
# Create your views here.
import csv
import os
from django import forms
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
import tempfile
class MsgForm(forms.Form):
subject = forms.CharField(max_length=100)
message = forms.CharField()
f = forms.FileField()
def home(request):
from django.conf import settings
print(settings.SOME_VALUE)
subject = None
message = None
size = 0
print(request.META)
if request.POST:
form = MsgForm(request.POST, request.FILES)
print(request.FILES)
if form.is_valid():
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
f = request.FILES['f']
size = int(os.fstat(f.fileno())[6])
else:
form = MsgForm()
return render_to_response('home.html', {
'form': form,
'subject': subject,
'message': message,
'size': size
}, RequestContext(request))
def acsv(request):
rows = [
{'a': 1, 'b': 2},
{'a': 3, 'b': 3}
]
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=report.csv'
writer = csv.writer(response)
writer.writerow(['a', 'b'])
for r in rows:
writer.writerow([r['a'], r['b']])
return response
|
| ARMmbed/yotta_osx_installer | refs/heads/master | workspace/lib/python2.7/site-packages/cryptography/x509/name.py | 6 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import six
from cryptography import utils
from cryptography.x509.oid import ObjectIdentifier
class NameAttribute(object):
def __init__(self, oid, value):
if not isinstance(oid, ObjectIdentifier):
raise TypeError(
"oid argument must be an ObjectIdentifier instance."
)
if not isinstance(value, six.text_type):
raise TypeError(
"value argument must be a text type."
)
self._oid = oid
self._value = value
oid = utils.read_only_property("_oid")
value = utils.read_only_property("_value")
def __eq__(self, other):
if not isinstance(other, NameAttribute):
return NotImplemented
return (
self.oid == other.oid and
self.value == other.value
)
def __ne__(self, other):
return not self == other
def __repr__(self):
return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
class Name(object):
def __init__(self, attributes):
self._attributes = attributes
def get_attributes_for_oid(self, oid):
return [i for i in self if i.oid == oid]
def __eq__(self, other):
if not isinstance(other, Name):
return NotImplemented
return self._attributes == other._attributes
def __ne__(self, other):
return not self == other
def __iter__(self):
return iter(self._attributes)
def __len__(self):
return len(self._attributes)
def __repr__(self):
return "<Name({0!r})>".format(self._attributes)
|
| 0Chencc/CTFCrackTools | refs/heads/master | Lib/Lib/weakref.py | 17 |
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Changed for Jython to use MapMaker in Google Collections
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet
from exceptions import ReferenceError
from jythonlib import MapMaker, dict_builder
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary", 'WeakSet']
class WeakValueDictionary(dict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
def __new__(cls, *args, **kw):
return WeakValueDictionaryBuilder(*args, **kw)
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
for value in self.itervalues():
yield ref(value)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return [ref(value) for value in self.itervalues()]
WeakValueDictionaryBuilder = dict_builder(MapMaker().weakValues().makeMap, WeakValueDictionary)
class WeakKeyDictionary(dict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __new__(cls, *args, **kw):
return WeakKeyDictionaryBuilder(*args, **kw)
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
for key in self.iterkeys():
yield ref(key)
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return [ref(key) for key in self.iterkeys()]
WeakKeyDictionaryBuilder = dict_builder(MapMaker().weakKeys().makeMap, WeakKeyDictionary)
# Jython does not use below, however retaining in the case of any user code that might
# be using it. Note that it is not exported.
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
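# --- Illustrative usage sketch (not part of the original module; Jython-only,
# since the mappings above are built on Google Collections' MapMaker) ---
if __name__ == "__main__":
    class Token(object):
        pass
    registry = WeakValueDictionary()
    token = Token()
    registry['current'] = token
    print('current' in registry)  # True while a strong reference to token exists
    del token  # once token is garbage collected, the entry is discarded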
|
| leohahn/TIM | refs/heads/master | TIM/urls.py | 1 |
"""TIM URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.http import HttpResponseRedirect
from tim_app import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# Redirect user to /tim if no path is specified.
url(r'^$', lambda r: HttpResponseRedirect('tim/ranking')),
# Admin page.
url(r'^admin/', admin.site.urls),
# Initial app page.
url(r'^tim/', include('tim_app.urls')),
# Our REST api
url(r'^api/', include('api.urls'))
]
# Icon folder
if settings.DEBUG is True:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
| romankagan/DDBWorkbench | refs/heads/master | python/testData/inspections/AddCallSuper_after.py | 9 |
class A:
def __init__(self, c, a = 5, *arg, **kwargs):
pass
class B(A):
def __init__(self, r, c, b=6, *args, **kwargs):
A.__init__(self, c, *args, **kwargs)
print "Constructor B was called"
|
| towerjoo/mindsbook | refs/heads/master | django/core/cache/backends/base.py | 15 |
"Base Cache class."
import warnings
from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning
class InvalidCacheBackendError(ImproperlyConfigured):
pass
class CacheKeyWarning(DjangoRuntimeWarning):
pass
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
class BaseCache(object):
def __init__(self, params):
timeout = params.get('timeout', 300)
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
max_entries = params.get('max_entries', 300)
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get('cull_frequency', 3)
try:
self._cull_frequency = int(cull_frequency)
except (ValueError, TypeError):
self._cull_frequency = 3
def add(self, key, value, timeout=None):
"""
Set a value in the cache if the key does not already exist. If
timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
Returns True if the value was stored, False otherwise.
"""
raise NotImplementedError
def get(self, key, default=None):
"""
Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
"""
raise NotImplementedError
def set(self, key, value, timeout=None):
"""
Set a value in the cache. If timeout is given, that timeout will be
used for the key; otherwise the default cache timeout will be used.
"""
raise NotImplementedError
def delete(self, key):
"""
Delete a key from the cache, failing silently.
"""
raise NotImplementedError
def get_many(self, keys):
"""
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Returns a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
d = {}
for k in keys:
val = self.get(k)
if val is not None:
d[k] = val
return d
def has_key(self, key):
"""
Returns True if the key is in the cache and has not expired.
"""
return self.get(key) is not None
def incr(self, key, delta=1):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
if key not in self:
raise ValueError("Key '%s' not found" % key)
new_value = self.get(key) + delta
self.set(key, new_value)
return new_value
def decr(self, key, delta=1):
"""
Subtract delta from value in the cache. If the key does not exist, raise
a ValueError exception.
"""
return self.incr(key, -delta)
def __contains__(self, key):
"""
Returns True if the key is in the cache and has not expired.
"""
# This is a separate method, rather than just a copy of has_key(),
# so that it always has the same functionality as has_key(), even
# if a subclass overrides it.
return self.has_key(key)
def set_many(self, data, timeout=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. For certain backends (memcached), this is much more efficient
than calling set() multiple times.
If timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
"""
for key, value in data.items():
self.set(key, value, timeout)
def delete_many(self, keys):
"""
Set a bunch of values in the cache at once. For certain backends
(memcached), this is much more efficient than calling delete() multiple
times.
"""
for key in keys:
self.delete(key)
def clear(self):
"""Remove *all* values from the cache at once."""
raise NotImplementedError
def validate_key(self, key):
"""
Warn about keys that would not be portable to the memcached
backend. This encourages (but does not force) writing backend-portable
cache code.
"""
if len(key) > MEMCACHE_MAX_KEY_LENGTH:
warnings.warn('Cache key will cause errors if used with memcached: '
'%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
CacheKeyWarning)
for char in key:
if ord(char) < 33 or ord(char) == 127:
warnings.warn('Cache key contains characters that will cause '
'errors if used with memcached: %r' % key,
CacheKeyWarning)
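# --- Illustrative sketch (not part of Django): a minimal dict-backed subclass
# showing the contract of the abstract methods above. It ignores timeouts and
# culling, so it only demonstrates the interface; incr()/decr() and the
# get_many()/set_many() helpers then work unchanged on top of it.
class _DictCache(BaseCache):
    def __init__(self, params=None):
        BaseCache.__init__(self, params or {})
        self._data = {}
    def add(self, key, value, timeout=None):
        if key in self._data:
            return False
        self._data[key] = value
        return True
    def get(self, key, default=None):
        return self._data.get(key, default)
    def set(self, key, value, timeout=None):
        self._data[key] = value
    def delete(self, key):
        self._data.pop(key, None)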
|
| UnbDroid/robomagellan | refs/heads/master | Codigos/Raspberry/ROS/catkin_Leticia/build/catkin_generated/installspace/_setup_util.py | 12 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'arm-linux-gnueabihf')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'arm-linux-gnueabihf', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in ``paths`` without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/pi/Documents/robomagellan/Codigos/Raspberry/ROS/catkin_Bauchspiess/devel;/home/pi/Documents/robomagellan/Codigos/Raspberry/ROS/catkin_Leticia/devel;/home/pi/Documents/catkin_rosserial/devel;/home/pi/Documents/robomagellan/Codigos/Raspberry/ROS/catkin_camila/devel;/home/pi/Documents/robomagellan/Codigos/Raspberry/ROS/catkin_BUZZ/devel;/home/pi/Documents/desenvolvimentoRos/devel;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
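# --- Illustrative note (not part of the generated script) ---
# On a Linux host this prints shell assignments roughly like:
#   # prepend folders of workspaces to environment variables
#   export CMAKE_PREFIX_PATH="<workspace>/devel:/opt/ros/indigo"
#   export LD_LIBRARY_PATH="<workspace>/devel/lib:$LD_LIBRARY_PATH"
# which the sourcing setup.SHELL script then evaluates; <workspace> stands in
# for the actual paths baked into CMAKE_PREFIX_PATH above.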
|
| the-adrian/KernotekV2.0 | refs/heads/master | venv/lib/python2.7/site-packages/werkzeug/testsuite/contrib/cache.py | 94 |
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
try:
import redis
try:
from redis.exceptions import ConnectionError as RedisConnectionError
cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
except RedisConnectionError:
redis = None
except ImportError:
redis = None
try:
import pylibmc as memcache
except ImportError:
try:
from google.appengine.api import memcache
except ImportError:
try:
import memcache
except ImportError:
memcache = None
class SimpleCacheTestCase(WerkzeugTestCase):
def test_get_dict(self):
c = cache.SimpleCache()
c.set('a', 'a')
c.set('b', 'b')
d = c.get_dict('a', 'b')
assert 'a' in d
assert 'a' == d['a']
assert 'b' in d
assert 'b' == d['b']
def test_set_many(self):
c = cache.SimpleCache()
c.set_many({0: 0, 1: 1, 2: 4})
assert c.get(2) == 4
c.set_many((i, i*i) for i in range(3))
assert c.get(2) == 4
class FileSystemCacheTestCase(WerkzeugTestCase):
def test_set_get(self):
tmp_dir = tempfile.mkdtemp()
try:
c = cache.FileSystemCache(cache_dir=tmp_dir)
for i in range(3):
c.set(str(i), i * i)
for i in range(3):
result = c.get(str(i))
assert result == i * i
finally:
shutil.rmtree(tmp_dir)
def test_filesystemcache_prune(self):
THRESHOLD = 13
tmp_dir = tempfile.mkdtemp()
c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
for i in range(2 * THRESHOLD):
c.set(str(i), i)
cache_files = os.listdir(tmp_dir)
shutil.rmtree(tmp_dir)
assert len(cache_files) <= THRESHOLD
def test_filesystemcache_clear(self):
tmp_dir = tempfile.mkdtemp()
c = cache.FileSystemCache(cache_dir=tmp_dir)
c.set('foo', 'bar')
cache_files = os.listdir(tmp_dir)
assert len(cache_files) == 1
c.clear()
cache_files = os.listdir(tmp_dir)
assert len(cache_files) == 0
shutil.rmtree(tmp_dir)
class RedisCacheTestCase(WerkzeugTestCase):
def make_cache(self):
return cache.RedisCache(key_prefix='werkzeug-test-case:')
def teardown(self):
self.make_cache().clear()
def test_compat(self):
c = self.make_cache()
c._client.set(c.key_prefix + 'foo', b'Awesome')
self.assert_equal(c.get('foo'), b'Awesome')
c._client.set(c.key_prefix + 'foo', b'42')
self.assert_equal(c.get('foo'), 42)
def test_get_set(self):
c = self.make_cache()
c.set('foo', ['bar'])
assert c.get('foo') == ['bar']
def test_get_many(self):
c = self.make_cache()
c.set('foo', ['bar'])
c.set('spam', 'eggs')
assert c.get_many('foo', 'spam') == [['bar'], 'eggs']
def test_set_many(self):
c = self.make_cache()
c.set_many({'foo': 'bar', 'spam': ['eggs']})
assert c.get('foo') == 'bar'
assert c.get('spam') == ['eggs']
def test_expire(self):
c = self.make_cache()
c.set('foo', 'bar', 1)
time.sleep(2)
assert c.get('foo') is None
def test_add(self):
c = self.make_cache()
# sanity check that add() works like set()
c.add('foo', 'bar')
assert c.get('foo') == 'bar'
c.add('foo', 'qux')
assert c.get('foo') == 'bar'
def test_delete(self):
c = self.make_cache()
c.add('foo', 'bar')
assert c.get('foo') == 'bar'
c.delete('foo')
assert c.get('foo') is None
def test_delete_many(self):
c = self.make_cache()
c.add('foo', 'bar')
c.add('spam', 'eggs')
c.delete_many('foo', 'spam')
assert c.get('foo') is None
assert c.get('spam') is None
def test_inc_dec(self):
c = self.make_cache()
c.set('foo', 1)
self.assert_equal(c.inc('foo'), 2)
self.assert_equal(c.dec('foo'), 1)
c.delete('foo')
def test_true_false(self):
c = self.make_cache()
c.set('foo', True)
assert c.get('foo') == True
c.set('bar', False)
assert c.get('bar') == False
class MemcachedCacheTestCase(WerkzeugTestCase):
def make_cache(self):
return cache.MemcachedCache(key_prefix='werkzeug-test-case:')
def teardown(self):
self.make_cache().clear()
def test_compat(self):
c = self.make_cache()
c._client.set(c.key_prefix + b'foo', 'bar')
self.assert_equal(c.get('foo'), 'bar')
def test_get_set(self):
c = self.make_cache()
c.set('foo', 'bar')
self.assert_equal(c.get('foo'), 'bar')
def test_get_many(self):
c = self.make_cache()
c.set('foo', 'bar')
c.set('spam', 'eggs')
self.assert_equal(c.get_many('foo', 'spam'), ['bar', 'eggs'])
def test_set_many(self):
c = self.make_cache()
c.set_many({'foo': 'bar', 'spam': 'eggs'})
self.assert_equal(c.get('foo'), 'bar')
self.assert_equal(c.get('spam'), 'eggs')
def test_expire(self):
c = self.make_cache()
c.set('foo', 'bar', 1)
time.sleep(2)
self.assert_is_none(c.get('foo'))
def test_add(self):
c = self.make_cache()
c.add('foo', 'bar')
self.assert_equal(c.get('foo'), 'bar')
c.add('foo', 'baz')
self.assert_equal(c.get('foo'), 'bar')
def test_delete(self):
c = self.make_cache()
c.add('foo', 'bar')
self.assert_equal(c.get('foo'), 'bar')
c.delete('foo')
self.assert_is_none(c.get('foo'))
def test_delete_many(self):
c = self.make_cache()
c.add('foo', 'bar')
c.add('spam', 'eggs')
c.delete_many('foo', 'spam')
self.assert_is_none(c.get('foo'))
self.assert_is_none(c.get('spam'))
def test_inc_dec(self):
c = self.make_cache()
c.set('foo', 1)
# XXX: Is this an intended difference?
c.inc('foo')
self.assert_equal(c.get('foo'), 2)
c.dec('foo')
self.assert_equal(c.get('foo'), 1)
def test_true_false(self):
c = self.make_cache()
c.set('foo', True)
self.assert_equal(c.get('foo'), True)
c.set('bar', False)
self.assert_equal(c.get('bar'), False)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SimpleCacheTestCase))
suite.addTest(unittest.makeSuite(FileSystemCacheTestCase))
if redis is not None:
suite.addTest(unittest.makeSuite(RedisCacheTestCase))
if memcache is not None:
suite.addTest(unittest.makeSuite(MemcachedCacheTestCase))
return suite
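# Hedged convenience (not in the original file): run the module's own suite()
# factory directly instead of going through the werkzeug test runner.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())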
|
| plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/scatterpolar/textfont/_size.py | 1 |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="scatterpolar.textfont", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
|
| Pirata-Repository/Pirata | refs/heads/master | plugin.program.addoninstaller/t0mm0_common_addon.py | 37 |
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cgi,re,os
try:
import cPickle as pickle
except:
import pickle
import unicodedata
import urllib
import xbmc,xbmcaddon,xbmcgui,xbmcplugin
class Addon:
'''
This class provides a lot of code that is used across many XBMC addons
in the hope that it will simplify some of the common tasks an addon needs
to perform.
Mostly this is achieved by providing a wrapper around commonly used parts
of :mod:`xbmc`, :mod:`xbmcaddon`, :mod:`xbmcgui` and :mod:`xbmcplugin`.
You probably want to have exactly one instance of this class in your addon
which you can call from anywhere in your code.
Example::
import sys
from t0mm0.common.addon import Addon
addon = Addon('my.plugin.id', argv=sys.argv)
'''
def __init__(self, addon_id, argv=None):
'''
Args:
addon_id (str): Your addon's id (eg. 'plugin.video.t0mm0.test').
Kwargs:
argv (list): List of arguments passed to your addon if applicable
(eg. sys.argv).
'''
self.addon = xbmcaddon.Addon(id=addon_id)
if argv:
self.url = argv[0]
self.handle = int(argv[1])
self.queries = self.parse_query(argv[2][1:])
def get_author(self):
'''Returns the addon author as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('author')
def get_changelog(self):
'''Returns the addon changelog.'''
return self.addon.getAddonInfo('changelog')
def get_description(self):
'''Returns the addon description as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('description')
def get_disclaimer(self):
'''Returns the addon disclaimer as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('disclaimer')
def get_fanart(self):
'''Returns the full path to the addon fanart.'''
return self.addon.getAddonInfo('fanart')
def get_icon(self):
'''Returns the full path to the addon icon.'''
return self.addon.getAddonInfo('icon')
def get_id(self):
'''Returns the addon id as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('id')
def get_name(self):
'''Returns the addon name as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('name')
def get_path(self):
'''Returns the full path to the addon directory.'''
return self.addon.getAddonInfo('path')
def get_profile(self):
'''
Returns the full path to the addon profile directory
(useful for storing files needed by the addon such as cookies).
'''
return xbmc.translatePath(self.addon.getAddonInfo('profile'))
def get_stars(self):
'''Returns the number of stars for this addon.'''
return self.addon.getAddonInfo('stars')
def get_summary(self):
'''Returns the addon summary as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('summary')
def get_type(self):
'''
Returns the addon type as defined in ``addon.xml``
(eg. xbmc.python.pluginsource).
'''
return self.addon.getAddonInfo('type')
def get_version(self):
'''Returns the addon version as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('version')
def get_setting(self, setting):
'''
Returns an addon setting. Settings must be defined in your addon's
``resources/settings.xml`` file.
Args:
setting (str): Name of the setting to be retrieved.
Returns:
str containing the requested setting.
'''
return self.addon.getSetting(setting)
def get_string(self, string_id):
'''
Returns a localized string. Strings must be defined in your addon's
``resources/language/[lang_name]/strings.xml`` file.
Args:
string_id (int): id of the translated string to retrieve.
Returns:
str containing the localized requested string.
'''
return self.addon.getLocalizedString(string_id)
def parse_query(self, query, defaults={'mode': 'main'}):
'''
Parse a query string as used in a URL or passed to your addon by XBMC.
Example:
>>> addon.parse_query('name=test&type=basic')
{'mode': 'main', 'name': 'test', 'type': 'basic'}
Args:
query (str): A query string.
Kwargs:
defaults (dict): Default key/value pairs to start the result from.
Returns:
A dict of the parsed key/value pairs merged over ``defaults``. If a
key is repeated in the query string its value will be a list
containing all of that key's values.
'''
queries = cgi.parse_qs(query)
q = defaults
for key, value in queries.items():
if len(value) == 1: q[key] = value[0]
else: q[key] = value
return q
def build_plugin_url(self, queries):
'''
Returns a ``plugin://`` URL which can be used to call the addon with
the specified queries.
Example:
>>> addon.build_plugin_url({'name': 'test', 'type': 'basic'})
'plugin://your.plugin.id/?name=test&type=basic'
Args:
queries (dict): A dictionary of keys/values to be added to the
``plugin://`` URL.
Returns:
A string containing a fully formed ``plugin://`` URL.
'''
out_dict = {}
for k, v in queries.iteritems():
if isinstance(v, unicode): v = v.encode('utf8')
elif isinstance(v, str):
# Validate that the str is already UTF-8 encoded;
# .decode() raises UnicodeDecodeError if it is not.
v.decode('utf8')
out_dict[k] = v
return self.url + '?' + urllib.urlencode(out_dict)
def log(self, msg, level=xbmc.LOGNOTICE):
'''
Writes a string to the XBMC log file. The addon name is inserted into
the beginning of the message automatically to help you find relevant
messages in the log file.
The available log levels are defined in the :mod:`xbmc` module and are
currently as follows::
xbmc.LOGDEBUG = 0
xbmc.LOGERROR = 4
xbmc.LOGFATAL = 6
xbmc.LOGINFO = 1
xbmc.LOGNONE = 7
xbmc.LOGNOTICE = 2
xbmc.LOGSEVERE = 5
xbmc.LOGWARNING = 3
Args:
msg (str or unicode): The message to be written to the log file.
Kwargs:
level (int): The XBMC log level to write at.
'''
#msg = unicodedata.normalize('NFKD', unicode(msg)).encode('ascii','ignore')
xbmc.log('%s: %s' % (self.get_name(), msg), level)
def log_error(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGERROR`` error level. Use when something has gone wrong in
your addon code. This will show up in the log prefixed with 'ERROR:'
whether you have debugging switched on or not.
'''
self.log(msg, xbmc.LOGERROR)
def log_debug(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGDEBUG`` error level. Use this when you want to print out lots
of detailed information that is only useful for debugging. This will
show up in the log only when debugging is enabled in the XBMC settings,
and will be prefixed with 'DEBUG:'.
'''
self.log(msg, xbmc.LOGDEBUG)
def log_notice(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGNOTICE`` error level. Use for general log messages. This will
show up in the log prefixed with 'NOTICE:' whether you have debugging
switched on or not.
'''
self.log(msg, xbmc.LOGNOTICE)
def show_ok_dialog(self, msg, title=None, is_error=False):
'''
Display an XBMC dialog with a message and a single 'OK' button. The
message is also written to the XBMC log file at the appropriate log
level.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Example::
addon.show_ok_dialog(['My message'], 'My Addon')
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
Kwargs:
title (str): String to be displayed as the title of the dialog box.
Defaults to the addon name.
is_error (bool): If ``True``, the log message will be written at
the ERROR log level, otherwise NOTICE will be used.
'''
if not title: title = self.get_name()
log_msg = ' '.join(msg)
while len(msg) < 3: msg.append('')
if is_error: self.log_error(log_msg)
else: self.log_notice(log_msg)
xbmcgui.Dialog().ok(title, msg[0], msg[1], msg[2])
def show_error_dialog(self, msg):
'''
Convenience method to show an XBMC dialog box with a single OK button
and also write the message to the log file at the ERROR log level.
The title of the dialog will be the addon's name with the prefix
'Error: '.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
'''
self.show_ok_dialog(msg, 'Error: %s' % self.get_name(), True)
def show_small_popup(self, title='', msg='', delay=5000, image=''):
'''
Displays a small popup box in the lower right corner. The default delay
is 5 seconds.
Code inspired by anarchintosh and daledude's Icefilms addon.
Example::
import os
logo = os.path.join(addon.get_path(), 'art','logo.jpg')
addon.show_small_popup('MyAddonName','Is now loaded enjoy', 5000, logo)
Kwargs:
title (str): title to be displayed at the top of the box
msg (str): Main message body
delay (int): delay in milliseconds until it disappears
image (str): Path to the image you want to display
'''
xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")' % (title, msg, delay, image))
def show_countdown(self, time_to_wait, title='', text=''):
'''
Show a countdown dialog with a progress bar for XBMC while delaying
execution. Necessary for some file hosters, e.g. megaupload.
The original version of this code came from Anarchintosh.
Args:
time_to_wait (int): number of seconds to pause for.
Kwargs:
title (str): Displayed in the title of the countdown dialog. Default
is blank.
text (str): A line of text to be displayed in the dialog. Default
is blank.
Returns:
``True`` if countdown is allowed to complete, ``False`` if the
user cancelled the countdown.
'''
dialog = xbmcgui.DialogProgress()
ret = dialog.create(title)
self.log_notice('waiting %d secs' % time_to_wait)
secs = 0
increment = 100 / time_to_wait
cancelled = False
while secs <= time_to_wait:
if (dialog.iscanceled()): cancelled = True; break
if secs != 0: xbmc.sleep(1000)
secs_left = time_to_wait - secs
if secs_left == 0: percent = 100
else: percent = increment * secs
remaining_display = ('Wait %d seconds for the ' + 'video stream to activate...') % secs_left
dialog.update(percent, text, remaining_display)
secs += 1
if cancelled == True: self.log_notice('countdown cancelled'); return False
else: self.log_debug('countdown finished waiting'); return True
def show_settings(self):
'''Shows the settings dialog for this addon.'''
self.addon.openSettings()
def resolve_url(self, stream_url):
'''
Tell XBMC that you have resolved a URL (or not!).
This method should be called as follows:
#. The user selects a list item that has previously had ``isPlayable``
set (this is true for items added with :meth:`add_item`,
:meth:`add_music_item` or :meth:`add_video_item`)
#. Your code resolves the item requested by the user to a media URL
#. Your addon calls this method with the resolved URL
Args:
stream_url (str or ``False``): If a string, tell XBMC that the
media URL has been successfully resolved to stream_url. If ``False``
or an empty string, tell XBMC the resolving failed and pop up an
error message.
'''
if stream_url:
self.log_debug('resolved to: %s' % stream_url)
xbmcplugin.setResolvedUrl(self.handle, True, xbmcgui.ListItem(path=stream_url))
else:
self.show_error_dialog(['sorry, failed to resolve URL :('])
xbmcplugin.setResolvedUrl(self.handle, False, xbmcgui.ListItem())
def get_playlist(self, pl_type, new=False):
'''
Return a :class:`xbmc.Playlist` object of the specified type.
The available playlist types are defined in the :mod:`xbmc` module and
are currently as follows::
xbmc.PLAYLIST_MUSIC = 0
xbmc.PLAYLIST_VIDEO = 1
.. seealso::
:meth:`get_music_playlist`, :meth:`get_video_playlist`
Args:
pl_type (int): The type of playlist to get.
new (bool): If ``False`` (default), get the current
:class:`xbmc.Playlist` object of the type specified. If ``True``
then return a new blank :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
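        Example (a sketch; assumes ``addon`` is an instance of this class)::
            playlist = addon.get_playlist(xbmc.PLAYLIST_VIDEO, new=True)
            # items added with ``playlist=playlist`` will now queue up here
            # instead of appearing in the directory listing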
'''
pl = xbmc.PlayList(pl_type)
if new: pl.clear()
return pl
def get_music_playlist(self, new=False):
'''
Convenience method to return a music :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current music
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
music :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
        return self.get_playlist(xbmc.PLAYLIST_MUSIC, new)
def get_video_playlist(self, new=False):
'''
Convenience method to return a video :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current video
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
video :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
        return self.get_playlist(xbmc.PLAYLIST_VIDEO, new)
def add_item(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', resolved=False, total_items=0, playlist=False, item_type='video', is_folder=False):
'''
Adds an item to the list of entries to be displayed in XBMC or to a
playlist.
Use this method when you want users to be able to select this item to
start playback of a media file. ``queries`` is a dict that will be sent
back to the addon when this item is selected::
add_item({'host': 'youtube.com', 'media_id': 'ABC123XYZ'}, {'title': 'A youtube vid'})
will add a link to::
plugin://your.plugin.id/?host=youtube.com&media_id=ABC123XYZ
.. seealso::
:meth:`add_music_item`, :meth:`add_video_item`,
:meth:`add_directory`
Args:
queries (dict): A set of keys/values to be sent to the addon when
the user selects this item.
infolabels (dict): A dictionary of information about this media
(see the `XBMC Wiki InfoLabels entry
<http://wiki.xbmc.org/?title=InfoLabels>`_).
Kwargs:
contextmenu_items (list): A list of contextmenu items
            context_replace (bool): If ``True``, replace the default XBMC
            context menu items with ``contextmenu_items``.
img (str): A URL to an image file to be used as an icon for this
entry.
fanart (str): A URL to a fanart image for this entry.
            resolved (str): If not empty, ``queries`` will be ignored and
            instead the added item will be the exact contents of ``resolved``.
total_items (int): Total number of items to be added in this list.
If supplied it enables XBMC to show a progress bar as the list of
items is being built.
playlist (playlist object): If ``False`` (default), the item will
be added to the list of entries to be displayed in this directory.
If a playlist object is passed (see :meth:`get_playlist`) then
            the item will be added to the playlist instead.
item_type (str): The type of item to add (eg. 'music', 'video' or
'pictures')
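            is_folder (bool): ``True`` if this item is a folder that can be
            browsed into rather than played directly. Default is ``False``.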
'''
infolabels = self.unescape_dict(infolabels)
if not resolved:
if not is_folder: queries['play'] = 'True'
play = self.build_plugin_url(queries)
else: play = resolved
listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img, thumbnailImage=img)
listitem.setInfo(item_type, infolabels)
listitem.setProperty('IsPlayable', 'true')
listitem.setProperty('fanart_image', fanart)
if contextmenu_items: listitem.addContextMenuItems(contextmenu_items, replaceItems=context_replace)
if playlist is not False:
self.log_debug('adding item: %s - %s to playlist' % \
(infolabels['title'], play))
playlist.add(play, listitem)
else:
self.log_debug('adding item: %s - %s' % (infolabels['title'], play))
xbmcplugin.addDirectoryItem(self.handle, play, listitem, isFolder=is_folder, totalItems=total_items)
def add_video_item(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a video item to the directory list or a
playlist.
        See :meth:`add_item` for full information
'''
self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart, resolved, total_items, playlist, item_type='video')
def add_music_item(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a music item to the directory list or a
playlist.
        See :meth:`add_item` for full information
'''
        self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart, resolved, total_items, playlist, item_type='music')
def add_directory(self, queries, infolabels, contextmenu_items='', context_replace=False, img='', fanart='', total_items=0, is_folder=True):
'''
Convenience method to add a directory to the display list or a
playlist.
        See :meth:`add_item` for full information
'''
self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart, total_items=total_items, resolved=self.build_plugin_url(queries), is_folder=is_folder)
def end_of_directory(self):
'''Tell XBMC that we have finished adding items to this directory.'''
xbmcplugin.endOfDirectory(self.handle)
def _decode_callback(self, matches):
'''Callback method used by :meth:`decode`.'''
id = matches.group(1)
try: return unichr(int(id))
except: return id
def decode(self, data):
'''
        Converts numeric HTML entities such as ``&#044;`` to the correct
        characters. It is called by :meth:`unescape` and so it is not required
        to call it directly.
This method was found `on the web <http://stackoverflow.com/questions/1208916/decoding-html-entities-with-python/1208931#1208931>`_
Args:
data (str): String to be cleaned.
Returns:
Cleaned string.
'''
        return re.sub(r"&#(\d+)(;|(?=\s))", self._decode_callback, data).strip()
def unescape(self, text):
'''
Decodes HTML entities in a string.
You can add more entities to the ``rep`` dictionary.
Args:
text (str): String to be cleaned.
Returns:
Cleaned string.
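        Example (a small sketch)::
            clean = addon.unescape('Tom &amp; Jerry')
            # clean == 'Tom & Jerry'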
'''
try:
text = self.decode(text)
            rep = {'&lt;': '<',
                   '&gt;': '>',
                   '&quot;': '"',
                   '&rsquo;': '\'',
                   '&acute;': '\'',
                   }
            for s, r in rep.items():
                text = text.replace(s, r)
            # this has to be last:
            text = text.replace("&amp;", "&")
#we don't want to fiddle with non-string types
except TypeError: pass
return text
def unescape_dict(self, d):
'''
Calls :meth:`unescape` on all values in a dictionary.
Args:
d (dict): A dictionary containing string values
Returns:
A dictionary with HTML entities removed from the values.
'''
out = {}
for key, value in d.items(): out[key] = self.unescape(value)
return out
def save_data(self, filename, data):
'''
Saves the data structure using pickle. If the addon data path does
not exist it will be automatically created. This save function has
the same restrictions as the pickle module.
Args:
filename (string): name of the file you want to save data to. This
file will be saved in your addon's profile directory.
data (data object/string): you want to save.
Returns:
True on success
False on failure
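        Example (a sketch; the filename is a placeholder and ``data`` must be
        picklable)::
            favourites = {'title': 'My video', 'url': 'http://example.com/1'}
            addon.save_data('favourites.db', favourites)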
'''
profile_path = self.get_profile()
try: os.makedirs(profile_path)
        except OSError: pass
save_path = os.path.join(profile_path, filename)
        try:
            with open(save_path, 'wb') as f:
                pickle.dump(data, f)
            return True
        except (IOError, pickle.PickleError): return False
def load_data(self,filename):
'''
        Loads the data that was saved with :meth:`save_data` and returns
        the data structure.
Args:
filename (string): Name of the file you want to load data from. This
file will be loaded from your addons profile directory.
Returns:
            Data structure on success
False on failure
'''
profile_path = self.get_profile()
load_path = os.path.join(profile_path, filename)
if not os.path.isfile(load_path): self.log_debug('%s does not exist' % load_path); return False
        try:
            with open(load_path, 'rb') as f:
                data = pickle.load(f)
        except Exception: return False
return data
|
ccpgames/eve-metrics
|
refs/heads/master
|
web2py/applications/admin/controllers/appadmin.py
|
1
|
# -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
try:
import pygraphviz as pgv
except ImportError:
pgv = None
response.subtitle = 'Database Administration (appadmin)'
# ## critical --- make a copy of the environment
global_env = copy.copy(globals())
global_env['datetime'] = datetime
http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
hosts = (http_host, socket.gethostname(),
socket.gethostbyname(http_host),
'::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
hosts = (http_host, )
if request.env.http_x_forwarded_for or request.is_https:
session.secure()
#!!! CCP EXTENSION
# elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"):
# raise HTTP(200, T('appadmin is disabled because insecure channel'))
if request.function in ('auth_manage','manage') and 'auth' in globals():
auth.requires_membership(auth.settings.manager_group_role)(lambda: None)()
menu = False
elif (request.application == 'admin' and not session.authorized) or \
(request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
redirect(URL('admin', 'default', 'index',
vars=dict(send=URL(args=request.args, vars=request.vars))))
else:
menu = True
ignore_rw = True
response.view = 'appadmin.html'
if menu:
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
args=[request.application])], [T('db'), False,
URL('index')], [T('state'), False,
URL('state')], [T('cache'), False,
URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
if False and request.tickets_db:
from gluon.restricted import TicketStorage
ts = TicketStorage()
ts._get_table(request.tickets_db, ts.tablename, request.application)
def get_databases(request):
dbs = {}
for (key, value) in global_env.items():
cond = False
try:
cond = isinstance(value, GQLDB)
except:
cond = isinstance(value, SQLDB)
if cond:
dbs[key] = value
return dbs
databases = get_databases(None)
def eval_in_global_env(text):
exec ('_ret=%s' % text, {}, global_env)
return global_env['_ret']
def get_database(request):
if request.args and request.args[0] in databases:
return eval_in_global_env(request.args[0])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_table(request):
db = get_database(request)
if len(request.args) > 1 and request.args[1] in db.tables:
return (db, request.args[1])
else:
session.flash = T('invalid request')
redirect(URL('index'))
def get_query(request):
try:
return eval_in_global_env(request.vars.query)
except Exception:
return None
def query_by_table_type(tablename, db, request=request):
keyed = hasattr(db[tablename], '_primarykey')
if keyed:
firstkey = db[tablename][db[tablename]._primarykey[0]]
cond = '>0'
if firstkey.type in ['string', 'text']:
cond = '!=""'
qry = '%s.%s.%s%s' % (
request.args[0], request.args[1], firstkey.name, cond)
else:
qry = '%s.%s.id>0' % tuple(request.args[:2])
return qry
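# For illustration: with request.args == ['db', 'person'] and a plain
# id-keyed table, this builds the query string 'db.person.id>0'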
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
return dict(databases=databases)
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
(db, table) = get_table(request)
form = SQLFORM(db[table], ignore_rw=ignore_rw)
if form.accepts(request.vars, session):
response.flash = T('new record inserted')
return dict(form=form, table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
import os
db = get_database(request)
return response.download(request, db)
def csv():
import gluon.contenttype
response.headers['Content-Type'] = \
gluon.contenttype.contenttype('.csv')
db = get_database(request)
query = get_query(request)
if not query:
return None
response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
% tuple(request.vars.query.split('.')[:2])
return str(db(query, ignore_common_filters=True).select())
def import_csv(table, file):
table.import_from_csv_file(file)
def select():
import re
db = get_database(request)
dbname = request.args[0]
    regex = re.compile(r'(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile(r'(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
if request.vars.query:
match = regex.match(request.vars.query)
if match:
request.vars.query = '%s.%s.%s==%s' % (request.args[0],
match.group('table'), match.group('field'),
match.group('value'))
else:
request.vars.query = session.last_query
query = get_query(request)
if request.vars.start:
start = int(request.vars.start)
else:
start = 0
nrows = 0
stop = start + 100
table = None
rows = []
orderby = request.vars.orderby
if orderby:
orderby = dbname + '.' + orderby
if orderby == session.last_orderby:
if orderby[0] == '~':
orderby = orderby[1:]
else:
orderby = '~' + orderby
session.last_orderby = orderby
session.last_query = request.vars.query
form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
_name='query', _value=request.vars.query or '',
requires=IS_NOT_EMPTY(
error_message=T("Cannot be empty")))), TR(T('Update:'),
INPUT(_name='update_check', _type='checkbox',
value=False), INPUT(_style='width:400px',
_name='update_fields', _value=request.vars.update_fields
or '')), TR(T('Delete:'), INPUT(_name='delete_check',
_class='delete', _type='checkbox', value=False), ''),
TR('', '', INPUT(_type='submit', _value=T('submit')))),
_action=URL(r=request, args=request.args))
tb = None
if form.accepts(request.vars, formname=None):
        regex = re.compile(request.args[0] + r'\.(?P<table>\w+)\..+')
match = regex.match(form.vars.query.strip())
if match:
table = match.group('table')
try:
nrows = db(query).count()
if form.vars.update_check and form.vars.update_fields:
db(query).update(**eval_in_global_env('dict(%s)'
% form.vars.update_fields))
response.flash = T('%s %%{row} updated', nrows)
elif form.vars.delete_check:
db(query).delete()
response.flash = T('%s %%{row} deleted', nrows)
nrows = db(query).count()
if orderby:
rows = db(query, ignore_common_filters=True).select(limitby=(
start, stop), orderby=eval_in_global_env(orderby))
else:
rows = db(query, ignore_common_filters=True).select(
limitby=(start, stop))
except Exception, e:
import traceback
tb = traceback.format_exc()
(rows, nrows) = ([], 0)
response.flash = DIV(T('Invalid Query'), PRE(str(e)))
# begin handle upload csv
csv_table = table or request.vars.table
if csv_table:
formcsv = FORM(str(T('or import from csv file')) + " ",
INPUT(_type='file', _name='csvfile'),
INPUT(_type='hidden', _value=csv_table, _name='table'),
INPUT(_type='submit', _value=T('import')))
else:
formcsv = None
if formcsv and formcsv.process().accepted:
try:
import_csv(db[request.vars.table],
request.vars.csvfile.file)
response.flash = T('data uploaded')
except Exception, e:
response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
# end handle upload csv
return dict(
form=form,
table=table,
start=start,
stop=stop,
nrows=nrows,
rows=rows,
query=request.vars.query,
formcsv=formcsv,
tb=tb,
)
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
(db, table) = get_table(request)
keyed = hasattr(db[table], '_primarykey')
record = None
db[table]._common_filter = None
if keyed:
key = [f for f in request.vars if f in db[table]._primarykey]
if key:
record = db(db[table][key[0]] == request.vars[key[
0]]).select().first()
else:
record = db(db[table].id == request.args(
2)).select().first()
if not record:
qry = query_by_table_type(table, db)
session.flash = T('record does not exist')
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
if keyed:
for k in db[table]._primarykey:
db[table][k].writable = False
form = SQLFORM(
db[table], record, deletable=True, delete_label=T('Check to delete'),
ignore_rw=ignore_rw and not keyed,
linkto=URL('select',
args=request.args[:1]), upload=URL(r=request,
f='download', args=request.args[:1]))
if form.accepts(request.vars, session):
session.flash = T('done!')
qry = query_by_table_type(table, db)
redirect(URL('select', args=request.args[:1],
vars=dict(query=qry)))
return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
return dict()
def ccache():
cache.ram.initialize()
cache.disk.initialize()
form = FORM(
P(TAG.BUTTON(
T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
P(TAG.BUTTON(
T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
P(TAG.BUTTON(
T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
)
if form.accepts(request.vars, session):
clear_ram = False
clear_disk = False
session.flash = ""
if request.vars.yes:
clear_ram = clear_disk = True
if request.vars.ram:
clear_ram = True
if request.vars.disk:
clear_disk = True
if clear_ram:
cache.ram.clear()
session.flash += T("Ram Cleared")
if clear_disk:
cache.disk.clear()
session.flash += T("Disk Cleared")
redirect(URL(r=request))
try:
from guppy import hpy
hp = hpy()
except ImportError:
hp = False
import shelve
import os
import copy
import time
import math
from gluon import portalocker
ram = {
'entries': 0,
'bytes': 0,
'objects': 0,
'hits': 0,
'misses': 0,
'ratio': 0,
'oldest': time.time(),
'keys': []
}
disk = copy.copy(ram)
total = copy.copy(ram)
disk['keys'] = []
total['keys'] = []
def GetInHMS(seconds):
hours = math.floor(seconds / 3600)
seconds -= hours * 3600
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
seconds = math.floor(seconds)
return (hours, minutes, seconds)
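    # For illustration, GetInHMS(3725) returns (1.0, 2.0, 5.0), i.e.
    # 1 h 2 min 5 s, which key_table below formats as '01:02:05'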
for key, value in cache.ram.storage.iteritems():
if isinstance(value, dict):
ram['hits'] = value['hit_total'] - value['misses']
ram['misses'] = value['misses']
try:
ram['ratio'] = ram['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
ram['ratio'] = 0
else:
if hp:
ram['bytes'] += hp.iso(value[1]).size
ram['objects'] += hp.iso(value[1]).count
ram['entries'] += 1
if value[0] < ram['oldest']:
ram['oldest'] = value[0]
ram['keys'].append((key, GetInHMS(time.time() - value[0])))
folder = os.path.join(request.folder,'cache')
if not os.path.exists(folder):
os.mkdir(folder)
locker = open(os.path.join(folder, 'cache.lock'), 'a')
portalocker.lock(locker, portalocker.LOCK_EX)
disk_storage = shelve.open(
os.path.join(folder, 'cache.shelve'))
try:
for key, value in disk_storage.items():
if isinstance(value, dict):
disk['hits'] = value['hit_total'] - value['misses']
disk['misses'] = value['misses']
try:
disk['ratio'] = disk['hits'] * 100 / value['hit_total']
except (KeyError, ZeroDivisionError):
disk['ratio'] = 0
else:
if hp:
disk['bytes'] += hp.iso(value[1]).size
disk['objects'] += hp.iso(value[1]).count
disk['entries'] += 1
if value[0] < disk['oldest']:
disk['oldest'] = value[0]
disk['keys'].append((key, GetInHMS(time.time() - value[0])))
finally:
portalocker.unlock(locker)
locker.close()
disk_storage.close()
total['entries'] = ram['entries'] + disk['entries']
total['bytes'] = ram['bytes'] + disk['bytes']
total['objects'] = ram['objects'] + disk['objects']
total['hits'] = ram['hits'] + disk['hits']
total['misses'] = ram['misses'] + disk['misses']
total['keys'] = ram['keys'] + disk['keys']
try:
total['ratio'] = total['hits'] * 100 / (total['hits'] +
total['misses'])
except (KeyError, ZeroDivisionError):
total['ratio'] = 0
if disk['oldest'] < ram['oldest']:
total['oldest'] = disk['oldest']
else:
total['oldest'] = ram['oldest']
ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
total['oldest'] = GetInHMS(time.time() - total['oldest'])
def key_table(keys):
return TABLE(
TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
*[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
**dict(_class='cache-keys',
_style="border-collapse: separate; border-spacing: .5em;"))
ram['keys'] = key_table(ram['keys'])
disk['keys'] = key_table(disk['keys'])
total['keys'] = key_table(total['keys'])
return dict(form=form, total=total,
ram=ram, disk=disk, object_stats=hp != False)
def table_template(table):
from gluon.html import TR, TD, TABLE, TAG
def FONT(*args, **kwargs):
return TAG.font(*args, **kwargs)
def types(field):
f_type = field.type
if not isinstance(f_type,str):
return ' '
elif f_type == 'string':
return field.length
elif f_type == 'id':
return B('pk')
elif f_type.startswith('reference') or \
f_type.startswith('list:reference'):
return B('fk')
else:
return ' '
    # This is horrible HTML but the only kind graphviz understands
rows = []
cellpadding = 4
color = "#000000"
bgcolor = "#FFFFFF"
face = "Helvetica"
face_bold = "Helvetica Bold"
border = 0
rows.append(TR(TD(FONT(table, _face=face_bold, _color=bgcolor),
_colspan=3, _cellpadding=cellpadding,
_align="center", _bgcolor=color)))
for row in db[table]:
rows.append(TR(TD(FONT(row.name, _color=color, _face=face_bold),
_align="left", _cellpadding=cellpadding,
_border=border),
TD(FONT(row.type, _color=color, _face=face),
_align="left", _cellpadding=cellpadding,
_border=border),
TD(FONT(types(row), _color=color, _face=face),
_align="center", _cellpadding=cellpadding,
_border=border)))
return "< %s >" % TABLE(*rows, **dict(_bgcolor=bgcolor, _border=1,
_cellborder=0, _cellspacing=0)
).xml()
def bg_graph_model():
graph = pgv.AGraph(layout='dot', directed=True, strict=False, rankdir='LR')
subgraphs = dict()
for tablename in db.tables:
if hasattr(db[tablename],'_meta_graphmodel'):
meta_graphmodel = db[tablename]._meta_graphmodel
else:
meta_graphmodel = dict(group='Undefined', color='#ECECEC')
group = meta_graphmodel['group'].replace(' ', '')
if not subgraphs.has_key(group):
subgraphs[group] = dict(meta=meta_graphmodel, tables=[])
subgraphs[group]['tables'].append(tablename)
else:
subgraphs[group]['tables'].append(tablename)
graph.add_node(tablename, name=tablename, shape='plaintext',
label=table_template(tablename))
for n, key in enumerate(subgraphs.iterkeys()):
graph.subgraph(nbunch=subgraphs[key]['tables'],
name='cluster%d' % n,
style='filled',
color=subgraphs[key]['meta']['color'],
label=subgraphs[key]['meta']['group'])
for tablename in db.tables:
for field in db[tablename]:
f_type = field.type
if isinstance(f_type,str) and (
f_type.startswith('reference') or
f_type.startswith('list:reference')):
referenced_table = f_type.split()[1].split('.')[0]
n1 = graph.get_node(tablename)
n2 = graph.get_node(referenced_table)
graph.add_edge(n1, n2, color="#4C4C4C", label='')
graph.layout()
#return graph.draw(format='png', prog='dot')
if not request.args:
return graph.draw(format='png', prog='dot')
else:
response.headers['Content-Disposition']='attachment;filename=graph.%s'%request.args(0)
if request.args(0) == 'dot':
return graph.string()
else:
return graph.draw(format=request.args(0), prog='dot')
def graph_model():
return dict(databases=databases, pgv=pgv)
def auth_manage():
tablename = request.args(0)
if not tablename or not tablename in auth.db.tables:
return dict()
table = auth.db[tablename]
formname = '%s_grid' % tablename
if tablename == auth.settings.table_user_name:
auth.settings.table_user._plural = T('Users')
auth.settings.table_membership._plural = T('Roles')
auth.settings.table_membership._id.readable = False
auth.settings.table_membership.user_id.label = T('User')
auth.settings.table_membership.group_id.label = T('Role')
grid = SQLFORM.smartgrid(table, args=request.args[:1], user_signature=True,
linked_tables=[auth.settings.table_membership_name],
maxtextlength=1000, formname=formname)
else:
table._id.readable = False
auth.settings.table_permission.group_id.label = T('Role')
auth.settings.table_permission.name.label = T('Permission')
orderby = 'role' if table == auth.settings.table_group_name else 'group_id'
grid = SQLFORM.grid(table, args=request.args[:1], orderby=table[orderby],
user_signature=True, maxtextlength=1000, formname=formname)
return grid if request.extension=='load' else dict(grid=grid)
def manage():
tablename = request.args(0)
if tablename in auth.db.tables:
grid = SQLFORM.smartgrid(auth.db[tablename], args=request.args[:1])
else:
return dict()
return grid if request.extension=='load' else dict(grid=grid)
|
dudepare/django
|
refs/heads/master
|
tests/resolve_url/urls.py
|
357
|
from django.conf.urls import url
from django.contrib.auth import views
urlpatterns = [
url(r'^accounts/logout/$', views.logout, name='logout'),
]
|
HorusCMF/Shop
|
refs/heads/master
|
py/botIRC.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import irclib
help(irclib.ServerConnection)
|
stackArmor/security_monkey
|
refs/heads/develop
|
security_monkey/common/jinja.py
|
2
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.common.jinja
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
import os.path
import jinja2
templates = "templates"
def get_jinja_env():
"""
Returns a Jinja environment with a FileSystemLoader for our templates
"""
templates_directory = os.path.abspath(os.path.join(__file__, '..', '..', templates))
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_directory)) # nosec
# templates are HTML escaped elsewhere
#jinja_environment.filters['dateformat'] = dateformat
return jinja_environment
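# A typical usage sketch ('some_template.html' is a placeholder for one of
# the real template names under the templates directory):
#
#   env = get_jinja_env()
#   template = env.get_template('some_template.html')
#   body = template.render(items=[])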
|
fpohtmeh/loki
|
refs/heads/master
|
plugins/currency/__init__.py
|
1
|
import urllib.request
from bs4 import BeautifulSoup
from algo.speech_synthesizer import SpeechSynthesizer
from devices.console import Console
from devices.speakers import Speakers
from plugins.plugin import Plugin
class Currency(Plugin):
def __init__(self, parent=None):
super().__init__(parent)
self.load_settings(__file__)
self.add_children(Console())
SpeechSynthesizer(self).add_children(Speakers())
def match(self, data):
return Plugin._re_match(self.lang.re, data)
def update(self, data):
# load data by URL
url = 'https://goverla.ua/'
with urllib.request.urlopen(url) as response:
html = response.read().decode('utf-8')
# parse data
parsed_html = BeautifulSoup(html, 'lxml')
buy = None
sell = None
for buy in parsed_html.body.select_one('div#usd > div.bid'):
buy = Currency._format_number(int(buy))
for sell in parsed_html.body.select_one('div#usd > div.ask'):
sell = Currency._format_number(int(sell))
if buy and sell:
self._output = self.lang.output.format(buy, sell)
else:
self._output = self.lang.error
@staticmethod
def _format_number(value):
s = str(value)
if len(s) > 2:
s = s[:2] + ' ' + s[2:]
return s
|
heri/openaccess
|
refs/heads/master
|
mitro-core/tools/nuke_identity.py
|
23
|
#!/usr/bin/env python
'''Deletes an identity and all related information.'''
import random
import sys
import time
import psycopg2
def group_by_column0(rows):
output = {}
for row in rows:
l = output.get(row[0], [])
l.append(row)
output[row[0]] = l
return output
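# For illustration: group_by_column0([(1, 'a'), (1, 'b'), (2, 'c')]) returns
# {1: [(1, 'a'), (1, 'b')], 2: [(2, 'c')]}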
_AUDIT_ACTION_DELETE = 'DELETE_IDENTITY'
_EXPECTED_PROCESSED_AUDIT_COLUMNS = set((
'id',
'actor',
'actor_name',
'affected_user',
'affected_user_name',
'affected_secret',
'affected_group',
'action',
'timestamp_ms',
'transaction_id',
'source_ip',
'device_id',
))
def is_processed_audit_unchanged(connection):
cursor = connection.cursor()
cursor.execute('SELECT * FROM processedaudit LIMIT 0')
actual_columns = set()
for column in cursor.description:
actual_columns.add(column.name)
cursor.close()
connection.rollback()
return actual_columns == _EXPECTED_PROCESSED_AUDIT_COLUMNS
def confirm_prompt_or_exit(description):
'''Calls sys.exit if we don't confirm that we want to nuke.'''
    # add a random digit to force the user to read:
nuke_string = 'nuke %d' % random.randint(0, 9)
print
print 'Type "%s" to delete %s:' % (
nuke_string, description)
m = raw_input()
if m != nuke_string:
sys.stderr.write("Not deleting: input '%s' != '%s'\n" % (m, nuke_string))
sys.exit(1)
return True
def connect_to_mitro_db():
connection = psycopg2.connect('dbname=mitro')
# Set serializable isolation
cursor = connection.cursor()
cursor.execute('set transaction isolation level serializable')
return connection
def nuke_identity(identity_name):
connection = connect_to_mitro_db()
cursor = connection.cursor()
# Check that processed_audit table is unchanged
if not is_processed_audit_unchanged(connection):
sys.stderr.write('Error: processedaudit table has changed!\n')
return 1
cursor.execute('SELECT id from identity where name = %s', (identity_name,))
results = cursor.fetchall()
if len(results) == 0:
sys.stderr.write('Error: identity %s not found\n' % identity_name)
return 1
identity_id = results[0][0]
cursor.execute('SELECT email from username where identity = %s', (identity_id,))
results = cursor.fetchall()
if len(results) > 1:
print 'Aliases:'
for row in results:
email = row[0]
print ' ', email
print
print 'ACLs for identity %s (id %d):' % (identity_name, identity_id)
# Locate all acls for the user
groups = {}
delete_acls = set()
cursor.execute('SELECT acl.id,group_id,groups.name,level FROM acl,groups WHERE member_identity = %s AND groups.id = group_id', (identity_id,))
for acl_id, group_id, group_name, level in cursor:
print ' group "%s" (%d): %s' % (group_name, group_id, level)
delete_acls.add(acl_id)
groups[group_id] = group_name
print
# Find groups that must be deleted (this user is the only member)
delete_groups = set()
cursor.execute('SELECT group_id, groups.type, count(*) FROM acl, groups WHERE group_id = ANY(%s) AND groups.id = group_id GROUP BY group_id, groups.type',
(groups.keys(),))
for group_id, group_type, count in cursor:
if count == 1:
if group_type != 'PRIVATE' and group_type is not None:
raise Exception('ERROR: Only admin for an organization? Cannot delete!')
delete_groups.add(group_id)
print 'Groups and secrets that will be deleted:'
print
# Verify that none of our "to delete" groups are parent groups in an ACL
# This should be caught by the group_type check above
cursor.execute('SELECT count(*) FROM acl WHERE group_identity = ANY(%s)', (list(delete_groups),))
count = cursor.next()[0]
assert count == 0
delete_group_secrets = set()
maybe_delete_secrets = set()
not_printed_groups = list(delete_groups)
cursor.execute('SELECT group_id, group_secret.id, "serverVisibleSecret_id", hostname FROM ' +
'group_secret,secrets WHERE group_id = ANY(%s) AND secrets.id="serverVisibleSecret_id"', (list(delete_groups),))
for group_id, rows in group_by_column0(cursor).iteritems():
not_printed_groups.remove(group_id)
print 'Group "%s" (id %d):' % (groups[group_id], group_id)
for group_id, group_secret_id, svs_id, hostname in rows:
delete_group_secrets.add(group_secret_id)
maybe_delete_secrets.add(svs_id)
print ' %s (secret %d; group_secret %d)' % (hostname, svs_id, group_secret_id)
for group_id in not_printed_groups:
print 'Group "%s" (id %d): (no secrets)' % (groups[group_id], group_id)
# find secrets shared outside groups we are going to delete; do not delete these
# if len() > 0 required because IN of empty tuple is an error
delete_secrets = set()
if len(maybe_delete_secrets) > 0 and len(delete_groups) > 0:
cursor.execute('SELECT DISTINCT "serverVisibleSecret_id" FROM group_secret ' +
'WHERE "serverVisibleSecret_id" IN %s AND "group_id" NOT IN %s',
(tuple(maybe_delete_secrets), tuple(delete_groups)))
shared_secrets = set()
for row in cursor:
secret_id = row[0]
shared_secrets.add(secret_id)
delete_secrets = maybe_delete_secrets - shared_secrets
prompt_description = 'identity %s; %d groups; %d secrets' % (
identity_name, len(delete_groups), len(delete_secrets))
confirm_prompt_or_exit(prompt_description)
cursor.execute('DELETE FROM secrets WHERE id = ANY(%s)', (list(delete_secrets),))
print 'Deleted %d secrets' % cursor.rowcount
assert cursor.rowcount == len(delete_secrets)
# Remove dangling king references on shared secrets TODO: Set to another user?
cursor.execute('UPDATE secrets SET king = NULL WHERE king = %s', (identity_id,))
print 'Removed king from %d secrets' % cursor.rowcount
cursor.execute('DELETE FROM group_secret WHERE id = ANY(%s)', (list(delete_group_secrets),))
print 'Deleted %d group_secrets' % cursor.rowcount
assert cursor.rowcount == len(delete_group_secrets)
cursor.execute('DELETE FROM acl WHERE id = ANY(%s)', (list(delete_acls),))
print 'Deleted %d acls' % cursor.rowcount
assert cursor.rowcount == len(delete_acls)
cursor.execute('SELECT COUNT(*) FROM acl WHERE member_identity = %s', (identity_id,))
results = cursor.fetchall()
assert results[0][0] == 0
cursor.execute('DELETE FROM groups WHERE id = ANY(%s)', (list(delete_groups),))
print 'Deleted %d groups' % cursor.rowcount
assert cursor.rowcount == len(delete_groups)
cursor.execute('DELETE FROM device_specific WHERE "user" = %s', (identity_id,))
print 'Deleted %d device_specific' % cursor.rowcount
cursor.execute('DELETE FROM username WHERE identity = %s', (identity_id,))
print 'Deleted %d aliases' % cursor.rowcount
cursor.execute('DELETE FROM identity WHERE id = %s', (identity_id,))
print 'Deleted %d identity' % cursor.rowcount
assert cursor.rowcount == 1
now_ms = long(time.time() * 1000 + 0.5)
cursor.execute('INSERT INTO processedaudit (actor, actor_name, action, timestamp_ms) VALUES ' +
'(%s, %s, %s, %s)', (identity_id, identity_name, _AUDIT_ACTION_DELETE, now_ms))
cursor.close()
connection.commit()
print 'Committed'
connection.close()
def main():
if len(sys.argv) != 2:
sys.stderr.write('nuke_identity.py (identity name)\n')
sys.exit(1)
identity_name = sys.argv[1]
code = nuke_identity(identity_name)
if code is not None:
sys.exit(code)
if __name__ == '__main__':
main()
|
igemsoftware/SYSU-Software2013
|
refs/heads/master
|
project/Python27_32/Lib/test/test_pydoc.py
|
13
|
import sys
import os
import os.path
import difflib
import subprocess
import re
import pydoc
import inspect
import keyword
import unittest
import xml.etree
import test.test_support
from contextlib import contextmanager
from collections import namedtuple
from test.test_support import (
TESTFN, forget, rmtree, EnvironmentVarGuard, reap_children, captured_stdout)
from test import pydoc_mod
expected_text_pattern = \
"""
NAME
test.pydoc_mod - This is a test module for test_pydoc
FILE
%s
%s
CLASSES
__builtin__.object
B
A
\x20\x20\x20\x20
class A
| Hello and goodbye
|\x20\x20
| Methods defined here:
|\x20\x20
| __init__()
| Wow, I have no function!
\x20\x20\x20\x20
class B(__builtin__.object)
| Data descriptors defined here:
|\x20\x20
| __dict__
| dictionary for instance variables (if defined)
|\x20\x20
| __weakref__
| list of weak references to the object (if defined)
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|\x20\x20
| NO_MEANING = 'eggs'
FUNCTIONS
doc_func()
This function solves all of the world's problems:
hunger
lack of Python
war
\x20\x20\x20\x20
nodoc_func()
DATA
__author__ = 'Benjamin Peterson'
__credits__ = 'Nobody'
__version__ = '1.2.3.4'
VERSION
1.2.3.4
AUTHOR
Benjamin Peterson
CREDITS
Nobody
""".strip()
expected_html_pattern = \
"""
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="#7799ee">
<td valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"> <br><big><big><strong><a href="test.html"><font color="#ffffff">test</font></a>.pydoc_mod</strong></big></big> (version 1.2.3.4)</font></td
><td align=right valign=bottom
><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="file:%s">%s</a>%s</font></td></tr></table>
<p><tt>This is a test module for test_pydoc</tt></p>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ee77aa">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ee77aa"><tt> </tt></td><td> </td>
<td width="100%%"><dl>
<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#B">B</a>
</font></dt></dl>
</dd>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#A">A</a>
</font></dt></dl>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="A">class <strong>A</strong></a></font></td></tr>
\x20\x20\x20\x20
<tr bgcolor="#ffc8d8"><td rowspan=2><tt> </tt></td>
<td colspan=2><tt>Hello and goodbye<br> </tt></td></tr>
<tr><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="A-__init__"><strong>__init__</strong></a>()</dt><dd><tt>Wow, I have no function!</tt></dd></dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="B">class <strong>B</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary for instance variables (if defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list of weak references to the object (if defined)</tt></dd>
</dl>
<hr>
Data and other attributes defined here:<br>
<dl><dt><strong>NO_MEANING</strong> = 'eggs'</dl>
</td></tr></table></td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#eeaa77"><tt> </tt></td><td> </td>
<td width="100%%"><dl><dt><a name="-doc_func"><strong>doc_func</strong></a>()</dt><dd><tt>This function solves all of the world's problems:<br>
hunger<br>
lack of Python<br>
war</tt></dd></dl>
<dl><dt><a name="-nodoc_func"><strong>nodoc_func</strong></a>()</dt></dl>
</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#55aa55">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#55aa55"><tt> </tt></td><td> </td>
<td width="100%%"><strong>__author__</strong> = 'Benjamin Peterson'<br>
<strong>__credits__</strong> = 'Nobody'<br>
<strong>__version__</strong> = '1.2.3.4'</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Author</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Benjamin Peterson</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Credits</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Nobody</td></tr></table>
""".strip()
# output pattern for missing module
missing_pattern = "no Python documentation found for '%s'"
# output pattern for module with bad imports
badimport_pattern = "problem in %s - <type 'exceptions.ImportError'>: No module named %s"
def run_pydoc(module_name, *args):
"""
Runs pydoc on the specified module. Returns the stripped
output of pydoc.
"""
cmd = [sys.executable, pydoc.__file__, " ".join(args), module_name]
try:
output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
return output.strip()
finally:
reap_children()
def get_pydoc_html(module):
"Returns pydoc generated output as html"
doc = pydoc.HTMLDoc()
output = doc.docmodule(module)
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "<br><a href=\"" + loc + "\">Module Docs</a>"
return output.strip(), loc
def get_pydoc_text(module):
"Returns pydoc generated output as text"
doc = pydoc.TextDoc()
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "\nMODULE DOCS\n " + loc + "\n"
output = doc.docmodule(module)
    # cleanup the extra text formatting that pydoc performs
patt = re.compile('\b.')
output = patt.sub('', output)
return output.strip(), loc
def print_diffs(text1, text2):
"Prints unified diffs for two texts"
lines1 = text1.splitlines(True)
lines2 = text2.splitlines(True)
diffs = difflib.unified_diff(lines1, lines2, n=0, fromfile='expected',
tofile='got')
print '\n' + ''.join(diffs)
class PyDocDocTest(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_html_doc(self):
result, doc_loc = get_pydoc_html(pydoc_mod)
mod_file = inspect.getabsfile(pydoc_mod)
if sys.platform == 'win32':
import nturl2path
mod_url = nturl2path.pathname2url(mod_file)
else:
mod_url = mod_file
expected_html = expected_html_pattern % (mod_url, mod_file, doc_loc)
if result != expected_html:
print_diffs(expected_html, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_text_doc(self):
result, doc_loc = get_pydoc_text(pydoc_mod)
expected_text = expected_text_pattern % \
(inspect.getabsfile(pydoc_mod), doc_loc)
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
def test_issue8225(self):
# Test issue8225 to ensure no doc link appears for xml.etree
result, doc_loc = get_pydoc_text(xml.etree)
self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")
def test_not_here(self):
missing_module = "test.i_am_not_here"
result = run_pydoc(missing_module)
expected = missing_pattern % missing_module
self.assertEqual(expected, result,
"documentation for missing module found")
def test_badimport(self):
# This tests the fix for issue 5230, where if pydoc found the module
# but the module had an internal import error pydoc would report no doc
# found.
modname = 'testmod_xyzzy'
testpairs = (
('i_am_not_here', 'i_am_not_here'),
('test.i_am_not_here_either', 'i_am_not_here_either'),
('test.i_am_not_here.neither_am_i', 'i_am_not_here.neither_am_i'),
('i_am_not_here.{}'.format(modname), 'i_am_not_here.{}'.format(modname)),
('test.{}'.format(modname), modname),
)
@contextmanager
def newdirinpath(dir):
os.mkdir(dir)
sys.path.insert(0, dir)
yield
sys.path.pop(0)
rmtree(dir)
with newdirinpath(TESTFN), EnvironmentVarGuard() as env:
env['PYTHONPATH'] = TESTFN
fullmodname = os.path.join(TESTFN, modname)
sourcefn = fullmodname + os.extsep + "py"
for importstring, expectedinmsg in testpairs:
f = open(sourcefn, 'w')
f.write("import {}\n".format(importstring))
f.close()
try:
result = run_pydoc(modname)
finally:
forget(modname)
expected = badimport_pattern % (modname, expectedinmsg)
self.assertEqual(expected, result)
def test_input_strip(self):
missing_module = " test.i_am_not_here "
result = run_pydoc(missing_module)
expected = missing_pattern % missing_module.strip()
self.assertEqual(expected, result,
"white space was not stripped from module name "
"or other error output mismatch")
def test_stripid(self):
# test with strings, other implementations might have different repr()
stripid = pydoc.stripid
# strip the id
self.assertEqual(stripid('<function stripid at 0x88dcee4>'),
'<function stripid>')
self.assertEqual(stripid('<function stripid at 0x01F65390>'),
'<function stripid>')
# nothing to strip, return the same text
self.assertEqual(stripid('42'), '42')
self.assertEqual(stripid("<type 'exceptions.Exception'>"),
"<type 'exceptions.Exception'>")
class TestDescriptions(unittest.TestCase):
def test_module(self):
# Check that pydocfodder module can be described
from test import pydocfodder
doc = pydoc.render_doc(pydocfodder)
self.assertIn("pydocfodder", doc)
def test_classic_class(self):
class C: "Classic class"
c = C()
self.assertEqual(pydoc.describe(C), 'class C')
self.assertEqual(pydoc.describe(c), 'instance of C')
expected = 'instance of C in module %s' % __name__
self.assertIn(expected, pydoc.render_doc(c))
def test_class(self):
class C(object): "New-style class"
c = C()
self.assertEqual(pydoc.describe(C), 'class C')
self.assertEqual(pydoc.describe(c), 'C')
expected = 'C in module %s object' % __name__
self.assertIn(expected, pydoc.render_doc(c))
def test_namedtuple_public_underscore(self):
NT = namedtuple('NT', ['abc', 'def'], rename=True)
with captured_stdout() as help_io:
help(NT)
helptext = help_io.getvalue()
self.assertIn('_1', helptext)
self.assertIn('_replace', helptext)
self.assertIn('_asdict', helptext)
class TestHelper(unittest.TestCase):
def test_keywords(self):
self.assertEqual(sorted(pydoc.Helper.keywords),
sorted(keyword.kwlist))
def test_main():
test.test_support.run_unittest(PyDocDocTest,
TestDescriptions,
TestHelper)
if __name__ == "__main__":
test_main()
|
simberaj/interactions
|
refs/heads/master
|
modeling.py
|
1
|
# Modeling library - contains interaction classes and optimizers for fitting spatial interaction models
import math, operator, common
from numpy import array
try:
from scipy.optimize import fsolve, fmin
except ImportError:
raise ImportError, 'modeling tools require an installed SCIPY package'
## INTERACTION CLASS - used in optimization - abstract superclass
class OptimizableInteraction(object):
def __init__(self, strength, distance):
if not (strength and float(strength)):
raise ValueError, 'invalid interaction strength: %s' % strength
self.strength = float(strength)
self.distance = float(distance)
def residual(self, b, g):
return (self.strength - self.theoretical(b, g)) ** 2
def fraction(self, b):
return NotImplemented
def theoretical(self, b, g):
return NotImplemented
def real(self):
return self.strength
@property
def logdist(self):
return math.log(self.distance)
class GravityInteraction(OptimizableInteraction):
def __init__(self, strength, distance, massFrom, massTo):
OptimizableInteraction.__init__(self, strength, distance)
self.massFrom = float(massFrom)
self.massTo = float(massTo)
def yLogChar(self): # used for initial logarithmic approximation
return math.log((self.strength / self.massFrom) / self.massTo)
def theoretical(self, b, g):
return g * self.massFrom * self.massTo * (self.distance ** (-b))
def fraction(self, b):
return self.massFrom * self.massTo * (self.distance ** (-b))
class GaussianInteraction(OptimizableInteraction):
def theoretical(self, b, g):
return g * math.exp(-self.distance ** 2 / b)
def fraction(self, b):
return math.exp(-self.distance ** 2 / b)
    def yLogChar(self):
        # log of strength; unlike GravityInteraction there are no masses to divide out
        return math.log(self.strength)
## OPTIMIZATION CLASS - USED TO DETERMINE THE MODEL PARAMETERS
class Optimizer(object):
TOLERANCE = 1e-8
def __init__(self, interactions):
self.interactions = interactions
self.b = None
self.g = None
    # returns the residual sum of squared differences between real and modelled interactions
def decOLS(self, inputs):
sum = 0
for inter in self.interactions:
sum += inter.residual(*inputs)
return sum
def theoreticalInteractions(self):
return [inter.theoretical(self.b, self.g) for inter in self.interactions]
def realInteractions(self):
return [inter.real() for inter in self.interactions]
def residuals(self, theoretical, real):
return [theoretical[i] - real[i] for i in range(len(theoretical))]
def optimizeOLS(self):
return fmin(self.decOLS, array(self.approx()))
def getB(self):
return self.b
def getG(self):
return self.g
def report(self, theoretical=None):
if theoretical is None:
theoretical = self.theoreticalInteractions()
theorAvg = sum(theoretical) / float(len(theoretical))
real = self.realInteractions()
residuals = self.residuals(theoretical, real)
return 'REAL INTERACTIONS\n%s\nTHEORETICAL INTERACTIONS\n%s\nRESIDUALS\n%s\n' % (
self.statReport(real), self.statReport(theoretical), self.statReport(residuals))
def statReport(self, numbers):
mean = sum(numbers) / float(len(numbers))
stdev = (sum([res**2 for res in numbers]) / float(len(numbers)))**0.5
varcoef = (stdev / mean if mean != 0 else 0)
return '''Mean: %g
Min: %g
Max: %g
Standard deviation: %g
Variation coefficient: %g''' % (mean, min(numbers), max(numbers), stdev, varcoef)
@staticmethod
def writeReport(text, fname):
report(text, fname)
class GravityOptimizer(Optimizer):
# Fits a gravity model in form
# g * m1 * m2 * d^(-b)
# where b is the distance decay parameter, g is the scaling factor,
# m1, m2 masses of the interacting cities and d their distance
    # returns the maximum-likelihood optimization characteristic used to determine the model parameters
def decMLE(self, b):
inters, logbords, bords, loginters = 0, 0, 0, 0
for inter in self.interactions:
inters += inter.strength
bord = inter.fraction(b)
logbords += (bord * inter.logdist)
bords += bord
loginters += (inter.strength * inter.logdist)
return (inters * logbords / bords) - loginters
    # returns a logarithmic approximation used as the initial estimate for the optimization
def approx(self):
xhelps = [inter.logdist for inter in self.interactions]
yhelps = [inter.yLogChar() for inter in self.interactions]
yavg = sum(yhelps) / len(yhelps)
xavg = sum(xhelps) / len(xhelps)
btops = 0
bbottoms = 0
for i in range(len(xhelps)):
btops += (xhelps[i] - xavg) * (yhelps[i] - yavg)
bbottoms += (xhelps[i] - xavg) ** 2
b = -(btops / bbottoms)
return [b, math.exp(yavg + b * xavg)]
def countG(self, b):
strsum = 0
fracsum = 0
for inter in self.interactions:
strsum += inter.strength
fracsum += inter.fraction(b)
return strsum / fracsum
def optimize(self, type='MLE'):
if type == 'OLS':
res = self.optimizeOLS()
self.b = res[0]
self.g = res[1]
else:
self.b = self.optimizeMLE()
self.g = self.countG(self.b)
def optimizeMLE(self):
return float(fsolve(self.decMLE, self.approx()[0], xtol=self.TOLERANCE))
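# A minimal usage sketch for GravityOptimizer (hypothetical numbers; real
# strengths, distances and masses come from your own interaction data):
#
#   inters = [GravityInteraction(120, 15.0, 5000, 12000),
#             GravityInteraction(40, 42.5, 5000, 3000),
#             GravityInteraction(8, 80.0, 1200, 3000)]
#   opt = GravityOptimizer(inters)
#   opt.optimize('MLE')
#   print 'b = %g, g = %g' % (opt.getB(), opt.getG())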
class GaussianOptimizer(Optimizer):
# Fits a Gaussian curve to the interactions in form
# f = g * e ^ (-d^2 / b)
# where b is the bandwidth and g is the scaling factor.
# creates group interactions from raw interactions by grouping into quantiles and assigning their sum
# primarily for calculating distance decay
@classmethod
def fromData(cls, data, qnum=20):
        # expects data to contain mappings with 'strength' and 'length' keys
count = len(data)
# compute number of quantiles
maxQ = len(data) / 10
qnum = qnum if qnum < maxQ else maxQ
# sort the data
data = sorted(data, key=operator.itemgetter('strength'))
# compute quantile strength sums and their mean length as new interactions
interactions = []
fromBreak = 0
for i in range(1, qnum):
toBreak = int(round(i * count / qnum))
qsum = sum([item['strength'] for item in data[fromBreak:toBreak]])
qmid = sum([item['length'] for item in data[fromBreak:toBreak]]) / (toBreak - fromBreak)
interactions.append(GaussianInteraction(qsum, qmid))
fromBreak = toBreak
return cls(interactions)
# initial approximation of the curve by solving the parameter values analytically from two
# values of the curve
def approx(self):
# two approximate points to fit the gaussian curve
inter1 = self.interactions[len(self.interactions) // 4]
inter2 = self.interactions[len(self.interactions) // 2]
# logarithmic approximation of bandwidth
b = (inter1.distance ** 2 - inter2.distance ** 2) / (math.log(inter2.strength) - math.log(inter1.strength))
return [b, inter1.strength / inter1.fraction(b)]
def optimize(self):
self.b, self.g = self.optimizeOLS()
def decay(self, strength, distance, divc=1):
return strength * math.exp(-(distance ** 2) / (self.b * divc))
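# A usage sketch for GaussianOptimizer (hypothetical records; each one needs
# 'strength' and 'length' keys, and fromData wants at least ten per quantile):
#
#   data = [{'strength': 3.0, 'length': 1.5},
#           {'strength': 1.2, 'length': 7.0}]  # ...plus many more records
#   gopt = GaussianOptimizer.fromData(data, qnum=10)
#   gopt.optimize()
#   weakened = gopt.decay(3.0, 12.5)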
def report(text, fname):
common.progress('saving report')
try:
with open(fname, 'w') as outfile:
outfile.write(text.encode('utf8'))
except (IOError, OSError, UnicodeEncodeError):
common.warning('Report output failed.')
|
jesseditson/rethinkdb
|
refs/heads/next
|
test/interface/shard_balancing.py
|
4
|
#!/usr/bin/env python
# Copyright 2014 RethinkDB, all rights reserved.
"""The `interface.shard_balancing` test checks that RethinkDB generates balanced shards in a variety of scenarios."""
from __future__ import print_function
import pprint, os, sys, time
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))
r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()
print("Spinning up two servers (%.2fs)" % (time.time() - startTime))
with driver.Cluster(initial_servers=['a', 'b'], output_folder='.', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as cluster:
cluster.check()
print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime))
server = cluster[0]
conn = r.connect(host=server.host, port=server.driver_port)
print("Creating db if necessary (%.2fs)" % (time.time() - startTime))
    if dbName not in r.db_list().run(conn):
r.db_create(dbName).run(conn)
print("Testing pre-sharding with UUID primary keys (%.2fs)" % (time.time() - startTime))
res = r.db(dbName).table_create("uuid_pkey").run(conn)
assert res["tables_created"] == 1
r.db(dbName).table("uuid_pkey").reconfigure(shards=10, replicas=1).run(conn)
r.db(dbName).table("uuid_pkey").wait().run(conn)
res = r.db(dbName).table("uuid_pkey").insert([{}]*1000).run(conn)
assert res["inserted"] == 1000 and res["errors"] == 0
res = r.db(dbName).table("uuid_pkey").info().run(conn)["doc_count_estimates"]
pprint.pprint(res)
for num in res:
assert 50 < num < 200
print("Testing down-sharding existing balanced shards (%.2fs)" % (time.time() - startTime))
r.db(dbName).table("uuid_pkey").reconfigure(shards=2, replicas=1).run(conn)
r.db(dbName).table("uuid_pkey").wait().run(conn)
res = r.db(dbName).table("uuid_pkey").info().run(conn)["doc_count_estimates"]
pprint.pprint(res)
for num in res:
assert 250 < num < 750
print("Testing sharding of existing inserted data (%.2fs)" % (time.time() - startTime))
res = r.db(dbName).table_create("numeric_pkey").run(conn)
assert res["tables_created"] == 1
res = r.db(dbName).table("numeric_pkey").insert([{"id": n} for n in xrange(1000)]).run(conn)
assert res["inserted"] == 1000 and res["errors"] == 0
r.db(dbName).table("numeric_pkey").reconfigure(shards=10, replicas=1).run(conn)
r.db(dbName).table("numeric_pkey").wait().run(conn)
res = r.db(dbName).table("numeric_pkey").info().run(conn)["doc_count_estimates"]
pprint.pprint(res)
for num in res:
assert 50 < num < 200
print("Creating an unbalanced table (%.2fs)" % (time.time() - startTime))
res = r.db(dbName).table_create("unbalanced").run(conn)
assert res["tables_created"] == 1
r.db(dbName).table("unbalanced").reconfigure(shards=2, replicas=1).run(conn)
r.db(dbName).table("unbalanced").wait().run(conn)
res = r.db(dbName).table("unbalanced").insert([{"id": n} for n in xrange(1000)]).run(conn)
assert res["inserted"] == 1000 and res["errors"] == 0
res = r.db(dbName).table("unbalanced").info().run(conn)["doc_count_estimates"]
pprint.pprint(res)
assert res[0] > 500
assert res[1] < 100
# RSI(reql_admin): Once #2896 is implemented, make sure the server has an issue now
print("Fixing the unbalanced table (%.2fs)" % (time.time() - startTime))
status_before = r.db(dbName).table("unbalanced").status().run(conn)
res = r.db(dbName).table("unbalanced").rebalance().run(conn)
assert res["rebalanced"] == 1
assert len(res["status_changes"]) == 1
assert res["status_changes"][0]["old_val"] == status_before
assert res["status_changes"][0]["new_val"]["status"]["all_replicas_ready"] == False
r.db(dbName).table("unbalanced").wait().run(conn)
res = r.db(dbName).table("unbalanced").info().run(conn)["doc_count_estimates"]
pprint.pprint(res)
for num in res:
assert 250 < num < 750
print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
|
giocalitri/django-guardian
|
refs/heads/devel
|
guardian/testapp/models.py
|
14
|
from __future__ import unicode_literals
from datetime import datetime
import django
from django.db import models
from django.contrib.admin.models import LogEntry
from guardian.mixins import GuardianUserMixin
from guardian.models import UserObjectPermissionBase
from guardian.models import GroupObjectPermissionBase
class DynamicAccessor(object):
def __init__(self):
pass
def __getattr__(self, key):
return DynamicAccessor()
class ProjectUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey('Project')
class ProjectGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey('Project')
class Project(models.Model):
name = models.CharField(max_length=128, unique=True)
created_at = models.DateTimeField(default=datetime.now)
class Meta:
get_latest_by = 'created_at'
def __unicode__(self):
return self.name
Project.not_a_relation_descriptor = DynamicAccessor()
class MixedGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey('Mixed')
class Mixed(models.Model):
"""
Model for testing object permission checks with the generic user object
permissions model and a direct group object permissions model.
"""
name = models.CharField(max_length=128, unique=True)
def __unicode__(self):
return self.name
class LogEntryWithGroup(LogEntry):
group = models.ForeignKey('auth.Group', null=True, blank=True)
class NonIntPKModel(models.Model):
"""
Model for testing whether get_objects_for_user will work when the objects to
be returned have non-integer primary keys.
"""
char_pk = models.CharField(primary_key=True, max_length=128)
if django.VERSION > (1, 5):
from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser, GuardianUserMixin):
custom_id = models.AutoField(primary_key=True)
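# Illustrative usage sketch (not part of the test app itself): these models are
# exercised through guardian's public shortcuts. The user `joe` and the
# `project` instance below are hypothetical.
#   from guardian.shortcuts import assign_perm, get_objects_for_user
#   assign_perm('testapp.change_project', joe, project)
#   get_objects_for_user(joe, 'testapp.change_project')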
|
benjaminjkraft/django
|
refs/heads/master
|
django/conf/locale/id/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i"
TIME_FORMAT = 'G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d-%m-%y', '%d/%m/%y', # '25-10-09', '25/10/09'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', '25/10/2009'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006'
]
TIME_INPUT_FORMATS = [
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
'%d-%m-%Y %H.%M.%S.%f', # '25-10-2009 14.30.59.000200'
'%d-%m-%Y %H.%M', # '25-10-2009 14.30'
'%d-%m-%Y', # '25-10-2009'
'%d-%m-%y %H.%M.%S', # '25-10-09 14.30.59'
'%d-%m-%y %H.%M.%S.%f', # '25-10-09 14.30.59.000200'
'%d-%m-%y %H.%M', # '25-10-09 14.30'
'%d-%m-%y', # '25-10-09'
'%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
'%m/%d/%y %H.%M.%S.%f', # '10/25/06 14.30.59.000200'
'%m/%d/%y %H.%M', # '10/25/06 14.30'
'%m/%d/%y', # '10/25/06'
'%m/%d/%Y %H.%M.%S', # '10/25/2009 14.30.59'
'%m/%d/%Y %H.%M.%S.%f', # '10/25/2009 14.30.59.000200'
'%m/%d/%Y %H.%M', # '10/25/2009 14.30'
'%m/%d/%Y', # '10/25/2009'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
tangp3/gpdb
|
refs/heads/master
|
gpMgmt/bin/gppylib/test/behave/mgmt_utils/steps/logger.py
|
19
|
import os
import glob
from gppylib.test.behave_utils.utils import execute_sql_singleton
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
if master_data_dir is None:
raise Exception('Please set MASTER_DATA_DIRECTORY in environment')
def gp_fts_log_in_master_log_count(mdd):
return gp_in_master_log_count(mdd, 'FTS: probe result processing is complete')
def gp_in_master_log_count(mdd, pattern):
pg_log_glob = os.path.join(mdd, 'pg_log', 'gpdb-20??-??-*.csv')
files = glob.glob(pg_log_glob)
files.sort()
if not files:
raise Exception('pg_log not found with the following pattern on master: %s' % pg_log_glob)
fd = open(files[-1])
output = fd.read()
fd.close()
counter = 0
for line in output.splitlines():
if pattern in line:
counter = counter + 1
return counter
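# Illustrative call (the pattern is the one used by gp_fts_log_in_master_log_count above):
#   gp_in_master_log_count(master_data_dir, 'FTS: probe result processing is complete')
# counts the matching lines in the newest master pg_log csv file.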
def gp_fts_log_in_segment_log_count():
COUNT_SQL = """
SELECT count(*) from gp_toolkit.__gp_log_segment_ext
WHERE logmessage like '%FTS: Probe Request%';
"""
result = execute_sql_singleton('template1', COUNT_SQL)
return result
QUERY_PLAN_SIZE_STRING = 'Query plan size to dispatch:'
@then(u'the count of query plan logs in pg_log is stored')
def impl(context):
context.master_plan_size_count = gp_in_master_log_count(master_data_dir, QUERY_PLAN_SIZE_STRING)
@then(u'the count of query plan logs is not changed')
def impl(context):
count = gp_in_master_log_count(master_data_dir, QUERY_PLAN_SIZE_STRING)
if count != context.master_plan_size_count:
raise Exception("'%s' is still being logged in pg_log when it should be off counts (%d, %d)" % (QUERY_PLAN_SIZE_STRING, count, context.master_plan_size_count))
print "IVAN query plan logs is unchanged: %d %d" % (count, context.master_plan_size_count)
@then(u'the count of query plan logs is increased')
def impl(context):
count = gp_in_master_log_count(master_data_dir, QUERY_PLAN_SIZE_STRING)
if count <= context.master_plan_size_count:
raise Exception("'%s' is not being logged in pg_log when it should be on counts (%d, %d)" % (QUERY_PLAN_SIZE_STRING, count, context.master_plan_size_count))
print "IVAN query plan logs is increased: %d %d" % (count, context.master_plan_size_count)
@then('the count of verbose logs in pg_log is stored')
def impl(context):
context.master_fts_log_count = gp_fts_log_in_master_log_count(master_data_dir)
context.segment_fts_log_count = gp_fts_log_in_segment_log_count()
@then('the count of verbose fts logs is not changed')
def impl(context):
master_fts_log_count = gp_fts_log_in_master_log_count(master_data_dir)
segment_fts_log_count = gp_fts_log_in_segment_log_count()
if master_fts_log_count != context.master_fts_log_count:
raise Exception("Number of FTS logs on master has changed when logging is turned off: orig count %d new count %d" % (context.master_fts_log_count, master_fts_log_count))
if segment_fts_log_count != context.segment_fts_log_count:
raise Exception("Number of FTS logs on segments has changed when logging is turned off: orig count %d new count %d" % (context.segment_fts_log_count, segment_fts_log_count))
context.master_fts_log_count = master_fts_log_count
context.segment_fts_log_count = segment_fts_log_count
@then('the count of verbose fts logs is increased on all segments')
def impl(context):
master_fts_log_count = gp_fts_log_in_master_log_count(master_data_dir)
segment_fts_log_count = gp_fts_log_in_segment_log_count()
if master_fts_log_count <= context.master_fts_log_count:
raise Exception("Number of FTS logs on master has not increased changed when logging is turned on: orig count %d new count %d" % (context.master_fts_log_count, master_fts_log_count))
if segment_fts_log_count <= context.segment_fts_log_count:
raise Exception("Number of FTS logs on segments has not increased when logging is turned on: orig count %d new count %d" % (context.segment_fts_log_count, segment_fts_log_count))
context.master_fts_log_count = master_fts_log_count
context.segment_fts_log_count = segment_fts_log_count
|
louisLouL/pair_trading
|
refs/heads/master
|
capstone_env/lib/python3.6/site-packages/setuptools/command/install_scripts.py
|
454
|
from distutils import log
import distutils.command.install_scripts as orig
import os
import sys
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
|
danicampora/micropython
|
refs/heads/master
|
tests/basics/setattr1.py
|
69
|
class A:
var = 132
def __init__(self):
self.var2 = 34
a = A()
setattr(a, "var", 123)
setattr(a, "var2", 56)
print(a.var)
print(a.var2)
try:
setattr(a, b'var3', 1)
except TypeError:
print('TypeError')
|
bregman-arie/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/_nxos_ip_interface.py
|
16
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ip_interface
version_added: "2.1"
deprecated:
removed_in: "2.9"
why: Replaced with common C(*_l3_interface) network modules.
alternative: Use M(nxos_l3_interface) instead.
short_description: Manages L3 attributes for IPv4 and IPv6 interfaces.
description:
- Manages Layer 3 attributes for IPv4 and IPv6 interfaces.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Interface must already be an L3 port when using this module.
- Logical interfaces (po, loop, svi) must be created first.
- C(mask) must be specified in decimal format (e.g. 24) for
both IPv6 and IPv4.
- A single interface can have multiple IPv6 addresses configured.
- C(tag) is not idempotent for IPv6 addresses and I2 system image.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1, vlan10.
required: true
addr:
description:
- IPv4 or IPv6 Address.
version:
description:
- Version of IP address. If the IP address is IPv4, version should be v4.
If the IP address is IPv6, version should be v6.
default: v4
choices: ['v4', 'v6']
mask:
description:
- Subnet mask for IPv4 or IPv6 Address in decimal format.
dot1q:
description:
- Configures IEEE 802.1Q VLAN encapsulation on the subinterface. The range is from 2 to 4093.
version_added: "2.5"
tag:
description:
- Route tag for IPv4 or IPv6 Address in integer format.
default: 0
version_added: "2.4"
allow_secondary:
description:
- Allow to configure IPv4 secondary addresses on interface.
type: bool
default: 'no'
version_added: "2.4"
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
requirements:
- "ipaddress"
'''
EXAMPLES = '''
- name: Ensure ipv4 address is configured on Ethernet1/32
nxos_ip_interface:
interface: Ethernet1/32
transport: nxapi
version: v4
state: present
addr: 20.20.20.20
mask: 24
- name: Ensure ipv6 address is configured on Ethernet1/31
nxos_ip_interface:
interface: Ethernet1/31
transport: cli
version: v6
state: present
addr: '2001::db8:800:200c:cccb'
mask: 64
- name: Ensure ipv4 address is configured with tag
nxos_ip_interface:
interface: Ethernet1/32
transport: nxapi
version: v4
state: present
tag: 100
addr: 20.20.20.20
mask: 24
- name: Ensure ipv4 address is configured on sub-intf with dot1q encapsulation
nxos_ip_interface:
interface: Ethernet1/32.10
transport: nxapi
version: v4
state: present
dot1q: 10
addr: 20.20.20.20
mask: 24
- name: Configure ipv4 address as secondary if needed
nxos_ip_interface:
interface: Ethernet1/32
transport: nxapi
version: v4
state: present
allow_secondary: true
addr: 21.21.21.21
mask: 24
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"addr": "20.20.20.20", "allow_secondary": true,
"interface": "Ethernet1/32", "mask": "24", "tag": 100}
existing:
description: k/v pairs of existing IP attributes on the interface
returned: always
type: dict
sample: {"addresses": [{"addr": "11.11.11.11", "mask": 17, "tag": 101, "secondary": false}],
"interface": "ethernet1/32", "prefixes": ["11.11.0.0/17"],
"type": "ethernet", "vrf": "default"}
end_state:
description: k/v pairs of IP attributes after module execution
returned: always
type: dict
sample: {"addresses": [{"addr": "11.11.11.11", "mask": 17, "tag": 101, "secondary": false},
{"addr": "20.20.20.20", "mask": 24, "tag": 100, "secondary": true}],
"interface": "ethernet1/32", "prefixes": ["11.11.0.0/17", "20.20.20.0/24"],
"type": "ethernet", "vrf": "default"}
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface ethernet1/32", "ip address 20.20.20.20/24 secondary tag 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
try:
import ipaddress
HAS_IPADDRESS = True
except ImportError:
HAS_IPADDRESS = False
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
def find_same_addr(existing, addr, mask, full=False, **kwargs):
for address in existing['addresses']:
if address['addr'] == addr and address['mask'] == mask:
if full:
if kwargs['version'] == 'v4' and int(address['tag']) == kwargs['tag']:
return address
elif kwargs['version'] == 'v6' and kwargs['tag'] == 0:
# Currently we don't get info about IPv6 address tag
# But let's not break idempotence for the default case
return address
else:
return address
return False
def execute_show_command(command, module):
cmd = {}
cmd['answer'] = None
cmd['command'] = command
cmd['output'] = 'text'
cmd['prompt'] = None
body = run_commands(module, [cmd])
return body
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(command, module)[0]
if 'invalid' in body.lower():
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except KeyError:
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0} switchport'.format(interface)
mode = 'unknown'
if intf_type in ['ethernet', 'portchannel']:
body = execute_show_command(command, module)[0]
if len(body) > 0:
if 'Switchport: Disabled' in body:
mode = 'layer3'
elif 'Switchport: Enabled' in body:
mode = "layer2"
elif intf_type == 'svi':
mode = 'layer3'
return mode
def send_show_command(interface_name, version, module):
if version == 'v4':
command = 'show ip interface {0}'.format(interface_name)
elif version == 'v6':
command = 'show ipv6 interface {0}'.format(interface_name)
body = execute_show_command(command, module)
return body
def parse_unstructured_data(body, interface_name, version, module):
interface = {}
interface['addresses'] = []
interface['prefixes'] = []
vrf = None
body = body[0]
splitted_body = body.split('\n')
if version == "v6":
if "ipv6 is disabled" not in body.lower():
address_list = []
# We can have multiple IPv6 on the same interface.
# We need to parse them manually from raw output.
for index in range(0, len(splitted_body) - 1):
if "IPv6 address:" in splitted_body[index]:
first_reference_point = index + 1
elif "IPv6 subnet:" in splitted_body[index]:
last_reference_point = index
break
interface_list_table = splitted_body[first_reference_point:last_reference_point]
for each_line in interface_list_table:
address = each_line.strip().split(' ')[0]
if address not in address_list:
address_list.append(address)
interface['prefixes'].append(str(ipaddress.ip_interface(u"%s" % address).network))
if address_list:
for ipv6 in address_list:
address = {}
splitted_address = ipv6.split('/')
address['addr'] = splitted_address[0]
address['mask'] = splitted_address[1]
interface['addresses'].append(address)
else:
for index in range(0, len(splitted_body) - 1):
if "IP address" in splitted_body[index]:
regex = r'.*IP\saddress:\s(?P<addr>\d{1,3}(?:\.\d{1,3}){3}),\sIP\ssubnet:' + \
r'\s\d{1,3}(?:\.\d{1,3}){3}\/(?P<mask>\d+)(?:\s(?P<secondary>secondary)\s)?' + \
r'(.+?tag:\s(?P<tag>\d+).*)?'
match = re.match(regex, splitted_body[index])
if match:
match_dict = match.groupdict()
if match_dict['secondary'] is None:
match_dict['secondary'] = False
else:
match_dict['secondary'] = True
if match_dict['tag'] is None:
match_dict['tag'] = 0
else:
match_dict['tag'] = int(match_dict['tag'])
interface['addresses'].append(match_dict)
prefix = str(ipaddress.ip_interface(u"%(addr)s/%(mask)s" % match_dict).network)
interface['prefixes'].append(prefix)
try:
vrf_regex = r'.+?VRF\s+(?P<vrf>\S+?)\s'
match_vrf = re.match(vrf_regex, body, re.DOTALL)
vrf = match_vrf.groupdict()['vrf']
except AttributeError:
vrf = None
interface['interface'] = interface_name
interface['type'] = get_interface_type(interface_name)
interface['vrf'] = vrf
return interface
def parse_interface_data(body):
body = body[0]
splitted_body = body.split('\n')
for index in range(0, len(splitted_body) - 1):
if "Encapsulation 802.1Q" in splitted_body[index]:
regex = r'(.+?ID\s(?P<dot1q>\d+).*)?'
match = re.match(regex, splitted_body[index])
if match:
match_dict = match.groupdict()
if match_dict['dot1q'] is not None:
return int(match_dict['dot1q'])
return 0
def get_dot1q_id(interface_name, module):
if "." not in interface_name:
return 0
command = 'show interface {0}'.format(interface_name)
try:
body = execute_show_command(command, module)
dot1q = parse_interface_data(body)
return dot1q
except KeyError:
return 0
def get_ip_interface(interface_name, version, module):
body = send_show_command(interface_name, version, module)
interface = parse_unstructured_data(body, interface_name, version, module)
return interface
def get_remove_ip_config_commands(interface, addr, mask, existing, version):
commands = []
if version == 'v4':
# We can't just remove primary address if secondary address exists
for address in existing['addresses']:
if address['addr'] == addr:
if address['secondary']:
commands.append('no ip address {0}/{1} secondary'.format(addr, mask))
elif len(existing['addresses']) > 1:
new_primary = False
for address in existing['addresses']:
if address['addr'] != addr:
commands.append('no ip address {0}/{1} secondary'.format(address['addr'], address['mask']))
if not new_primary:
command = 'ip address {0}/{1}'.format(address['addr'], address['mask'])
new_primary = True
else:
command = 'ip address {0}/{1} secondary'.format(address['addr'], address['mask'])
if 'tag' in address and address['tag'] != 0:
command += " tag " + str(address['tag'])
commands.append(command)
else:
commands.append('no ip address {0}/{1}'.format(addr, mask))
break
else:
for address in existing['addresses']:
if address['addr'] == addr:
commands.append('no ipv6 address {0}/{1}'.format(addr, mask))
return commands
def get_config_ip_commands(delta, interface, existing, version):
commands = []
delta = dict(delta)
if version == 'v4':
command = 'ip address {addr}/{mask}'.format(**delta)
if len(existing['addresses']) > 0:
if delta['allow_secondary']:
for address in existing['addresses']:
if delta['addr'] == address['addr'] and address['secondary'] is False and delta['tag'] != 0:
break
else:
command += ' secondary'
else:
# Remove all existing addresses if 'allow_secondary' isn't specified
for address in existing['addresses']:
if address['secondary']:
commands.insert(0, 'no ip address {addr}/{mask} secondary'.format(**address))
else:
commands.append('no ip address {addr}/{mask}'.format(**address))
else:
if not delta['allow_secondary']:
# Remove all existing addresses if 'allow_secondary' isn't specified
for address in existing['addresses']:
commands.insert(0, 'no ipv6 address {addr}/{mask}'.format(**address))
command = 'ipv6 address {addr}/{mask}'.format(**delta)
if int(delta['tag']) > 0:
command += ' tag {tag}'.format(**delta)
elif int(delta['tag']) == 0:
# Case when we need to remove a tag from an address. Just issuing a command
# like 'ip address ...' (without 'tag') is not enough
commands += get_remove_ip_config_commands(interface, delta['addr'], delta['mask'], existing, version)
commands.append(command)
return commands
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def validate_params(addr, interface, mask, dot1q, tag, allow_secondary, version, state, intf_type, module):
device_info = get_capabilities(module)
network_api = device_info.get('network_api', 'nxapi')
if state == "present":
if addr is None or mask is None:
module.fail_json(msg="An IP address AND a mask must be provided "
"when state=present.")
elif state == "absent" and version == "v6":
if addr is None or mask is None:
module.fail_json(msg="IPv6 address and mask must be provided when "
"state=absent.")
if intf_type != "ethernet" and network_api == 'cliconf':
if is_default(interface, module) == "DNE":
module.fail_json(msg="That interface does not exist yet. Create "
"it first.", interface=interface)
if mask is not None:
try:
if (int(mask) < 1 or int(mask) > 32) and version == "v4":
raise ValueError
elif int(mask) < 1 or int(mask) > 128:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'mask' must be an integer between"
" 1 and 32 when version v4 and up to 128 "
"when version v6.", version=version,
mask=mask)
if addr is not None and mask is not None:
try:
ipaddress.ip_interface(u'%s/%s' % (addr, mask))
except ValueError:
module.fail_json(msg="Warning! Invalid ip address or mask set.", addr=addr, mask=mask)
if dot1q is not None:
try:
if dot1q < 2 or dot1q > 4093:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'dot1q' must be an integer between"
" 2 and 4093", dot1q=dot1q)
if tag is not None:
try:
if tag < 0 or tag > 4294967295:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'tag' must be an integer between"
" 0 (default) and 4294967295."
"To use tag you must set 'addr' and 'mask' params.", tag=tag)
if allow_secondary is not None:
try:
if addr is None or mask is None:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'secondary' can be used only when 'addr' and 'mask' set.",
allow_secondary=allow_secondary)
def main():
argument_spec = dict(
interface=dict(required=True),
addr=dict(required=False),
version=dict(required=False, choices=['v4', 'v6'],
default='v4'),
mask=dict(type='str', required=False),
dot1q=dict(required=False, default=0, type='int'),
tag=dict(required=False, default=0, type='int'),
state=dict(required=False, default='present',
choices=['present', 'absent']),
allow_secondary=dict(required=False, default=False,
type='bool')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_IPADDRESS:
module.fail_json(msg="ipaddress is required for this module. Run 'pip install ipaddress' for install.")
warnings = list()
addr = module.params['addr']
version = module.params['version']
mask = module.params['mask']
dot1q = module.params['dot1q']
tag = module.params['tag']
allow_secondary = module.params['allow_secondary']
interface = module.params['interface'].lower()
state = module.params['state']
intf_type = get_interface_type(interface)
validate_params(addr, interface, mask, dot1q, tag, allow_secondary, version, state, intf_type, module)
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
module.fail_json(msg='That interface is a layer2 port.\nMake it '
'a layer 3 port first.', interface=interface)
existing = get_ip_interface(interface, version, module)
dot1q_tag = get_dot1q_id(interface, module)
if dot1q_tag > 1:
existing['dot1q'] = dot1q_tag
args = dict(addr=addr, mask=mask, dot1q=dot1q, tag=tag, interface=interface, allow_secondary=allow_secondary)
proposed = dict((k, v) for k, v in args.items() if v is not None)
commands = []
changed = False
end_state = existing
commands = ['interface {0}'.format(interface)]
if state == 'absent':
if existing['addresses']:
if find_same_addr(existing, addr, mask):
command = get_remove_ip_config_commands(interface, addr,
mask, existing, version)
commands.append(command)
if 'dot1q' in existing and existing['dot1q'] > 1:
command = 'no encapsulation dot1Q {0}'.format(existing['dot1q'])
commands.append(command)
elif state == 'present':
if not find_same_addr(existing, addr, mask, full=True, tag=tag, version=version):
command = get_config_ip_commands(proposed, interface, existing, version)
commands.append(command)
if 'dot1q' not in existing and (intf_type in ['ethernet', 'portchannel'] and "." in interface):
command = 'encapsulation dot1Q {0}'.format(proposed['dot1q'])
commands.append(command)
if len(commands) < 2:
del commands[0]
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
changed = True
end_state = get_ip_interface(interface, version, module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['commands'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
|
cryptobanana/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/keyring.py
|
82
|
# (c) 2016, Samuel Boucher <boucher.samuel.c@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: keyring
author:
- Samuel Boucher <boucher.samuel.c@gmail.com>
version_added: "2.3"
requirements:
- keyring (python library)
short_description: grab secrets from the OS keyring
description:
- Allows you to access data stored in the OS provided keyring/keychain.
"""
EXAMPLES = """
- name: output secrets to screen (BAD IDEA)
debug:
msg: "Password: {{item}}"
with_keyring:
- 'servicename username'
- name: access mysql with password from keyring
mysql_db: login_password={{lookup('keyring','mysql joe')}} login_user=joe
"""
RETURN = """
_raw:
description: secrets stored
"""
HAS_KEYRING = True
from ansible.errors import AnsibleError
try:
import keyring
except ImportError:
HAS_KEYRING = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
if not HAS_KEYRING:
raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")
display.vvvv(u"keyring: %s" % keyring.get_keyring())
ret = []
for term in terms:
(servicename, username) = (term.split()[0], term.split()[1])
display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
password = keyring.get_password(servicename, username)
if password is None:
raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
ret.append(password.rstrip())
return ret
|
837468220/python-for-android
|
refs/heads/master
|
python-modules/pybluez/bluetooth/__init__.py
|
66
|
import sys
import os
from btcommon import *
__version__ = 0.19
def _dbg(*args):
return
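# NOTE: the writes below are intentionally unreachable; the early return
# above disables debug output. Remove it to re-enable these writes.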
sys.stderr.write(*args)
sys.stderr.write("\n")
if sys.platform == "win32":
_dbg("trying widcomm")
have_widcomm = False
dll = "wbtapi.dll"
sysroot = os.getenv ("SystemRoot")
if os.path.exists (dll) or \
os.path.exists (os.path.join (sysroot, "system32", dll)) or \
os.path.exists (os.path.join (sysroot, dll)):
try:
import widcomm
if widcomm.inquirer.is_device_ready ():
# if the Widcomm stack is active and a Bluetooth device on that
# stack is detected, then use the Widcomm stack
from widcomm import *
have_widcomm = True
except ImportError:
pass
if not have_widcomm:
# otherwise, fall back to the Microsoft stack
_dbg("Widcomm not ready. falling back to MS stack")
from msbt import *
elif sys.platform.startswith("linux"):
from bluez import *
elif sys.platform == "darwin":
from osx import *
discover_devices.__doc__ = \
"""
performs a bluetooth device discovery using the first available bluetooth
resource.
if lookup_names is False, returns a list of bluetooth addresses.
if lookup_names is True, returns a list of (address, name) tuples
lookup_names=False
if set to True, then discover_devices also attempts to look up the
display name of each detected device.
"""
lookup_name.__doc__ = \
"""
Tries to determine the friendly name (human readable) of the device with
the specified bluetooth address. Returns the name on success, and None
on failure.
"""
advertise_service.__doc__ = \
"""
Advertises a service with the local SDP server. sock must be a bound,
listening socket. name should be the name of the service, and service_id
(if specified) should be a string of the form
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", where each 'X' is a hexadecimal
digit.
service_classes is a list of the service classes to which this service belongs.
Each service class is a 16-bit UUID in the form "XXXX", where each 'X' is a
hexadecimal digit, or a 128-bit UUID in the form
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX". There are some constants for
standard services, e.g. SERIAL_PORT_CLASS, which equals "1101". Some class
constants:
SERIAL_PORT_CLASS LAN_ACCESS_CLASS DIALUP_NET_CLASS
HEADSET_CLASS CORDLESS_TELEPHONY_CLASS AUDIO_SOURCE_CLASS
AUDIO_SINK_CLASS PANU_CLASS NAP_CLASS
GN_CLASS
profiles is a list of service profiles that this service fulfills. Each
profile is a tuple of (uuid, version). Most standard profiles use
standard classes as UUIDs. PyBluez offers a list of standard profiles,
for example SERIAL_PORT_PROFILE. All standard profiles have the same
name as the classes, except that the _CLASS suffix is replaced by _PROFILE.
provider is a text string specifying the provider of the service
description is a text string describing the service
A note on working with Symbian smartphones:
bt_discover in Python for Series 60 will only detect service records
with service class SERIAL_PORT_CLASS and profile SERIAL_PORT_PROFILE
"""
stop_advertising.__doc__ = \
"""
Instructs the local SDP server to stop advertising the service associated
with sock. You should typically call this right before you close sock.
"""
find_service.__doc__ = \
"""
find_service (name = None, uuid = None, address = None)
Searches for SDP services that match the specified criteria and returns
the search results. If no criteria are specified, then returns a list of
all nearby services detected. If more than one criterion is specified, then
the search results will match all the criteria specified. If uuid is
specified, it must be either a 16-bit UUID in the form "XXXX", where each
'X' is a hexadecimal digit, or as a 128-bit UUID in the form
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX". A special case of address is
"localhost", which will search for services on the local machine.
The search results will be a list of dictionaries. Each dictionary
represents a search match and will have the following key/value pairs:
host - the bluetooth address of the device advertising the
service
name - the name of the service being advertised
description - a description of the service being advertised
provider - the name of the person/organization providing the service
protocol - either 'RFCOMM', 'L2CAP', None if the protocol was not
specified, or 'UNKNOWN' if the protocol was specified but
unrecognized
port - the L2CAP PSM # if the protocol is 'L2CAP', the RFCOMM
channel # if the protocol is 'RFCOMM', or None if it
wasn't specified
service-classes - a list of service class IDs (UUID strings). possibly
empty
profiles - a list of profiles - (UUID, version) pairs - the
service claims to support. possibly empty.
service-id - the Service ID of the service. None if it wasn't set
See the Bluetooth spec for the difference between
Service ID and Service Class ID List
"""
|
betamos/apitools
|
refs/heads/master
|
samples/fusiontables_sample/__init__.py
|
216
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Learningtribes/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/site_configuration/templatetags/configuration.py
|
46
|
"""
Template tags and helper functions for displaying breadcrumbs in page titles
based on the current site.
"""
from django import template
from django.conf import settings
from django.templatetags.static import static
from django.contrib.staticfiles.storage import staticfiles_storage
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
register = template.Library() # pylint: disable=invalid-name
@register.simple_tag(name="page_title_breadcrumbs", takes_context=True)
def page_title_breadcrumbs_tag(context, *crumbs): # pylint: disable=unused-argument
"""
Django template tag that creates breadcrumbs for page titles:
{% page_title_breadcrumbs "Specific" "Less Specific" General %}
"""
return configuration_helpers.page_title_breadcrumbs(*crumbs)
@register.simple_tag(name="platform_name")
def platform_name():
"""
Django template tag that outputs the current platform name:
{% platform_name %}
"""
return configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
@register.simple_tag(name="favicon_path")
def favicon_path(default=getattr(settings, 'FAVICON_PATH', 'images/favicon.ico')):
"""
Django template tag that outputs the configured favicon:
{% favicon_path %}
"""
return staticfiles_storage.url(configuration_helpers.get_value('favicon_path', default))
@register.simple_tag(name="microsite_css_overrides_file")
def microsite_css_overrides_file():
"""
Django template tag that outputs the css import for a microsite:
{% microsite_css_overrides_file %}
"""
file_path = configuration_helpers.get_value('css_overrides_file', None)
if file_path is not None:
return "<link href='{}' rel='stylesheet' type='text/css'>".format(static(file_path))
else:
return ""
@register.filter
def microsite_template_path(template_name):
"""
Django template filter to apply template overriding to microsites.
The django_templates loader does not support the leading slash, therefore
it is stripped before returning.
"""
template_name = theming_helpers.get_template_path(template_name)
return template_name[1:] if template_name[0] == '/' else template_name
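# Illustrative template usage (the template name is hypothetical):
#   {% include "some_app/page.html"|microsite_template_path %}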
|
rajiteh/taiga-back
|
refs/heads/master
|
taiga/feedback/migrations/0001_initial.py
|
29
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='FeedbackEntry',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('full_name', models.CharField(verbose_name='full name', max_length=256)),
('email', models.EmailField(verbose_name='email address', max_length=255)),
('comment', models.TextField(verbose_name='comment')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date')),
],
options={
'verbose_name': 'feedback entry',
'verbose_name_plural': 'feedback entries',
'ordering': ['-created_date', 'id'],
},
bases=(models.Model,),
),
]
|
QinerTech/QinerApps
|
refs/heads/master
|
openerp/addons/analytic/wizard/__init__.py
|
43
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_analytic_chart
|
silly-wacky-3-town-toon/SOURCE-COD
|
refs/heads/master
|
toontown/town/TownBattleSOSPanelNEW.py
|
1
|
from panda3d.core import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import types
from toontown.toon import NPCToons
from toontown.toon import NPCFriendPanel
from toontown.toonbase import ToontownBattleGlobals
class TownBattleSOSPanelNEW(DirectFrame, StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('TownBattleSOSPanel')
def __init__(self, doneEvent):
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(TownBattleSOSPanelNEW)
StateData.StateData.__init__(self, doneEvent)
self.friends = {}
self.NPCFriends = {}
self.textRolloverColor = Vec4(1, 1, 0, 1)
self.textDownColor = Vec4(0.5, 0.9, 1, 1)
self.textDisabledColor = Vec4(0.4, 0.8, 0.4, 1)
self.bldg = 0
self.chosenNPCToons = []
return
def load(self):
if self.isLoaded == 1:
return None
self.isLoaded = 1
bgd = loader.loadModel('phase_3.5/models/gui/frame')
gui = loader.loadModel('phase_3.5/models/gui/frame4names')
scrollGui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
backGui = loader.loadModel('phase_3.5/models/gui/battle_gui_new')
self['image'] = bgd
self['image_pos'] = (0.0, 0.1, -0.08)
self.setScale(0.3)
self.title = DirectLabel(parent=self, relief=None, text=TTLocalizer.TownBattleSOSNoFriends, text_scale=0.4, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(0.0, 0.0, 1.5))
self.NPCFriendPanel = NPCFriendPanel.NPCFriendPanel(parent=self, doneEvent=self.doneEvent)
self.NPCFriendPanel.setPos(-0.75, 0, -0.15)
self.NPCFriendPanel.setScale(0.325)
self.NPCFriendsLabel = DirectLabel(parent=self, relief=None, text=TTLocalizer.TownBattleSOSNPCFriends, text_scale=0.3, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(-0.75, 0.0, -2.0))
self.scrollList = DirectScrolledList(parent=self, relief=None, image=gui.find('**/frame4names'), image_scale=(0.11, 1, 0.1), text=TTLocalizer.FriendsListPanelOnlineFriends, text_scale=0.04, text_pos=(-0.02, 0.275), text_fg=(0, 0, 0, 1), incButton_image=(scrollGui.find('**/FndsLst_ScrollUp'),
scrollGui.find('**/FndsLst_ScrollDN'),
scrollGui.find('**/FndsLst_ScrollUp_Rllvr'),
scrollGui.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.3), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), incButton_scale=(1.0, 1.0, -1.0), decButton_image=(scrollGui.find('**/FndsLst_ScrollUp'),
scrollGui.find('**/FndsLst_ScrollDN'),
scrollGui.find('**/FndsLst_ScrollUp_Rllvr'),
scrollGui.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.175), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_pos=(-0.17, 0.0, 0.11), itemFrame_relief=None, numItemsVisible=9, items=[], pos=(2.4, 0.0, 0.025), scale=3.5)
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.32, 0, 0)))
clipNP = self.scrollList.component('itemFrame').attachNewNode(clipper)
self.scrollList.component('itemFrame').setClipPlane(clipNP)
self.close = DirectButton(parent=self, relief=None, image=(backGui.find('**/PckMn_BackBtn'), backGui.find('**/PckMn_BackBtn_Dn'), backGui.find('**/PckMn_BackBtn_Rlvr')), pos=(2.3, 0.0, -1.65), scale=3, text=TTLocalizer.TownBattleSOSBack, text_scale=0.05, text_pos=(0.01, -0.012), text_fg=Vec4(0, 0, 0.8, 1), command=self.__close)
gui.removeNode()
scrollGui.removeNode()
backGui.removeNode()
bgd.removeNode()
self.hide()
return
def unload(self):
if self.isLoaded == 0:
return None
self.isLoaded = 0
self.exit()
del self.title
del self.scrollList
del self.close
del self.friends
del self.NPCFriends
DirectFrame.destroy(self)
return None
def makeFriendButton(self, friendPair):
friendId, flags = friendPair
handle = base.cr.playerFriendsManager.identifyFriend(friendId)
if handle is None:
base.cr.fillUpFriendsMap()
return
friendName = handle.getName()
fg = Vec4(0.0, 0.0, 0.0, 1.0)
if handle.isPet():
com = self.__chosePet
else:
com = self.__choseFriend
return DirectButton(relief=None, text=friendName, text_scale=0.04, text_align=TextNode.ALeft, text_fg=fg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, command=com, extraArgs=[friendId, friendName])
def makeNPCFriendButton(self, NPCFriendId, numCalls):
if not TTLocalizer.NPCToonNames.has_key(NPCFriendId):
return None
friendName = TTLocalizer.NPCToonNames[NPCFriendId]
friendName += ' %d' % numCalls
fg = Vec4(0.0, 0.0, 0.0, 1.0)
return DirectButton(relief=None, text=friendName, text_scale=0.04, text_align=TextNode.ALeft, text_fg=fg, text1_bg=self.textDownColor, text2_bg=self.textRolloverColor, text3_fg=self.textDisabledColor, command=self.__choseNPCFriend, extraArgs=[NPCFriendId])
def enter(self, canLure = 1, canTrap = 1):
if self.isEntered == 1:
return None
self.isEntered = 1
if self.isLoaded == 0:
self.load()
self.canLure = canLure
self.canTrap = canTrap
self.factoryToonIdList = None
messenger.send('SOSPanelEnter', [self])
self.__updateScrollList()
self.__updateNPCFriendsPanel()
self.__updateTitleText()
self.show()
self.accept('friendOnline', self.__friendOnline)
self.accept('friendOffline', self.__friendOffline)
self.accept('friendsListChanged', self.__friendsListChanged)
self.accept('friendsMapComplete', self.__friendsListChanged)
return
def exit(self):
if self.isEntered == 0:
return None
self.isEntered = 0
self.hide()
self.ignore('friendOnline')
self.ignore('friendOffline')
self.ignore('friendsListChanged')
self.ignore('friendsMapComplete')
messenger.send(self.doneEvent)
return None
def __close(self):
doneStatus = {}
doneStatus['mode'] = 'Back'
messenger.send(self.doneEvent, [doneStatus])
def __choseFriend(self, friendId, friendName):
doneStatus = {}
doneStatus['mode'] = 'Friend'
doneStatus['friend'] = friendId
messenger.send(self.doneEvent, [doneStatus])
def __chosePet(self, petId, petName):
doneStatus = {}
doneStatus['mode'] = 'Pet'
doneStatus['petId'] = petId
doneStatus['petName'] = petName
messenger.send(self.doneEvent, [doneStatus])
def __choseNPCFriend(self, friendId):
doneStatus = {}
doneStatus['mode'] = 'NPCFriend'
doneStatus['friend'] = friendId
self.chosenNPCToons.append(friendId)
messenger.send(self.doneEvent, [doneStatus])
def setFactoryToonIdList(self, toonIdList):
self.factoryToonIdList = toonIdList[:]
def __updateScrollList(self):
newFriends = []
battlePets = config.GetBool('want-pets-in-battle', 1)
if base.wantPets and battlePets == 1 and base.localAvatar.hasPet():
newFriends.append((base.localAvatar.getPetId(), 0))
if not self.bldg or self.factoryToonIdList is not None:
for friendPair in base.localAvatar.friendsList:
if base.cr.isFriendOnline(friendPair[0]):
if self.factoryToonIdList is None or friendPair[0] in self.factoryToonIdList:
newFriends.append(friendPair)
if hasattr(base.cr, 'playerFriendsManager'):
for avatarId in base.cr.playerFriendsManager.getAllOnlinePlayerAvatars():
if not base.cr.playerFriendsManager.askAvatarKnownElseWhere(avatarId):
newFriends.append((avatarId, 0))
for friendPair in self.friends.keys():
if friendPair not in newFriends:
friendButton = self.friends[friendPair]
self.scrollList.removeItem(friendButton)
if not friendButton.isEmpty():
friendButton.destroy()
del self.friends[friendPair]
for friendPair in newFriends:
if not self.friends.has_key(friendPair):
friendButton = self.makeFriendButton(friendPair)
if friendButton:
self.scrollList.addItem(friendButton)
self.friends[friendPair] = friendButton
return
def __updateNPCFriendsPanel(self):
self.NPCFriends = {}
for friend, count in base.localAvatar.NPCFriendsDict.items():
track = NPCToons.getNPCTrack(friend)
if track == ToontownBattleGlobals.LURE_TRACK and self.canLure == 0 or track == ToontownBattleGlobals.TRAP_TRACK and self.canTrap == 0:
self.NPCFriends[friend] = 0
else:
self.NPCFriends[friend] = count
self.NPCFriendPanel.update(self.NPCFriends, fCallable=1)
def __updateTitleText(self):
isEmpty = (len(self.friends) == 0 and len(self.NPCFriends) == 0)
if isEmpty:
self.title['text'] = TTLocalizer.TownBattleSOSNoFriends
else:
self.title['text'] = TTLocalizer.TownBattleSOSWhichFriend
def __friendOnline(self, doId, commonChatFlags, whitelistChatFlags):
self.__updateScrollList()
self.__updateTitleText()
def __friendOffline(self, doId):
self.__updateScrollList()
self.__updateTitleText()
def __friendsListChanged(self):
self.__updateScrollList()
self.__updateTitleText()
|
toofar/qutebrowser
|
refs/heads/master
|
tests/end2end/features/test_private_bdd.py
|
5
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import json
import pytest_bdd as bdd
bdd.scenarios('private.feature')
@bdd.then(bdd.parsers.parse('the cookie {name} should be set to {value}'))
def check_cookie(quteproc, name, value):
"""Check if a given cookie is set correctly.
This assumes we're on the server cookies page.
"""
content = quteproc.get_content()
data = json.loads(content)
print(data)
assert data['cookies'][name] == value
@bdd.then(bdd.parsers.parse('the cookie {name} should not be set'))
def check_cookie_not_set(quteproc, name):
"""Check if a given cookie is not set."""
content = quteproc.get_content()
data = json.loads(content)
print(data)
assert name not in data['cookies']
@bdd.then(bdd.parsers.parse('the file {name} should not contain "{text}"'))
def check_not_contain(tmpdir, name, text):
path = tmpdir / name
assert text not in path.read()
|
benjamin9999/python-stix
|
refs/heads/master
|
stix/bindings/incident.py
|
1
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Apr 11 15:06:24 2013 by generateDS.py version 2.9a.
#
import sys
import getopt
import re as re_
import cybox.bindings.cybox_core as cybox_core_binding
import cybox.bindings.cybox_common as cybox_common_binding
import stix.bindings.stix_common as stix_common_binding
import stix.bindings.data_marking as data_marking_binding
import base64
from datetime import datetime, tzinfo, timedelta
XML_NS = "http://stix.mitre.org/Incident-1"
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser(huge_tree=True)
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_date(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
return datetime.strptime(input_data,
'%Y-%m-%d').replace(tzinfo = tz)
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(lwrite, level, pretty_print=True):
if pretty_print:
lwrite(' ' * level)
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
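# For example, find_attr_value_('xsi:type', node) resolves the 'xsi' prefix
# through node.nsmap and then looks the attribute up under its Clark-notation
# key, '{http://www.w3.org/2001/XMLSchema-instance}type'.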
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, lwrite, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
lwrite(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(lwrite, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(lwrite, level, namespace, name, pretty_print)
def exportSimple(self, lwrite, level, name):
if self.content_type == MixedContainer.TypeString:
lwrite('<%s>%s</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
lwrite('<%s>%d</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
lwrite('<%s>%f</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
lwrite('<%s>%g</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
lwrite('<%s>%s</%s>' %
(self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        else:
            # Fall back to the raw value so that ``text`` is always bound,
            # even for unrecognized content types.
            text = '%s' % self.value
        return text
def exportLiteral(self, lwrite, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(lwrite, level + 1)
showIndent(lwrite, level)
lwrite(')\n')
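# Sketch: a plain text chunk in mixed content can be represented as
#   MixedContainer(MixedContainer.CategoryText, MixedContainer.TypeNone,
#                  '', 'some text')
# and then serialized through export()/to_etree() above; only the value is
# used for the text category.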
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class PropertyAffectedType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Property=None, Description_Of_Effect=None, Type_Of_Availability_Loss=None, Duration_Of_Availability_Loss=None, Non_Public_Data_Compromised=None):
self.Property = Property
self.Description_Of_Effect = Description_Of_Effect
self.Type_Of_Availability_Loss = Type_Of_Availability_Loss
self.Duration_Of_Availability_Loss = Duration_Of_Availability_Loss
self.Non_Public_Data_Compromised = Non_Public_Data_Compromised
def factory(*args_, **kwargs_):
if PropertyAffectedType.subclass:
return PropertyAffectedType.subclass(*args_, **kwargs_)
else:
return PropertyAffectedType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Property(self): return self.Property
def set_Property(self, Property): self.Property = Property
def get_Description_Of_Effect(self): return self.Description_Of_Effect
def set_Description_Of_Effect(self, Description_Of_Effect): self.Description_Of_Effect = Description_Of_Effect
def get_Type_Of_Availability_Loss(self): return self.Type_Of_Availability_Loss
def set_Type_Of_Availability_Loss(self, Type_Of_Availability_Loss): self.Type_Of_Availability_Loss = Type_Of_Availability_Loss
def get_Duration_Of_Availability_Loss(self): return self.Duration_Of_Availability_Loss
def set_Duration_Of_Availability_Loss(self, Duration_Of_Availability_Loss): self.Duration_Of_Availability_Loss = Duration_Of_Availability_Loss
def get_Non_Public_Data_Compromised(self): return self.Non_Public_Data_Compromised
def set_Non_Public_Data_Compromised(self, Non_Public_Data_Compromised): self.Non_Public_Data_Compromised = Non_Public_Data_Compromised
def hasContent_(self):
if (
self.Property is not None or
self.Description_Of_Effect is not None or
self.Type_Of_Availability_Loss is not None or
self.Duration_Of_Availability_Loss is not None or
self.Non_Public_Data_Compromised is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='PropertyAffectedType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='PropertyAffectedType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='PropertyAffectedType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='PropertyAffectedType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Property is not None:
self.Property.export(lwrite, level, nsmap, namespace_, name_='Property', pretty_print=pretty_print)
if self.Description_Of_Effect is not None:
self.Description_Of_Effect.export(lwrite, level, nsmap, namespace_, name_='Description_Of_Effect', pretty_print=pretty_print)
if self.Type_Of_Availability_Loss is not None:
self.Type_Of_Availability_Loss.export(lwrite, level, nsmap, namespace_, name_='Type_Of_Availability_Loss', pretty_print=pretty_print)
if self.Duration_Of_Availability_Loss is not None:
self.Duration_Of_Availability_Loss.export(lwrite, level, nsmap, namespace_, name_='Duration_Of_Availability_Loss', pretty_print=pretty_print)
if self.Non_Public_Data_Compromised is not None:
self.Non_Public_Data_Compromised.export(lwrite, level, nsmap, namespace_, name_='Non_Public_Data_Compromised', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Property':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Property(obj_)
elif nodeName_ == 'Description_Of_Effect':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.set_Description_Of_Effect(obj_)
elif nodeName_ == 'Type_Of_Availability_Loss':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Type_Of_Availability_Loss(obj_)
elif nodeName_ == 'Duration_Of_Availability_Loss':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Duration_Of_Availability_Loss(obj_)
elif nodeName_ == 'Non_Public_Data_Compromised':
obj_ = NonPublicDataCompromisedType.factory()
obj_.build(child_)
self.set_Non_Public_Data_Compromised(obj_)
# end class PropertyAffectedType
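# Usage sketch for PropertyAffectedType (illustrative only; the writer and
# prefix map are hypothetical, and XML_NS is this module's incident namespace
# constant used as the export default):
#
#   import sys
#   pa = PropertyAffectedType.factory()
#   pa.set_Property(stix_common_binding.ControlledVocabularyStringType.factory())
#   pa.export(sys.stdout.write, 0, {XML_NS: 'incident'})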
class AffectedAssetType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Type=None, Description=None, Business_Function_Or_Role=None, Ownership_Class=None, Management_Class=None, Location_Class=None, Location=None, Nature_Of_Security_Effect=None, Structured_Description=None):
self.Type = Type
self.Description = Description
self.Business_Function_Or_Role = Business_Function_Or_Role
self.Ownership_Class = Ownership_Class
self.Management_Class = Management_Class
self.Location_Class = Location_Class
self.Location = Location
self.Nature_Of_Security_Effect = Nature_Of_Security_Effect
self.Structured_Description = Structured_Description
def factory(*args_, **kwargs_):
if AffectedAssetType.subclass:
return AffectedAssetType.subclass(*args_, **kwargs_)
else:
return AffectedAssetType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Type(self): return self.Type
def set_Type(self, Type): self.Type = Type
def get_Description(self): return self.Description
def set_Description(self, Description): self.Description = Description
def get_Business_Function_Or_Role(self): return self.Business_Function_Or_Role
def set_Business_Function_Or_Role(self, Business_Function_Or_Role): self.Business_Function_Or_Role = Business_Function_Or_Role
def get_Ownership_Class(self): return self.Ownership_Class
def set_Ownership_Class(self, Ownership_Class): self.Ownership_Class = Ownership_Class
def get_Management_Class(self): return self.Management_Class
def set_Management_Class(self, Management_Class): self.Management_Class = Management_Class
def get_Location_Class(self): return self.Location_Class
def set_Location_Class(self, Location_Class): self.Location_Class = Location_Class
def get_Location(self): return self.Location
def set_Location(self, Location): self.Location = Location
def get_Nature_Of_Security_Effect(self): return self.Nature_Of_Security_Effect
def set_Nature_Of_Security_Effect(self, Nature_Of_Security_Effect): self.Nature_Of_Security_Effect = Nature_Of_Security_Effect
def get_Structured_Description(self): return self.Structured_Description
def set_Structured_Description(self, Structured_Description): self.Structured_Description = Structured_Description
def hasContent_(self):
if (
self.Type is not None or
self.Description is not None or
self.Business_Function_Or_Role is not None or
self.Ownership_Class is not None or
self.Management_Class is not None or
self.Location_Class is not None or
self.Location is not None or
self.Nature_Of_Security_Effect is not None or
self.Structured_Description is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AffectedAssetType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='AffectedAssetType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='AffectedAssetType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AffectedAssetType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Type is not None:
self.Type.export(lwrite, level, nsmap, namespace_, name_='Type', pretty_print=pretty_print)
if self.Description is not None:
self.Description.export(lwrite, level, nsmap, namespace_, name_='Description', pretty_print=pretty_print)
if self.Business_Function_Or_Role is not None:
self.Business_Function_Or_Role.export(lwrite, level, nsmap, namespace_, name_='Business_Function_Or_Role', pretty_print=pretty_print)
if self.Ownership_Class is not None:
self.Ownership_Class.export(lwrite, level, nsmap, namespace_, name_='Ownership_Class', pretty_print=pretty_print)
if self.Management_Class is not None:
self.Management_Class.export(lwrite, level, nsmap, namespace_, name_='Management_Class', pretty_print=pretty_print)
if self.Location_Class is not None:
self.Location_Class.export(lwrite, level, nsmap, namespace_, name_='Location_Class', pretty_print=pretty_print)
if self.Location is not None:
self.Location.export(lwrite, level, nsmap, namespace_, name_='Location', pretty_print=pretty_print)
if self.Nature_Of_Security_Effect is not None:
self.Nature_Of_Security_Effect.export(lwrite, level, nsmap, namespace_, name_='Nature_Of_Security_Effect', pretty_print=pretty_print)
if self.Structured_Description is not None:
self.Structured_Description.export(lwrite, level, "%s:" % (nsmap[namespace_]), name_='Structured_Description', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Type':
obj_ = AssetTypeType.factory()
obj_.build(child_)
self.set_Type(obj_)
elif nodeName_ == 'Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.set_Description(obj_)
elif nodeName_ == 'Business_Function_Or_Role':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.set_Business_Function_Or_Role(obj_)
elif nodeName_ == 'Ownership_Class':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Ownership_Class(obj_)
elif nodeName_ == 'Management_Class':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Management_Class(obj_)
elif nodeName_ == 'Location_Class':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Location_Class(obj_)
elif nodeName_ == 'Location':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
if type_name_ == "CIQAddress3.0InstanceType":
import stix.bindings.extensions.address.ciq_address_3_0 as ciq_address_binding
obj_ = ciq_address_binding.CIQAddress3_0InstanceType.factory()
else:
raise NotImplementedError('No implementation class found for: ' + type_name_)
else:
raise NotImplementedError('Class not implemented for <Location> element')
obj_.build(child_)
self.set_Location(obj_)
elif nodeName_ == 'Nature_Of_Security_Effect':
obj_ = NatureOfSecurityEffectType.factory()
obj_.build(child_)
self.set_Nature_Of_Security_Effect(obj_)
elif nodeName_ == 'Structured_Description':
obj_ = cybox_core_binding.ObservablesType.factory()
obj_.build(child_)
self.set_Structured_Description(obj_)
# end class AffectedAssetType
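# Note on AffectedAssetType.buildChildren above: <Location> children are
# dispatched on their xsi:type attribute, so instance documents must carry an
# xsi:type naming CIQAddress3.0InstanceType for the CIQ address extension
# class to be selected; any other type name raises NotImplementedError.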
class ImpactAssessmentType(GeneratedsSuper):
"""The ImpactAssessmentType specifies a summary assessment of impact
for this cyber threat Incident."""
subclass = None
superclass = None
def __init__(self, Direct_Impact_Summary=None, Indirect_Impact_Summary=None, Total_Loss_Estimation=None, Impact_Qualification=None, Effects=None, External_Impact_Assessment_Model=None):
self.Direct_Impact_Summary = Direct_Impact_Summary
self.Indirect_Impact_Summary = Indirect_Impact_Summary
self.Total_Loss_Estimation = Total_Loss_Estimation
self.Impact_Qualification = Impact_Qualification
self.Effects = Effects
self.External_Impact_Assessment_Model = External_Impact_Assessment_Model
def factory(*args_, **kwargs_):
if ImpactAssessmentType.subclass:
return ImpactAssessmentType.subclass(*args_, **kwargs_)
else:
return ImpactAssessmentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Direct_Impact_Summary(self): return self.Direct_Impact_Summary
def set_Direct_Impact_Summary(self, Direct_Impact_Summary): self.Direct_Impact_Summary = Direct_Impact_Summary
def get_Indirect_Impact_Summary(self): return self.Indirect_Impact_Summary
def set_Indirect_Impact_Summary(self, Indirect_Impact_Summary): self.Indirect_Impact_Summary = Indirect_Impact_Summary
def get_Total_Loss_Estimation(self): return self.Total_Loss_Estimation
def set_Total_Loss_Estimation(self, Total_Loss_Estimation): self.Total_Loss_Estimation = Total_Loss_Estimation
def get_Impact_Qualification(self): return self.Impact_Qualification
def set_Impact_Qualification(self, Impact_Qualification): self.Impact_Qualification = Impact_Qualification
def get_Effects(self): return self.Effects
def set_Effects(self, Effects): self.Effects = Effects
def get_External_Impact_Assessment_Model(self): return self.External_Impact_Assessment_Model
def set_External_Impact_Assessment_Model(self, External_Impact_Assessment_Model): self.External_Impact_Assessment_Model = External_Impact_Assessment_Model
def hasContent_(self):
if (
self.Direct_Impact_Summary is not None or
self.Indirect_Impact_Summary is not None or
self.Total_Loss_Estimation is not None or
self.Impact_Qualification is not None or
self.Effects is not None or
self.External_Impact_Assessment_Model is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ImpactAssessmentType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='ImpactAssessmentType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='ImpactAssessmentType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ImpactAssessmentType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Direct_Impact_Summary is not None:
self.Direct_Impact_Summary.export(lwrite, level, nsmap, namespace_, name_='Direct_Impact_Summary', pretty_print=pretty_print)
if self.Indirect_Impact_Summary is not None:
self.Indirect_Impact_Summary.export(lwrite, level, nsmap, namespace_, name_='Indirect_Impact_Summary', pretty_print=pretty_print)
if self.Total_Loss_Estimation is not None:
self.Total_Loss_Estimation.export(lwrite, level, nsmap, namespace_, name_='Total_Loss_Estimation', pretty_print=pretty_print)
if self.Impact_Qualification is not None:
self.Impact_Qualification.export(lwrite, level, nsmap, namespace_, name_='Impact_Qualification', pretty_print=pretty_print)
if self.Effects is not None:
self.Effects.export(lwrite, level, nsmap, namespace_, name_='Effects', pretty_print=pretty_print)
if self.External_Impact_Assessment_Model is not None:
self.External_Impact_Assessment_Model.export(lwrite, level, nsmap, namespace_, name_='External_Impact_Assessment_Model', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Direct_Impact_Summary':
obj_ = DirectImpactSummaryType.factory()
obj_.build(child_)
self.set_Direct_Impact_Summary(obj_)
elif nodeName_ == 'Indirect_Impact_Summary':
obj_ = IndirectImpactSummaryType.factory()
obj_.build(child_)
self.set_Indirect_Impact_Summary(obj_)
elif nodeName_ == 'Total_Loss_Estimation':
obj_ = TotalLossEstimationType.factory()
obj_.build(child_)
self.set_Total_Loss_Estimation(obj_)
elif nodeName_ == 'Impact_Qualification':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Impact_Qualification(obj_)
elif nodeName_ == 'Effects':
obj_ = EffectsType.factory()
obj_.build(child_)
self.set_Effects(obj_)
elif nodeName_ == 'External_Impact_Assessment_Model':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <External_Impact_Assessment_Model> element')
self.set_External_Impact_Assessment_Model(obj_)
# end class ImpactAssessmentType
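# Note: unlike the Location dispatch above, External_Impact_Assessment_Model
# resolves its xsi:type name through globals(), so the concrete extension
# class must already be defined (or injected) in this module's namespace
# before parsing; an unknown name raises KeyError rather than
# NotImplementedError.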
class ExternalImpactAssessmentModelType(GeneratedsSuper):
"""The ExternalImpactAssessmentModelType is an abstract type enabling
the definition through extension of incident impact assessment
models external to STIX.Specifies the name of the externally
defined impact assessment model.Specifies a URL reference for
the externally defined impact assessment model."""
subclass = None
superclass = None
def __init__(self, model_name=None, model_reference=None):
self.model_name = _cast(None, model_name)
self.model_reference = _cast(None, model_reference)
def factory(*args_, **kwargs_):
if ExternalImpactAssessmentModelType.subclass:
return ExternalImpactAssessmentModelType.subclass(*args_, **kwargs_)
else:
return ExternalImpactAssessmentModelType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_model_name(self): return self.model_name
def set_model_name(self, model_name): self.model_name = model_name
def get_model_reference(self): return self.model_reference
def set_model_reference(self, model_reference): self.model_reference = model_reference
    def hasContent_(self):
        # Attribute-only type: there is never element content to serialize.
        return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ExternalImpactAssessmentModelType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='ExternalImpactAssessmentModelType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='ExternalImpactAssessmentModelType'):
if self.model_name is not None and 'model_name' not in already_processed:
already_processed.add('model_name')
lwrite(' model_name=%s' % (self.gds_format_string(quote_attrib(self.model_name).encode(ExternalEncoding), input_name='model_name'), ))
if self.model_reference is not None and 'model_reference' not in already_processed:
already_processed.add('model_reference')
lwrite(' model_reference=%s' % (self.gds_format_string(quote_attrib(self.model_reference).encode(ExternalEncoding), input_name='model_reference'), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ExternalImpactAssessmentModelType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('model_name', node)
if value is not None and 'model_name' not in already_processed:
already_processed.add('model_name')
self.model_name = value
value = find_attr_value_('model_reference', node)
if value is not None and 'model_reference' not in already_processed:
already_processed.add('model_reference')
self.model_reference = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ExternalImpactAssessmentModelType
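# Because the schema marks this type abstract, concrete impact assessment
# models are expected to extend it (optionally via the ``subclass`` hook
# above) and appear in instance documents through xsi:type, as handled in
# ImpactAssessmentType.buildChildren.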
class COATakenType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Time=None, Contributors=None, Course_Of_Action=None, extensiontype_=None):
self.Time = Time
self.Contributors = Contributors
self.Course_Of_Action = Course_Of_Action
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if COATakenType.subclass:
return COATakenType.subclass(*args_, **kwargs_)
else:
return COATakenType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Time(self): return self.Time
def set_Time(self, Time): self.Time = Time
def get_Contributors(self): return self.Contributors
def set_Contributors(self, Contributors): self.Contributors = Contributors
def get_Course_Of_Action(self): return self.Course_Of_Action
def set_Course_Of_Action(self, Course_Of_Action): self.Course_Of_Action = Course_Of_Action
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.Time is not None or
self.Contributors is not None or
self.Course_Of_Action is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='COATakenType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='COATakenType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='COATakenType'):
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
lwrite(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
lwrite(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='COATakenType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Time is not None:
self.Time.export(lwrite, level, nsmap, namespace_, name_='Time', pretty_print=pretty_print)
if self.Contributors is not None:
self.Contributors.export(lwrite, level, nsmap, namespace_, name_='Contributors', pretty_print=pretty_print)
if self.Course_Of_Action is not None:
self.Course_Of_Action.export(lwrite, level, nsmap, namespace_, name_='Course_Of_Action', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Time':
obj_ = TimeType.factory()
obj_.build(child_)
self.set_Time(obj_)
elif nodeName_ == 'Contributors':
obj_ = ContributorsType.factory()
obj_.build(child_)
self.set_Contributors(obj_)
elif nodeName_ == 'Course_Of_Action':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
if type_name_ == "CourseOfActionType":
import stix.bindings.course_of_action as coa_binding
obj_ = coa_binding.CourseOfActionType.factory()
else:
raise NotImplementedError('Class not implemented for element type: ' + type_name_)
else:
obj_ = stix_common_binding.CourseOfActionBaseType.factory() # not abstract
obj_.build(child_)
self.set_Course_Of_Action(obj_)
# end class COATakenType
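# Minimal export sketch for COATakenType (illustrative; the writer, prefix
# map and element name are hypothetical):
#
#   import sys
#   taken = COATakenType.factory()
#   taken.set_Time(TimeType.factory())
#   taken.export(sys.stdout.write, 0, {XML_NS: 'incident'}, name_='COA_Taken')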
class JournalEntryType(GeneratedsSuper):
"""The JournalEntryType is optional and provides journal notes for
information discovered during the handling of the
Incident.Specifies the author of the JournalEntry note.Specifies
the date and time that the JournalEntry note was written.In
order to avoid ambiguity, it is strongly suggest that all
timestamps include a specification of the timezone if it is
known.Represents the precision of the associated time value. If
omitted, the default is "second", meaning the timestamp is
precise to the full field value. Digits in the timestamp that
are required by the xs:dateTime datatype but are beyond the
specified precision should be zeroed out."""
subclass = None
superclass = None
def __init__(self, time=None, time_precision='second', author=None, valueOf_=None):
self.time = _cast(None, time)
self.time_precision = _cast(None, time_precision)
self.author = _cast(None, author)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if JournalEntryType.subclass:
return JournalEntryType.subclass(*args_, **kwargs_)
else:
return JournalEntryType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_time(self): return self.time
def set_time(self, time): self.time = time
def get_time_precision(self): return self.time_precision
def set_time_precision(self, time_precision): self.time_precision = time_precision
def get_author(self): return self.author
def set_author(self, author): self.author = author
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='JournalEntryType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='JournalEntryType')
if self.hasContent_():
lwrite('>')
lwrite(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='JournalEntryType'):
if self.time is not None and 'time' not in already_processed:
already_processed.add('time')
lwrite(' time="%s"' % self.gds_format_datetime(self.time, input_name='time'))
if self.time_precision is not None and 'time_precision' not in already_processed:
already_processed.add('time_precision')
lwrite(' time_precision=%s' % (quote_attrib(self.time_precision), ))
if self.author is not None and 'author' not in already_processed:
already_processed.add('author')
lwrite(' author=%s' % (self.gds_format_string(quote_attrib(self.author).encode(ExternalEncoding), input_name='author'), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='JournalEntryType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('time', node)
if value is not None and 'time' not in already_processed:
already_processed.add('time')
try:
self.time = self.gds_parse_datetime(value, node, 'time')
            except ValueError as exp:
raise ValueError('Bad date-time attribute (time): %s' % exp)
value = find_attr_value_('time_precision', node)
if value is not None and 'time_precision' not in already_processed:
already_processed.add('time_precision')
self.time_precision = value
value = find_attr_value_('author', node)
if value is not None and 'author' not in already_processed:
already_processed.add('author')
self.author = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class JournalEntryType
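# Minimal export sketch for JournalEntryType (illustrative; the author and
# note text are hypothetical):
#
#   import sys
#   entry = JournalEntryType.factory()
#   entry.set_author('analyst@example.org')
#   entry.set_valueOf_('Initial triage complete.')
#   entry.export(sys.stdout.write, 0, {XML_NS: 'incident'},
#                name_='Journal_Entry')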
class COARequestedType(COATakenType):
"""Specifies a suggested level of priority to be applied to this
requested COA."""
subclass = None
superclass = COATakenType
def __init__(self, Time=None, Contributors=None, Course_Of_Action=None, priority=None):
super(COARequestedType, self).__init__(Time=Time, Contributors=Contributors, Course_Of_Action=Course_Of_Action)
self.priority = _cast(None, priority)
def factory(*args_, **kwargs_):
if COARequestedType.subclass:
return COARequestedType.subclass(*args_, **kwargs_)
else:
return COARequestedType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_priority(self): return self.priority
def set_priority(self, priority): self.priority = priority
def hasContent_(self):
if (
super(COARequestedType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='COARequestedType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='COARequestedType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='COARequestedType'):
super(COARequestedType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='COARequestedType')
if self.priority is not None and 'priority' not in already_processed:
already_processed.add('priority')
lwrite(' priority=%s' % (self.gds_format_string(quote_attrib(self.priority).encode(ExternalEncoding), input_name='priority'), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='COARequestedType', fromsubclass_=False, pretty_print=True):
super(COARequestedType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('priority', node)
if value is not None and 'priority' not in already_processed:
already_processed.add('priority')
self.priority = value
super(COARequestedType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(COARequestedType, self).buildChildren(child_, node, nodeName_, True)
# end class COARequestedType
class ContributorsType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Contributor=None):
if Contributor is None:
self.Contributor = []
else:
self.Contributor = Contributor
def factory(*args_, **kwargs_):
if ContributorsType.subclass:
return ContributorsType.subclass(*args_, **kwargs_)
else:
return ContributorsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Contributor(self): return self.Contributor
def set_Contributor(self, Contributor): self.Contributor = Contributor
def add_Contributor(self, value): self.Contributor.append(value)
def insert_Contributor(self, index, value): self.Contributor[index] = value
def hasContent_(self):
if (
self.Contributor
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ContributorsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='ContributorsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='ContributorsType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ContributorsType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Contributor_ in self.Contributor:
Contributor_.export(lwrite, level, "%s:" % (nsmap[namespace_]), name_='Contributor', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Contributor':
obj_ = cybox_common_binding.ContributorType.factory()
obj_.build(child_)
self.Contributor.append(obj_)
# end class ContributorsType
class COATimeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Start=None, End=None):
self.Start = Start
self.End = End
def factory(*args_, **kwargs_):
if COATimeType.subclass:
return COATimeType.subclass(*args_, **kwargs_)
else:
return COATimeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Start(self): return self.Start
def set_Start(self, Start): self.Start = Start
def get_End(self): return self.End
def set_End(self, End): self.End = End
def hasContent_(self):
if (
self.Start is not None or
self.End is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='COATimeType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='COATimeType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='COATimeType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='COATimeType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Start is not None:
self.Start.export(lwrite, level, nsmap, namespace_, name_='Start', pretty_print=pretty_print)
if self.End is not None:
self.End.export(lwrite, level, nsmap, namespace_, name_='End', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Start':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Start(obj_)
elif nodeName_ == 'End':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_End(obj_)
# end class COATimeType
class LossEstimationType(GeneratedsSuper):
"""Specifies the estimated financial loss for the Incident.Specifies
the ISO 4217 currency code if other than USD"""
subclass = None
superclass = None
def __init__(self, iso_currency_code=None, amount=None):
self.iso_currency_code = _cast(None, iso_currency_code)
self.amount = _cast(None, amount)
def factory(*args_, **kwargs_):
if LossEstimationType.subclass:
return LossEstimationType.subclass(*args_, **kwargs_)
else:
return LossEstimationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_iso_currency_code(self): return self.iso_currency_code
def set_iso_currency_code(self, iso_currency_code): self.iso_currency_code = iso_currency_code
def get_amount(self): return self.amount
def set_amount(self, amount): self.amount = amount
    def hasContent_(self):
        # Attribute-only type: there is never element content to serialize.
        return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='LossEstimationType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='LossEstimationType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='LossEstimationType'):
if self.iso_currency_code is not None and 'iso_currency_code' not in already_processed:
already_processed.add('iso_currency_code')
lwrite(' iso_currency_code=%s' % (self.gds_format_string(quote_attrib(self.iso_currency_code).encode(ExternalEncoding), input_name='iso_currency_code'), ))
if self.amount is not None and 'amount' not in already_processed:
already_processed.add('amount')
lwrite(' amount=%s' % (self.gds_format_string(quote_attrib(self.amount).encode(ExternalEncoding), input_name='amount'), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='LossEstimationType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('iso_currency_code', node)
if value is not None and 'iso_currency_code' not in already_processed:
already_processed.add('iso_currency_code')
self.iso_currency_code = value
value = find_attr_value_('amount', node)
if value is not None and 'amount' not in already_processed:
already_processed.add('amount')
self.amount = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class LossEstimationType
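# Minimal export sketch for LossEstimationType (illustrative; the figures are
# hypothetical and the element name comes from the enclosing
# TotalLossEstimationType):
#
#   import sys
#   loss = LossEstimationType.factory()
#   loss.set_amount('150000')
#   loss.set_iso_currency_code('EUR')
#   loss.export(sys.stdout.write, 0, {XML_NS: 'incident'},
#               name_='Initial_Reported_Total_Loss_Estimation')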
class TotalLossEstimationType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Initial_Reported_Total_Loss_Estimation=None, Actual_Total_Loss_Estimation=None):
self.Initial_Reported_Total_Loss_Estimation = Initial_Reported_Total_Loss_Estimation
self.Actual_Total_Loss_Estimation = Actual_Total_Loss_Estimation
def factory(*args_, **kwargs_):
if TotalLossEstimationType.subclass:
return TotalLossEstimationType.subclass(*args_, **kwargs_)
else:
return TotalLossEstimationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Initial_Reported_Total_Loss_Estimation(self): return self.Initial_Reported_Total_Loss_Estimation
def set_Initial_Reported_Total_Loss_Estimation(self, Initial_Reported_Total_Loss_Estimation): self.Initial_Reported_Total_Loss_Estimation = Initial_Reported_Total_Loss_Estimation
def get_Actual_Total_Loss_Estimation(self): return self.Actual_Total_Loss_Estimation
def set_Actual_Total_Loss_Estimation(self, Actual_Total_Loss_Estimation): self.Actual_Total_Loss_Estimation = Actual_Total_Loss_Estimation
def hasContent_(self):
if (
self.Initial_Reported_Total_Loss_Estimation is not None or
self.Actual_Total_Loss_Estimation is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='TotalLossEstimationType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='TotalLossEstimationType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='TotalLossEstimationType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='TotalLossEstimationType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Initial_Reported_Total_Loss_Estimation is not None:
self.Initial_Reported_Total_Loss_Estimation.export(lwrite, level, nsmap, namespace_, name_='Initial_Reported_Total_Loss_Estimation', pretty_print=pretty_print)
if self.Actual_Total_Loss_Estimation is not None:
self.Actual_Total_Loss_Estimation.export(lwrite, level, nsmap, namespace_, name_='Actual_Total_Loss_Estimation', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Initial_Reported_Total_Loss_Estimation':
obj_ = LossEstimationType.factory()
obj_.build(child_)
self.set_Initial_Reported_Total_Loss_Estimation(obj_)
elif nodeName_ == 'Actual_Total_Loss_Estimation':
obj_ = LossEstimationType.factory()
obj_.build(child_)
self.set_Actual_Total_Loss_Estimation(obj_)
# end class TotalLossEstimationType
class IndirectImpactSummaryType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Loss_Of_Competitive_Advantage=None, Brand_And_Market_Damage=None, Increased_Operating_Costs=None, Legal_And_Regulatory_Costs=None):
self.Loss_Of_Competitive_Advantage = Loss_Of_Competitive_Advantage
self.Brand_And_Market_Damage = Brand_And_Market_Damage
self.Increased_Operating_Costs = Increased_Operating_Costs
self.Legal_And_Regulatory_Costs = Legal_And_Regulatory_Costs
def factory(*args_, **kwargs_):
if IndirectImpactSummaryType.subclass:
return IndirectImpactSummaryType.subclass(*args_, **kwargs_)
else:
return IndirectImpactSummaryType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Loss_Of_Competitive_Advantage(self): return self.Loss_Of_Competitive_Advantage
def set_Loss_Of_Competitive_Advantage(self, Loss_Of_Competitive_Advantage): self.Loss_Of_Competitive_Advantage = Loss_Of_Competitive_Advantage
def get_Brand_And_Market_Damage(self): return self.Brand_And_Market_Damage
def set_Brand_And_Market_Damage(self, Brand_And_Market_Damage): self.Brand_And_Market_Damage = Brand_And_Market_Damage
def get_Increased_Operating_Costs(self): return self.Increased_Operating_Costs
def set_Increased_Operating_Costs(self, Increased_Operating_Costs): self.Increased_Operating_Costs = Increased_Operating_Costs
def get_Legal_And_Regulatory_Costs(self): return self.Legal_And_Regulatory_Costs
def set_Legal_And_Regulatory_Costs(self, Legal_And_Regulatory_Costs): self.Legal_And_Regulatory_Costs = Legal_And_Regulatory_Costs
def hasContent_(self):
if (
self.Loss_Of_Competitive_Advantage is not None or
self.Brand_And_Market_Damage is not None or
self.Increased_Operating_Costs is not None or
self.Legal_And_Regulatory_Costs is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='IndirectImpactSummaryType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='IndirectImpactSummaryType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='IndirectImpactSummaryType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='IndirectImpactSummaryType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Loss_Of_Competitive_Advantage is not None:
self.Loss_Of_Competitive_Advantage.export(lwrite, level, nsmap, namespace_, name_='Loss_Of_Competitive_Advantage', pretty_print=pretty_print)
if self.Brand_And_Market_Damage is not None:
self.Brand_And_Market_Damage.export(lwrite, level, nsmap, namespace_, name_='Brand_And_Market_Damage', pretty_print=pretty_print)
if self.Increased_Operating_Costs is not None:
self.Increased_Operating_Costs.export(lwrite, level, nsmap, namespace_, name_='Increased_Operating_Costs', pretty_print=pretty_print)
if self.Legal_And_Regulatory_Costs is not None:
self.Legal_And_Regulatory_Costs.export(lwrite, level, nsmap, namespace_, name_='Legal_And_Regulatory_Costs', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Loss_Of_Competitive_Advantage':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Loss_Of_Competitive_Advantage(obj_)
elif nodeName_ == 'Brand_And_Market_Damage':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Brand_And_Market_Damage(obj_)
elif nodeName_ == 'Increased_Operating_Costs':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Increased_Operating_Costs(obj_)
elif nodeName_ == 'Legal_And_Regulatory_Costs':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Legal_And_Regulatory_Costs(obj_)
# end class IndirectImpactSummaryType
class DirectImpactSummaryType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Asset_Losses=None, Business_Mission_Disruption=None, Response_And_Recovery_Costs=None):
self.Asset_Losses = Asset_Losses
self.Business_Mission_Disruption = Business_Mission_Disruption
self.Response_And_Recovery_Costs = Response_And_Recovery_Costs
def factory(*args_, **kwargs_):
if DirectImpactSummaryType.subclass:
return DirectImpactSummaryType.subclass(*args_, **kwargs_)
else:
return DirectImpactSummaryType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Asset_Losses(self): return self.Asset_Losses
def set_Asset_Losses(self, Asset_Losses): self.Asset_Losses = Asset_Losses
def get_Business_Mission_Disruption(self): return self.Business_Mission_Disruption
def set_Business_Mission_Disruption(self, Business_Mission_Disruption): self.Business_Mission_Disruption = Business_Mission_Disruption
def get_Response_And_Recovery_Costs(self): return self.Response_And_Recovery_Costs
def set_Response_And_Recovery_Costs(self, Response_And_Recovery_Costs): self.Response_And_Recovery_Costs = Response_And_Recovery_Costs
def hasContent_(self):
if (
self.Asset_Losses is not None or
self.Business_Mission_Disruption is not None or
self.Response_And_Recovery_Costs is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='DirectImpactSummaryType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='DirectImpactSummaryType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='DirectImpactSummaryType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='DirectImpactSummaryType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Asset_Losses is not None:
self.Asset_Losses.export(lwrite, level, nsmap, namespace_, name_='Asset_Losses', pretty_print=pretty_print)
if self.Business_Mission_Disruption is not None:
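            # NB: the hyphen in 'Business-Mission_Disruption' matches the
            # STIX schema element name (see buildChildren below); it is not
            # a typo.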
self.Business_Mission_Disruption.export(lwrite, level, nsmap, namespace_, name_='Business-Mission_Disruption', pretty_print=pretty_print)
if self.Response_And_Recovery_Costs is not None:
self.Response_And_Recovery_Costs.export(lwrite, level, nsmap, namespace_, name_='Response_And_Recovery_Costs', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Asset_Losses':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Asset_Losses(obj_)
elif nodeName_ == 'Business-Mission_Disruption':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Business_Mission_Disruption(obj_)
elif nodeName_ == 'Response_And_Recovery_Costs':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Response_And_Recovery_Costs(obj_)
# end class DirectImpactSummaryType
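# Illustrative sketch, not part of the generated binding: one way a
# DirectImpactSummaryType could be populated and serialized. The nsmap value
# and the 'Moderate' rating are assumptions for illustration; XML_NS is the
# module-level namespace constant already used as the default namespace_
# argument throughout this file.
def _example_direct_impact_summary(lwrite=None):
    import sys
    lwrite = lwrite or sys.stdout.write
    summary = DirectImpactSummaryType.factory()
    summary.set_Asset_Losses(stix_common_binding.ControlledVocabularyStringType(
        valueOf_='Moderate'))
    # Children left as None are skipped by exportChildren, so only
    # Asset_Losses appears in the output.
    summary.export(lwrite, 0, {XML_NS: 'incident'}, name_='Direct_Impact_Summary')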
class NatureOfSecurityEffectType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Property_Affected=None):
if Property_Affected is None:
self.Property_Affected = []
else:
self.Property_Affected = Property_Affected
def factory(*args_, **kwargs_):
if NatureOfSecurityEffectType.subclass:
return NatureOfSecurityEffectType.subclass(*args_, **kwargs_)
else:
return NatureOfSecurityEffectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Property_Affected(self): return self.Property_Affected
def set_Property_Affected(self, Property_Affected): self.Property_Affected = Property_Affected
def add_Property_Affected(self, value): self.Property_Affected.append(value)
    # Note: despite its name, insert_* assigns over the entry at the given
    # index rather than inserting before it; this holds for every insert_*
    # helper generated in this file.
    def insert_Property_Affected(self, index, value): self.Property_Affected[index] = value
def hasContent_(self):
if (
self.Property_Affected
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='NatureOfSecurityEffectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='NatureOfSecurityEffectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='NatureOfSecurityEffectType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='NatureOfSecurityEffectType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Property_Affected_ in self.Property_Affected:
Property_Affected_.export(lwrite, level, nsmap, namespace_, name_='Property_Affected', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Property_Affected':
obj_ = PropertyAffectedType.factory()
obj_.build(child_)
self.Property_Affected.append(obj_)
# end class NatureOfSecurityEffectType
class HistoryItemType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Action_Entry=None, Journal_Entry=None):
self.Action_Entry = Action_Entry
self.Journal_Entry = Journal_Entry
def factory(*args_, **kwargs_):
if HistoryItemType.subclass:
return HistoryItemType.subclass(*args_, **kwargs_)
else:
return HistoryItemType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Action_Entry(self): return self.Action_Entry
def set_Action_Entry(self, Action_Entry): self.Action_Entry = Action_Entry
def get_Journal_Entry(self): return self.Journal_Entry
def set_Journal_Entry(self, Journal_Entry): self.Journal_Entry = Journal_Entry
def hasContent_(self):
if (
self.Action_Entry is not None or
self.Journal_Entry is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='HistoryItemType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='HistoryItemType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='HistoryItemType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='HistoryItemType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Action_Entry is not None:
self.Action_Entry.export(lwrite, level, nsmap, namespace_, name_='Action_Entry', pretty_print=pretty_print)
if self.Journal_Entry is not None:
self.Journal_Entry.export(lwrite, level, nsmap, namespace_, name_='Journal_Entry', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Action_Entry':
obj_ = COATakenType.factory()
obj_.build(child_)
self.set_Action_Entry(obj_)
elif nodeName_ == 'Journal_Entry':
obj_ = JournalEntryType.factory()
obj_.build(child_)
self.set_Journal_Entry(obj_)
# end class HistoryItemType
class HistoryType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, History_Item=None):
if History_Item is None:
self.History_Item = []
else:
self.History_Item = History_Item
def factory(*args_, **kwargs_):
if HistoryType.subclass:
return HistoryType.subclass(*args_, **kwargs_)
else:
return HistoryType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_History_Item(self): return self.History_Item
def set_History_Item(self, History_Item): self.History_Item = History_Item
def add_History_Item(self, value): self.History_Item.append(value)
def insert_History_Item(self, index, value): self.History_Item[index] = value
def hasContent_(self):
if (
self.History_Item
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='HistoryType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='HistoryType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='HistoryType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='HistoryType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for History_Item_ in self.History_Item:
History_Item_.export(lwrite, level, nsmap, namespace_, name_='History_Item', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'History_Item':
obj_ = HistoryItemType.factory()
obj_.build(child_)
self.History_Item.append(obj_)
# end class HistoryType
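# Illustrative sketch, not part of the generated binding: the list-container
# pattern shared by HistoryType, AffectedAssetsType, NatureOfSecurityEffectType
# and the other wrapper types in this file. add_* appends; insert_* replaces
# the entry at an index rather than inserting before it.
def _example_history_container():
    history = HistoryType.factory()
    # An empty HistoryItemType is valid here and exports as a self-closing
    # History_Item element.
    history.add_History_Item(HistoryItemType.factory())
    assert history.hasContent_()
    return history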
class AffectedAssetsType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Affected_Asset=None):
if Affected_Asset is None:
self.Affected_Asset = []
else:
self.Affected_Asset = Affected_Asset
def factory(*args_, **kwargs_):
if AffectedAssetsType.subclass:
return AffectedAssetsType.subclass(*args_, **kwargs_)
else:
return AffectedAssetsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Affected_Asset(self): return self.Affected_Asset
def set_Affected_Asset(self, Affected_Asset): self.Affected_Asset = Affected_Asset
def add_Affected_Asset(self, value): self.Affected_Asset.append(value)
def insert_Affected_Asset(self, index, value): self.Affected_Asset[index] = value
def hasContent_(self):
if (
self.Affected_Asset
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AffectedAssetsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='AffectedAssetsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='AffectedAssetsType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AffectedAssetsType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Affected_Asset_ in self.Affected_Asset:
Affected_Asset_.export(lwrite, level, nsmap, namespace_, name_='Affected_Asset', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Affected_Asset':
obj_ = AffectedAssetType.factory()
obj_.build(child_)
self.Affected_Asset.append(obj_)
# end class AffectedAssetsType
class TimeType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, First_Malicious_Action=None, Initial_Compromise=None, First_Data_Exfiltration=None, Incident_Discovery=None, Incident_Opened=None, Containment_Achieved=None, Restoration_Achieved=None, Incident_Reported=None, Incident_Closed=None):
self.First_Malicious_Action = First_Malicious_Action
self.Initial_Compromise = Initial_Compromise
self.First_Data_Exfiltration = First_Data_Exfiltration
self.Incident_Discovery = Incident_Discovery
self.Incident_Opened = Incident_Opened
self.Containment_Achieved = Containment_Achieved
self.Restoration_Achieved = Restoration_Achieved
self.Incident_Reported = Incident_Reported
self.Incident_Closed = Incident_Closed
def factory(*args_, **kwargs_):
if TimeType.subclass:
return TimeType.subclass(*args_, **kwargs_)
else:
return TimeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_First_Malicious_Action(self): return self.First_Malicious_Action
def set_First_Malicious_Action(self, First_Malicious_Action): self.First_Malicious_Action = First_Malicious_Action
def get_Initial_Compromise(self): return self.Initial_Compromise
def set_Initial_Compromise(self, Initial_Compromise): self.Initial_Compromise = Initial_Compromise
def get_First_Data_Exfiltration(self): return self.First_Data_Exfiltration
def set_First_Data_Exfiltration(self, First_Data_Exfiltration): self.First_Data_Exfiltration = First_Data_Exfiltration
def get_Incident_Discovery(self): return self.Incident_Discovery
def set_Incident_Discovery(self, Incident_Discovery): self.Incident_Discovery = Incident_Discovery
def get_Incident_Opened(self): return self.Incident_Opened
def set_Incident_Opened(self, Incident_Opened): self.Incident_Opened = Incident_Opened
def get_Containment_Achieved(self): return self.Containment_Achieved
def set_Containment_Achieved(self, Containment_Achieved): self.Containment_Achieved = Containment_Achieved
def get_Restoration_Achieved(self): return self.Restoration_Achieved
def set_Restoration_Achieved(self, Restoration_Achieved): self.Restoration_Achieved = Restoration_Achieved
def get_Incident_Reported(self): return self.Incident_Reported
def set_Incident_Reported(self, Incident_Reported): self.Incident_Reported = Incident_Reported
def get_Incident_Closed(self): return self.Incident_Closed
def set_Incident_Closed(self, Incident_Closed): self.Incident_Closed = Incident_Closed
def hasContent_(self):
if (
self.First_Malicious_Action is not None or
self.Initial_Compromise is not None or
self.First_Data_Exfiltration is not None or
self.Incident_Discovery is not None or
self.Incident_Opened is not None or
self.Containment_Achieved is not None or
self.Restoration_Achieved is not None or
self.Incident_Reported is not None or
self.Incident_Closed is not None
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='TimeType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='TimeType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='TimeType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='TimeType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.First_Malicious_Action is not None:
self.First_Malicious_Action.export(lwrite, level, nsmap, namespace_, name_='First_Malicious_Action', pretty_print=pretty_print)
if self.Initial_Compromise is not None:
self.Initial_Compromise.export(lwrite, level, nsmap, namespace_, name_='Initial_Compromise', pretty_print=pretty_print)
if self.First_Data_Exfiltration is not None:
self.First_Data_Exfiltration.export(lwrite, level, nsmap, namespace_, name_='First_Data_Exfiltration', pretty_print=pretty_print)
if self.Incident_Discovery is not None:
self.Incident_Discovery.export(lwrite, level, nsmap, namespace_, name_='Incident_Discovery', pretty_print=pretty_print)
if self.Incident_Opened is not None:
self.Incident_Opened.export(lwrite, level, nsmap, namespace_, name_='Incident_Opened', pretty_print=pretty_print)
if self.Containment_Achieved is not None:
self.Containment_Achieved.export(lwrite, level, nsmap, namespace_, name_='Containment_Achieved', pretty_print=pretty_print)
if self.Restoration_Achieved is not None:
self.Restoration_Achieved.export(lwrite, level, nsmap, namespace_, name_='Restoration_Achieved', pretty_print=pretty_print)
if self.Incident_Reported is not None:
self.Incident_Reported.export(lwrite, level, nsmap, namespace_, name_='Incident_Reported', pretty_print=pretty_print)
if self.Incident_Closed is not None:
self.Incident_Closed.export(lwrite, level, nsmap, namespace_, name_='Incident_Closed', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'First_Malicious_Action':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_First_Malicious_Action(obj_)
elif nodeName_ == 'Initial_Compromise':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Initial_Compromise(obj_)
elif nodeName_ == 'First_Data_Exfiltration':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_First_Data_Exfiltration(obj_)
elif nodeName_ == 'Incident_Discovery':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Incident_Discovery(obj_)
elif nodeName_ == 'Incident_Opened':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Incident_Opened(obj_)
elif nodeName_ == 'Containment_Achieved':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Containment_Achieved(obj_)
elif nodeName_ == 'Restoration_Achieved':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Restoration_Achieved(obj_)
elif nodeName_ == 'Incident_Reported':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Incident_Reported(obj_)
elif nodeName_ == 'Incident_Closed':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Incident_Closed(obj_)
# end class TimeType
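# Illustrative sketch, not part of the generated binding: parsing a Time
# element into a TimeType via build(). lxml is assumed here, matching the
# etree nodes these build() methods are written against; the XML string you
# pass in would carry the incident namespace on its children.
def _example_parse_time(xml_string):
    from lxml import etree
    node = etree.fromstring(xml_string)
    time_obj = TimeType.factory()
    time_obj.build(node)  # dispatches each child to buildChildren by local name
    return time_obj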
class CategoriesType(GeneratedsSuper):
"""Represents a list of incident categories that an incident is tagged
with."""
subclass = None
superclass = None
def __init__(self, Category=None):
if Category is None:
self.Category = []
else:
self.Category = Category
def factory(*args_, **kwargs_):
if CategoriesType.subclass:
return CategoriesType.subclass(*args_, **kwargs_)
else:
return CategoriesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Category(self): return self.Category
def set_Category(self, Category): self.Category = Category
def add_Category(self, value): self.Category.append(value)
def insert_Category(self, index, value): self.Category[index] = value
def hasContent_(self):
if (
self.Category
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='CategoriesType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='CategoriesType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='CategoriesType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='CategoriesType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Category_ in self.Category:
Category_.export(lwrite, level, nsmap, namespace_, name_='Category', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Category':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.add_Category(obj_)
# end class CategoriesType
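# Illustrative sketch, not part of the generated binding: tagging an incident
# with a category. The vocab_name and value below are assumptions drawn from
# the STIX default vocabularies; any plain string value is also accepted.
def _example_categories():
    categories = CategoriesType.factory()
    categories.add_Category(stix_common_binding.ControlledVocabularyStringType(
        vocab_name='IncidentCategoryVocab-1.0',
        valueOf_='Unauthorized Access'))
    return categories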
class EffectsType(GeneratedsSuper):
"""Represents a list of incident effects that an incident is tagged
with."""
subclass = None
superclass = None
def __init__(self, Effect=None):
if Effect is None:
self.Effect = []
else:
self.Effect = Effect
def factory(*args_, **kwargs_):
if EffectsType.subclass:
return EffectsType.subclass(*args_, **kwargs_)
else:
return EffectsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Effect(self): return self.Effect
def set_Effect(self, Effect): self.Effect = Effect
def add_Effect(self, value): self.Effect.append(value)
def insert_Effect(self, index, value): self.Effect[index] = value
def hasContent_(self):
if (
self.Effect
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='EffectsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='EffectsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='EffectsType'):
pass
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='EffectsType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Effect_ in self.Effect:
Effect_.export(lwrite, level, nsmap, namespace_, name_='Effect', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Effect':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.Effect.append(obj_)
# end class EffectsType
class AttributedThreatActorsType(stix_common_binding.GenericRelationshipListType):
"""The AttributedThreatActorsType specifies a Threat Actor asserted to
be attributed for this Incident."""
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Threat_Actor=None):
super(AttributedThreatActorsType, self).__init__(scope=scope)
if Threat_Actor is None:
self.Threat_Actor = []
else:
self.Threat_Actor = Threat_Actor
def factory(*args_, **kwargs_):
if AttributedThreatActorsType.subclass:
return AttributedThreatActorsType.subclass(*args_, **kwargs_)
else:
return AttributedThreatActorsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Threat_Actor(self): return self.Threat_Actor
def set_Threat_Actor(self, Threat_Actor): self.Threat_Actor = Threat_Actor
def add_Threat_Actor(self, value): self.Threat_Actor.append(value)
def insert_Threat_Actor(self, index, value): self.Threat_Actor[index] = value
def hasContent_(self):
if (
self.Threat_Actor or
super(AttributedThreatActorsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AttributedThreatActorsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='AttributedThreatActorsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='AttributedThreatActorsType'):
super(AttributedThreatActorsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='AttributedThreatActorsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AttributedThreatActorsType', fromsubclass_=False, pretty_print=True):
super(AttributedThreatActorsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Threat_Actor_ in self.Threat_Actor:
Threat_Actor_.export(lwrite, level, nsmap, namespace_, name_='Threat_Actor', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AttributedThreatActorsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Threat_Actor':
obj_ = stix_common_binding.RelatedThreatActorType.factory()
obj_.build(child_)
self.Threat_Actor.append(obj_)
super(AttributedThreatActorsType, self).buildChildren(child_, node, nodeName_, True)
# end class AttributedThreatActorsType
class RelatedIndicatorsType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Related_Indicator=None):
super(RelatedIndicatorsType, self).__init__(scope=scope)
if Related_Indicator is None:
self.Related_Indicator = []
else:
self.Related_Indicator = Related_Indicator
def factory(*args_, **kwargs_):
if RelatedIndicatorsType.subclass:
return RelatedIndicatorsType.subclass(*args_, **kwargs_)
else:
return RelatedIndicatorsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Related_Indicator(self): return self.Related_Indicator
def set_Related_Indicator(self, Related_Indicator): self.Related_Indicator = Related_Indicator
def add_Related_Indicator(self, value): self.Related_Indicator.append(value)
def insert_Related_Indicator(self, index, value): self.Related_Indicator[index] = value
def hasContent_(self):
if (
self.Related_Indicator or
super(RelatedIndicatorsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIndicatorsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIndicatorsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='RelatedIndicatorsType'):
super(RelatedIndicatorsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIndicatorsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIndicatorsType', fromsubclass_=False, pretty_print=True):
super(RelatedIndicatorsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Related_Indicator_ in self.Related_Indicator:
Related_Indicator_.export(lwrite, level, nsmap, namespace_, name_='Related_Indicator', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RelatedIndicatorsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Related_Indicator':
obj_ = stix_common_binding.RelatedIndicatorType.factory()
obj_.build(child_)
self.Related_Indicator.append(obj_)
super(RelatedIndicatorsType, self).buildChildren(child_, node, nodeName_, True)
# end class RelatedIndicatorsType
class RelatedObservablesType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Related_Observable=None):
super(RelatedObservablesType, self).__init__(scope=scope)
if Related_Observable is None:
self.Related_Observable = []
else:
self.Related_Observable = Related_Observable
def factory(*args_, **kwargs_):
if RelatedObservablesType.subclass:
return RelatedObservablesType.subclass(*args_, **kwargs_)
else:
return RelatedObservablesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Related_Observable(self): return self.Related_Observable
def set_Related_Observable(self, Related_Observable): self.Related_Observable = Related_Observable
def add_Related_Observable(self, value): self.Related_Observable.append(value)
def insert_Related_Observable(self, index, value): self.Related_Observable[index] = value
def hasContent_(self):
if (
self.Related_Observable or
super(RelatedObservablesType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedObservablesType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedObservablesType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='RelatedObservablesType'):
super(RelatedObservablesType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedObservablesType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedObservablesType', fromsubclass_=False, pretty_print=True):
super(RelatedObservablesType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Related_Observable_ in self.Related_Observable:
Related_Observable_.export(lwrite, level, nsmap, namespace_, name_='Related_Observable', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RelatedObservablesType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Related_Observable':
obj_ = stix_common_binding.RelatedObservableType.factory()
obj_.build(child_)
self.Related_Observable.append(obj_)
super(RelatedObservablesType, self).buildChildren(child_, node, nodeName_, True)
# end class RelatedObservablesType
class LeveragedTTPsType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Leveraged_TTP=None):
super(LeveragedTTPsType, self).__init__(scope=scope)
if Leveraged_TTP is None:
self.Leveraged_TTP = []
else:
self.Leveraged_TTP = Leveraged_TTP
def factory(*args_, **kwargs_):
if LeveragedTTPsType.subclass:
return LeveragedTTPsType.subclass(*args_, **kwargs_)
else:
return LeveragedTTPsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Leveraged_TTP(self): return self.Leveraged_TTP
def set_Leveraged_TTP(self, Leveraged_TTP): self.Leveraged_TTP = Leveraged_TTP
def add_Leveraged_TTP(self, value): self.Leveraged_TTP.append(value)
def insert_Leveraged_TTP(self, index, value): self.Leveraged_TTP[index] = value
def hasContent_(self):
if (
self.Leveraged_TTP or
super(LeveragedTTPsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='LeveragedTTPsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='LeveragedTTPsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='LeveragedTTPsType'):
super(LeveragedTTPsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='LeveragedTTPsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='LeveragedTTPsType', fromsubclass_=False, pretty_print=True):
super(LeveragedTTPsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Leveraged_TTP_ in self.Leveraged_TTP:
Leveraged_TTP_.export(lwrite, level, nsmap, namespace_, name_='Leveraged_TTP', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(LeveragedTTPsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Leveraged_TTP':
obj_ = stix_common_binding.RelatedTTPType.factory()
obj_.build(child_)
self.Leveraged_TTP.append(obj_)
super(LeveragedTTPsType, self).buildChildren(child_, node, nodeName_, True)
# end class LeveragedTTPsType
class RelatedIncidentsType(stix_common_binding.GenericRelationshipListType):
subclass = None
superclass = stix_common_binding.GenericRelationshipListType
def __init__(self, scope='exclusive', Related_Incident=None):
super(RelatedIncidentsType, self).__init__(scope=scope)
if Related_Incident is None:
self.Related_Incident = []
else:
self.Related_Incident = Related_Incident
def factory(*args_, **kwargs_):
if RelatedIncidentsType.subclass:
return RelatedIncidentsType.subclass(*args_, **kwargs_)
else:
return RelatedIncidentsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Related_Incident(self): return self.Related_Incident
def set_Related_Incident(self, Related_Incident): self.Related_Incident = Related_Incident
def add_Related_Incident(self, value): self.Related_Incident.append(value)
def insert_Related_Incident(self, index, value): self.Related_Incident[index] = value
def hasContent_(self):
if (
self.Related_Incident or
super(RelatedIncidentsType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIncidentsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIncidentsType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='RelatedIncidentsType'):
super(RelatedIncidentsType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='RelatedIncidentsType')
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='RelatedIncidentsType', fromsubclass_=False, pretty_print=True):
super(RelatedIncidentsType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Related_Incident_ in self.Related_Incident:
Related_Incident_.export(lwrite, level, nsmap, namespace_, name_='Related_Incident', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RelatedIncidentsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Related_Incident':
obj_ = stix_common_binding.RelatedIncidentType.factory()
obj_.build(child_)
self.Related_Incident.append(obj_)
super(RelatedIncidentsType, self).buildChildren(child_, node, nodeName_, True)
# end class RelatedIncidentsType
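# Illustrative sketch, not part of the generated binding: the relationship-
# list pattern shared by AttributedThreatActorsType, RelatedIndicatorsType,
# RelatedObservablesType, LeveragedTTPsType and RelatedIncidentsType. Each
# inherits the 'scope' attribute (default 'exclusive') from
# GenericRelationshipListType and collects typed Related* entries.
def _example_related_incidents():
    related = RelatedIncidentsType.factory(scope='inclusive')
    related.add_Related_Incident(stix_common_binding.RelatedIncidentType.factory())
    return related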
class AssetTypeType(stix_common_binding.ControlledVocabularyStringType):
"""This field specifies the number of assets of this type affected.
This field is implemented through the xsi:type controlled
vocabulary extension mechanism. The default vocabulary type is
AssetTypeVocab-1.0 in the
http://stix.mitre.org/default_vocabularies-1 namespace. This
type is defined in the stix_default_vocabularies.xsd file or at
the URL http://stix.mitre.org/XMLSchema/default_vocabularies/1.0
.0/stix_default_vocabularies.xsd . Users may also define their
own vocabulary using the type extension mechanism, specify a
vocabulary name and reference using the attributes, or simply
use this as a string field."""
subclass = None
superclass = stix_common_binding.ControlledVocabularyStringType
def __init__(self, vocab_reference=None, vocab_name=None, count_affected=None, valueOf_=None):
super(AssetTypeType, self).__init__(vocab_reference=vocab_reference, vocab_name=vocab_name, valueOf_=valueOf_)
self.count_affected = _cast(None, count_affected)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if AssetTypeType.subclass:
return AssetTypeType.subclass(*args_, **kwargs_)
else:
return AssetTypeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_count_affected(self): return self.count_affected
def set_count_affected(self, count_affected): self.count_affected = count_affected
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_ or
super(AssetTypeType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AssetTypeType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='AssetTypeType')
if self.hasContent_():
lwrite('>')
lwrite(unicode(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='AssetTypeType'):
super(AssetTypeType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='AssetTypeType')
if self.count_affected is not None and 'count_affected' not in already_processed:
already_processed.add('count_affected')
lwrite(' count_affected=%s' % (self.gds_format_string(quote_attrib(self.count_affected).encode(ExternalEncoding), input_name='count_affected'), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='AssetTypeType', fromsubclass_=False, pretty_print=True):
super(AssetTypeType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('count_affected', node)
if value is not None and 'count_affected' not in already_processed:
already_processed.add('count_affected')
self.count_affected = value
super(AssetTypeType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AssetTypeType
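# Illustrative sketch, not part of the generated binding: combining the
# count_affected attribute with the vocabulary string value. 'Server' is an
# assumption drawn from the default AssetTypeVocab named in the docstring
# above; count_affected is stored as provided.
def _example_asset_type():
    return AssetTypeType.factory(count_affected='3', valueOf_='Server')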
class IncidentType(stix_common_binding.IncidentBaseType):
"""The IncidentType characterizes a single cyber threat
Incident.Specifies the relevant STIX-Incident schema version for
this content.Specifies a URL referencing the location for the
Incident specification."""
subclass = None
superclass = stix_common_binding.IncidentBaseType
def __init__(self, idref=None, id=None, timestamp=None, URL=None, version=None, Title=None, External_ID=None, Time=None, Description=None, Short_Description=None, Categories=None, Reporter=None, Responder=None, Coordinator=None, Victim=None, Affected_Assets=None, Impact_Assessment=None, Status=None, Related_Indicators=None, Related_Observables=None, Leveraged_TTPs=None, Attributed_Threat_Actors=None, Intended_Effect=None, Security_Compromise=None, Discovery_Method=None, Related_Incidents=None, COA_Requested=None, COA_Taken=None, Confidence=None, Contact=None, History=None, Information_Source=None, Handling=None, Related_Packages=None):
super(IncidentType, self).__init__(timestamp=timestamp, idref=idref, id=id)
self.xmlns = "http://stix.mitre.org/Incident-1"
self.xmlns_prefix = "incident"
self.xml_type = "IncidentType"
self.URL = _cast(None, URL)
self.version = _cast(None, version)
self.Title = Title
if External_ID is None:
self.External_ID = []
else:
self.External_ID = External_ID
self.Time = Time
self.Description = Description
self.Short_Description = Short_Description
self.Categories = Categories
self.Reporter = Reporter
if Responder is None:
self.Responder = []
else:
self.Responder = Responder
if Coordinator is None:
self.Coordinator = []
else:
self.Coordinator = Coordinator
if Victim is None:
self.Victim = []
else:
self.Victim = Victim
self.Affected_Assets = Affected_Assets
self.Impact_Assessment = Impact_Assessment
self.Status = Status
self.Related_Indicators = Related_Indicators
self.Related_Observables = Related_Observables
self.Leveraged_TTPs = Leveraged_TTPs
self.Attributed_Threat_Actors = Attributed_Threat_Actors
if Intended_Effect is None:
self.Intended_Effect = []
else:
self.Intended_Effect = Intended_Effect
self.Security_Compromise = Security_Compromise
if Discovery_Method is None:
self.Discovery_Method = []
else:
self.Discovery_Method = Discovery_Method
self.Related_Incidents = Related_Incidents
if COA_Requested is None:
self.COA_Requested = []
else:
self.COA_Requested = COA_Requested
if COA_Taken is None:
self.COA_Taken = []
else:
self.COA_Taken = COA_Taken
self.Confidence = Confidence
if Contact is None:
self.Contact = []
else:
self.Contact = Contact
self.History = History
self.Information_Source = Information_Source
self.Handling = Handling
self.Related_Packages = Related_Packages
def factory(*args_, **kwargs_):
if IncidentType.subclass:
return IncidentType.subclass(*args_, **kwargs_)
else:
return IncidentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Title(self): return self.Title
def set_Title(self, Title): self.Title = Title
def get_External_ID(self): return self.External_ID
def set_External_ID(self, External_ID): self.External_ID = External_ID
def add_External_ID(self, value): self.External_ID.append(value)
def insert_External_ID(self, index, value): self.External_ID[index] = value
def get_Time(self): return self.Time
def set_Time(self, Time): self.Time = Time
def get_Description(self): return self.Description
def set_Description(self, Description): self.Description = Description
def get_Short_Description(self): return self.Short_Description
def set_Short_Description(self, Short_Description): self.Short_Description = Short_Description
def get_Categories(self): return self.Categories
def set_Categories(self, Categories): self.Categories = Categories
def get_Reporter(self): return self.Reporter
def set_Reporter(self, Reporter): self.Reporter = Reporter
def get_Responder(self): return self.Responder
def set_Responder(self, Responder): self.Responder = Responder
def add_Responder(self, value): self.Responder.append(value)
def insert_Responder(self, index, value): self.Responder[index] = value
def get_Coordinator(self): return self.Coordinator
def set_Coordinator(self, Coordinator): self.Coordinator = Coordinator
def add_Coordinator(self, value): self.Coordinator.append(value)
def insert_Coordinator(self, index, value): self.Coordinator[index] = value
def get_Victim(self): return self.Victim
def set_Victim(self, Victim): self.Victim = Victim
def add_Victim(self, value): self.Victim.append(value)
def insert_Victim(self, index, value): self.Victim[index] = value
def get_Affected_Assets(self): return self.Affected_Assets
def set_Affected_Assets(self, Affected_Assets): self.Affected_Assets = Affected_Assets
def get_Impact_Assessment(self): return self.Impact_Assessment
def set_Impact_Assessment(self, Impact_Assessment): self.Impact_Assessment = Impact_Assessment
def get_Status(self): return self.Status
def set_Status(self, Status): self.Status = Status
def get_Related_Indicators(self): return self.Related_Indicators
def set_Related_Indicators(self, Related_Indicators): self.Related_Indicators = Related_Indicators
def get_Related_Observables(self): return self.Related_Observables
def set_Related_Observables(self, Related_Observables): self.Related_Observables = Related_Observables
def get_Leveraged_TTPs(self): return self.Leveraged_TTPs
def set_Leveraged_TTPs(self, Leveraged_TTPs): self.Leveraged_TTPs = Leveraged_TTPs
def get_Attributed_Threat_Actors(self): return self.Attributed_Threat_Actors
def set_Attributed_Threat_Actors(self, Attributed_Threat_Actors): self.Attributed_Threat_Actors = Attributed_Threat_Actors
def get_Intended_Effect(self): return self.Intended_Effect
def set_Intended_Effect(self, Intended_Effect): self.Intended_Effect = Intended_Effect
def add_Intended_Effect(self, value): self.Intended_Effect.append(value)
def insert_Intended_Effect(self, index, value): self.Intended_Effect[index] = value
def get_Security_Compromise(self): return self.Security_Compromise
def set_Security_Compromise(self, Security_Compromise): self.Security_Compromise = Security_Compromise
def get_Discovery_Method(self): return self.Discovery_Method
def set_Discovery_Method(self, Discovery_Method): self.Discovery_Method = Discovery_Method
def add_Discovery_Method(self, value): self.Discovery_Method.append(value)
def insert_Discovery_Method(self, index, value): self.Discovery_Method[index] = value
def get_Related_Incidents(self): return self.Related_Incidents
def set_Related_Incidents(self, Related_Incidents): self.Related_Incidents = Related_Incidents
def get_COA_Requested(self): return self.COA_Requested
def set_COA_Requested(self, COA_Requested): self.COA_Requested = COA_Requested
def add_COA_Requested(self, value): self.COA_Requested.append(value)
def insert_COA_Requested(self, index, value): self.COA_Requested[index] = value
def get_COA_Taken(self): return self.COA_Taken
def set_COA_Taken(self, COA_Taken): self.COA_Taken = COA_Taken
def add_COA_Taken(self, value): self.COA_Taken.append(value)
def insert_COA_Taken(self, index, value): self.COA_Taken[index] = value
def get_Confidence(self): return self.Confidence
def set_Confidence(self, Confidence): self.Confidence = Confidence
def get_Contact(self): return self.Contact
def set_Contact(self, Contact): self.Contact = Contact
def add_Contact(self, value): self.Contact.append(value)
def insert_Contact(self, index, value): self.Contact[index] = value
def get_History(self): return self.History
def set_History(self, History): self.History = History
def get_Information_Source(self): return self.Information_Source
def set_Information_Source(self, Information_Source): self.Information_Source = Information_Source
def get_Handling(self): return self.Handling
def set_Handling(self, Handling): self.Handling = Handling
def get_Related_Packages(self): return self.Related_Packages
def set_Related_Packages(self, Related_Packages): self.Related_Packages = Related_Packages
def get_URL(self): return self.URL
def set_URL(self, URL): self.URL = URL
def get_version(self): return self.version
def set_version(self, version): self.version = version
def hasContent_(self):
if (
self.Title is not None or
self.External_ID or
self.Time is not None or
self.Description is not None or
self.Short_Description is not None or
self.Categories is not None or
self.Reporter is not None or
self.Responder or
self.Coordinator or
self.Victim or
self.Affected_Assets is not None or
self.Impact_Assessment is not None or
self.Status is not None or
self.Related_Indicators is not None or
self.Related_Observables is not None or
self.Leveraged_TTPs is not None or
self.Attributed_Threat_Actors is not None or
self.Intended_Effect or
self.Security_Compromise is not None or
self.Discovery_Method or
self.Related_Incidents is not None or
self.COA_Requested or
self.COA_Taken or
self.Confidence is not None or
self.Contact or
self.History is not None or
self.Information_Source is not None or
self.Handling is not None or
self.Related_Packages is not None or
super(IncidentType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='Incident', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='Incident')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='Incident'):
super(IncidentType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='Incident')
# if 'xmlns' not in already_processed:
# already_processed.add('xmlns')
# xmlns = " xmlns:%s='%s'" % (self.xmlns_prefix, self.xmlns)
# lwrite(xmlns)
if 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
xsi_type = " xsi:type='%s:%s'" % (self.xmlns_prefix, self.xml_type)
lwrite(xsi_type)
if self.URL is not None and 'URL' not in already_processed:
already_processed.add('URL')
lwrite(' URL=%s' % (self.gds_format_string(quote_attrib(self.URL).encode(ExternalEncoding), input_name='URL'), ))
if self.version is not None and 'version' not in already_processed:
already_processed.add('version')
lwrite(' version=%s' % (quote_attrib(self.version), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='IncidentType', fromsubclass_=False, pretty_print=True):
super(IncidentType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Title is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%s:Title>%s</%s:Title>%s' % (nsmap[namespace_], self.gds_format_string(quote_xml(self.Title).encode(ExternalEncoding), input_name='Title'), nsmap[namespace_], eol_))
for External_ID_ in self.External_ID:
External_ID_.export(lwrite, level, nsmap, namespace_, name_='External_ID', pretty_print=pretty_print)
if self.Time is not None:
self.Time.export(lwrite, level, nsmap, namespace_, name_='Time', pretty_print=pretty_print)
if self.Description is not None:
self.Description.export(lwrite, level, nsmap, namespace_, name_='Description', pretty_print=pretty_print)
if self.Short_Description is not None:
self.Short_Description.export(lwrite, level, nsmap, namespace_, name_='Short_Description', pretty_print=pretty_print)
if self.Categories is not None:
self.Categories.export(lwrite, level, nsmap, namespace_, name_='Categories', pretty_print=pretty_print)
if self.Reporter is not None:
self.Reporter.export(lwrite, level, nsmap, namespace_, name_='Reporter', pretty_print=pretty_print)
for Responder_ in self.Responder:
Responder_.export(lwrite, level, nsmap, namespace_, name_='Responder', pretty_print=pretty_print)
for Coordinator_ in self.Coordinator:
Coordinator_.export(lwrite, level, nsmap, namespace_, name_='Coordinator', pretty_print=pretty_print)
for Victim_ in self.Victim:
Victim_.export(lwrite, level, nsmap, namespace_, name_='Victim', pretty_print=pretty_print)
if self.Affected_Assets is not None:
self.Affected_Assets.export(lwrite, level, nsmap, namespace_, name_='Affected_Assets', pretty_print=pretty_print)
if self.Impact_Assessment is not None:
self.Impact_Assessment.export(lwrite, level, nsmap, namespace_, name_='Impact_Assessment', pretty_print=pretty_print)
if self.Status is not None:
self.Status.export(lwrite, level, nsmap, namespace_, name_='Status', pretty_print=pretty_print)
if self.Related_Indicators is not None:
self.Related_Indicators.export(lwrite, level, nsmap, namespace_, name_='Related_Indicators', pretty_print=pretty_print)
if self.Related_Observables is not None:
self.Related_Observables.export(lwrite, level, nsmap, namespace_, name_='Related_Observables', pretty_print=pretty_print)
if self.Leveraged_TTPs is not None:
self.Leveraged_TTPs.export(lwrite, level, nsmap, namespace_, name_='Leveraged_TTPs', pretty_print=pretty_print)
if self.Attributed_Threat_Actors is not None:
self.Attributed_Threat_Actors.export(lwrite, level, nsmap, namespace_, name_='Attributed_Threat_Actors', pretty_print=pretty_print)
for Intended_Effect_ in self.Intended_Effect:
Intended_Effect_.export(lwrite, level, nsmap, namespace_, name_='Intended_Effect', pretty_print=pretty_print)
if self.Security_Compromise is not None:
self.Security_Compromise.export(lwrite, level, nsmap, namespace_, name_='Security_Compromise', pretty_print=pretty_print)
for Discovery_Method_ in self.Discovery_Method:
Discovery_Method_.export(lwrite, level, nsmap, namespace_, name_='Discovery_Method', pretty_print=pretty_print)
if self.Related_Incidents is not None:
self.Related_Incidents.export(lwrite, level, nsmap, namespace_, name_='Related_Incidents', pretty_print=pretty_print)
for COA_Requested_ in self.COA_Requested:
COA_Requested_.export(lwrite, level, nsmap, namespace_, name_='COA_Requested', pretty_print=pretty_print)
for COA_Taken_ in self.COA_Taken:
COA_Taken_.export(lwrite, level, nsmap, namespace_, name_='COA_Taken', pretty_print=pretty_print)
if self.Confidence is not None:
self.Confidence.export(lwrite, level, nsmap, namespace_, name_='Confidence', pretty_print=pretty_print)
for Contact_ in self.Contact:
Contact_.export(lwrite, level, nsmap, namespace_, name_='Contact', pretty_print=pretty_print)
if self.History is not None:
self.History.export(lwrite, level, nsmap, namespace_, name_='History', pretty_print=pretty_print)
if self.Information_Source is not None:
self.Information_Source.export(lwrite, level, nsmap, namespace_, name_='Information_Source', pretty_print=pretty_print)
if self.Handling is not None:
self.Handling.export(lwrite, level, nsmap, namespace_, name_='Handling', pretty_print=pretty_print)
if self.Related_Packages is not None:
self.Related_Packages.export(lwrite, level, nsmap, namespace_, name_='Related_Packages', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('URL', node)
if value is not None and 'URL' not in already_processed:
already_processed.add('URL')
self.URL = value
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.add('version')
self.version = value
super(IncidentType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Title':
Title_ = child_.text
Title_ = self.gds_validate_string(Title_, node, 'Title')
self.Title = Title_
elif nodeName_ == 'External_ID':
obj_ = ExternalIDType.factory()
obj_.build(child_)
self.External_ID.append(obj_)
elif nodeName_ == 'Time':
obj_ = TimeType.factory()
obj_.build(child_)
self.set_Time(obj_)
elif nodeName_ == 'Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.set_Description(obj_)
elif nodeName_ == 'Short_Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.set_Short_Description(obj_)
elif nodeName_ == 'Categories':
obj_ = CategoriesType.factory()
obj_.build(child_)
self.set_Categories(obj_)
elif nodeName_ == 'Reporter':
obj_ = stix_common_binding.InformationSourceType.factory()
obj_.build(child_)
self.set_Reporter(obj_)
elif nodeName_ == 'Responder':
obj_ = stix_common_binding.InformationSourceType.factory()
obj_.build(child_)
self.Responder.append(obj_)
elif nodeName_ == 'Coordinator':
obj_ = stix_common_binding.InformationSourceType.factory()
obj_.build(child_)
self.Coordinator.append(obj_)
elif nodeName_ == 'Victim':
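            # Dispatch on the xsi:type attribute: CIQ identity extensions are
            # deserialized with their specialized binding class; anything else
            # falls back to the generic IdentityType.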
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
if type_name_ == "CIQIdentity3.0InstanceType":
import stix.bindings.extensions.identity.ciq_identity_3_0 as ciq_identity_binding
obj_ = ciq_identity_binding.CIQIdentity3_0InstanceType.factory()
else:
obj_ = stix_common_binding.IdentityType.factory() # IdentityType is not abstract
obj_.build(child_)
self.Victim.append(obj_)
elif nodeName_ == 'Affected_Assets':
obj_ = AffectedAssetsType.factory()
obj_.build(child_)
self.set_Affected_Assets(obj_)
elif nodeName_ == 'Impact_Assessment':
obj_ = ImpactAssessmentType.factory()
obj_.build(child_)
self.set_Impact_Assessment(obj_)
elif nodeName_ == 'Status':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Status(obj_)
elif nodeName_ == 'Related_Indicators':
obj_ = RelatedIndicatorsType.factory()
obj_.build(child_)
self.set_Related_Indicators(obj_)
elif nodeName_ == 'Related_Observables':
obj_ = RelatedObservablesType.factory()
obj_.build(child_)
self.set_Related_Observables(obj_)
elif nodeName_ == 'Leveraged_TTPs':
obj_ = LeveragedTTPsType.factory()
obj_.build(child_)
self.set_Leveraged_TTPs(obj_)
elif nodeName_ == 'Attributed_Threat_Actors':
obj_ = AttributedThreatActorsType.factory()
obj_.build(child_)
self.set_Attributed_Threat_Actors(obj_)
elif nodeName_ == 'Intended_Effect':
obj_ = stix_common_binding.StatementType.factory()
obj_.build(child_)
self.Intended_Effect.append(obj_)
elif nodeName_ == 'Security_Compromise':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Security_Compromise(obj_)
elif nodeName_ == 'Discovery_Method':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.Discovery_Method.append(obj_)
elif nodeName_ == 'Related_Incidents':
obj_ = RelatedIncidentsType.factory()
obj_.build(child_)
self.set_Related_Incidents(obj_)
elif nodeName_ == 'COA_Requested':
obj_ = COARequestedType.factory()
obj_.build(child_)
self.COA_Requested.append(obj_)
elif nodeName_ == 'COA_Taken':
class_obj_ = self.get_class_obj_(child_, COATakenType)
obj_ = class_obj_.factory()
obj_.build(child_)
self.COA_Taken.append(obj_)
elif nodeName_ == 'Confidence':
obj_ = stix_common_binding.ConfidenceType.factory()
obj_.build(child_)
self.set_Confidence(obj_)
elif nodeName_ == 'Contact':
obj_ = stix_common_binding.InformationSourceType.factory()
obj_.build(child_)
self.Contact.append(obj_)
elif nodeName_ == 'History':
obj_ = HistoryType.factory()
obj_.build(child_)
self.set_History(obj_)
elif nodeName_ == 'Information_Source':
obj_ = stix_common_binding.InformationSourceType.factory()
obj_.build(child_)
self.set_Information_Source(obj_)
elif nodeName_ == 'Handling':
obj_ = data_marking_binding.MarkingType.factory()
obj_.build(child_)
self.set_Handling(obj_)
elif nodeName_ == 'Related_Packages':
obj_ = stix_common_binding.RelatedPackageRefsType.factory()
obj_.build(child_)
self.set_Related_Packages(obj_)
super(IncidentType, self).buildChildren(child_, node, nodeName_, True)
# end class IncidentType
class NonPublicDataCompromisedType(stix_common_binding.ControlledVocabularyStringType):
"""This type represents whether non-public data was compromised or
exposed and whether that data was encrypted or not.Indicates
whether the data that was compromised was encrypted or not."""
subclass = None
superclass = stix_common_binding.ControlledVocabularyStringType
def __init__(self, vocab_reference=None, vocab_name=None, data_encrypted=None):
super(NonPublicDataCompromisedType, self).__init__(vocab_reference=vocab_reference, vocab_name=vocab_name)
self.data_encrypted = _cast(bool, data_encrypted)
pass
def factory(*args_, **kwargs_):
if NonPublicDataCompromisedType.subclass:
return NonPublicDataCompromisedType.subclass(*args_, **kwargs_)
else:
return NonPublicDataCompromisedType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_data_encrypted(self): return self.data_encrypted
def set_data_encrypted(self, data_encrypted): self.data_encrypted = data_encrypted
def hasContent_(self):
if (
super(NonPublicDataCompromisedType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='NonPublicDataCompromisedType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='NonPublicDataCompromisedType')
if self.hasContent_():
lwrite('>')
lwrite(unicode(self.valueOf_).encode(ExternalEncoding))
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='NonPublicDataCompromisedType'):
super(NonPublicDataCompromisedType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='NonPublicDataCompromisedType')
if self.data_encrypted is not None and 'data_encrypted' not in already_processed:
already_processed.add('data_encrypted')
lwrite(' data_encrypted="%s"' % self.gds_format_boolean(self.data_encrypted, input_name='data_encrypted'))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='NonPublicDataCompromisedType', fromsubclass_=False, pretty_print=True):
super(NonPublicDataCompromisedType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('data_encrypted', node)
if value is not None and 'data_encrypted' not in already_processed:
already_processed.add('data_encrypted')
if value in ('true', '1'):
self.data_encrypted = True
elif value in ('false', '0'):
self.data_encrypted = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(NonPublicDataCompromisedType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(NonPublicDataCompromisedType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class NonPublicDataCompromisedType
class ExternalIDType(GeneratedsSuper):
"""The ExternalIDType provides a reference to an ID of an incident in a
remote system.Specifies the source of the External ID."""
subclass = None
superclass = None
def __init__(self, source=None, valueOf_=None):
self.source = _cast(None, source)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if ExternalIDType.subclass:
return ExternalIDType.subclass(*args_, **kwargs_)
else:
return ExternalIDType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_source(self): return self.source
def set_source(self, source): self.source = source
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ExternalIDType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='ExternalIDType')
if self.hasContent_():
lwrite('>')
lwrite(unicode(self.valueOf_).encode(ExternalEncoding))
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='incident:', name_='ExternalIDType'):
if self.source is not None and 'source' not in already_processed:
already_processed.add('source')
lwrite(' source=%s' % (self.gds_format_string(quote_attrib(self.source).encode(ExternalEncoding), input_name='source'), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='ExternalIDType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('source', node)
if value is not None and 'source' not in already_processed:
already_processed.add('source')
self.source = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ExternalIDType
GDSClassesMapping = {}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Incident'
rootClass = IncidentType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Incident'
rootClass = IncidentType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Incident'
rootClass = IncidentType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout, 0, name_="Incident",
# namespacedef_='')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"PropertyAffectedType",
"AffectedAssetType",
"ImpactAssessmentType",
"ExternalImpactAssessmentModelType",
"COATakenType",
"JournalEntryType",
"COARequestedType",
"ContributorsType",
"COATimeType",
"LossEstimationType",
"TotalLossEstimationType",
"IndirectImpactSummaryType",
"DirectImpactSummaryType",
"NatureOfSecurityEffectType",
"HistoryItemType",
"HistoryType",
"AffectedAssetsType",
"TimeType",
"CategoriesType",
"EffectsType",
"AttributedThreatActorsType",
"RelatedIndicatorsType",
"RelatedObservablesType",
"LeveragedTTPsType",
"RelatedIncidentsType",
"AssetTypeType",
"IncidentType"
]
|
grossws/ansible-modules-core
|
refs/heads/devel
|
web_infrastructure/django_manage.py
|
36
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
required: false
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
choices: [ "yes", "no" ]
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
      - Will skip over out-of-order missing migrations. This parameter can only be used with the I(migrate) command.
required: false
version_added: "1.3"
merge:
description:
      - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter can only be used with the I(migrate) command.
required: false
version_added: "1.3"
link:
description:
      - Will create links to the files instead of copying them. This parameter can only be used with the I(collectstatic) command.
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python", for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage: command=cleanup app_path={{ django_dir }}
# Load the initial_data fixture into the application
- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }}
# Run syncdb on the application
- django_manage: >
command=syncdb
app_path={{ django_dir }}
settings={{ settings_app_name }}
pythonpath={{ settings_dir }}
virtualenv={{ virtualenv_dir }}
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest
# Create an initial superuser.
- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }}
"""
import os
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(os.path.expanduser(venv_param), 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
        vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
return "Already exists" not in line
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def syncdb_filter_output(line):
return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
def migrate_filter_output(line):
return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
def collectstatic_filter_output(line):
return "0 static files" not in line
def main():
command_allowed_param_map = dict(
cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
syncdb=('database', ),
test=('failfast', 'testrunner', 'liveserver', 'apps', ),
validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'syncdb',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command = dict(default=None, required=True),
app_path = dict(default=None, required=True),
settings = dict(default=None, required=False),
pythonpath = dict(default=None, required=False, aliases=['python_path']),
virtualenv = dict(default=None, required=False, aliases=['virtual_env']),
apps = dict(default=None, required=False),
cache_table = dict(default=None, required=False),
clear = dict(default=None, required=False, type='bool'),
database = dict(default=None, required=False),
failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures = dict(default=None, required=False),
liveserver = dict(default=None, required=False, aliases=['live_server']),
testrunner = dict(default=None, required=False, aliases=['test_runner']),
skip = dict(default=None, required=False, type='bool'),
merge = dict(default=None, required=False, type='bool'),
link = dict(default=None, required=False, type='bool'),
),
)
command = module.params['command']
app_path = os.path.expanduser(module.params['app_path'])
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
if param in specific_boolean_params:
value = module.boolean(value)
if value and param not in command_allowed_param_map[command]:
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
for param in command_required_param_map.get(command, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command))
_ensure_virtualenv(module)
cmd = "./manage.py %s" % (command, )
if command in noinput_commands:
cmd = '%s --noinput' % cmd
for param in general_params:
if module.params[param]:
cmd = '%s --%s=%s' % (cmd, param, module.params[param])
for param in specific_boolean_params:
if module.boolean(module.params[param]):
cmd = '%s --%s' % (cmd, param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
rc, out, err = module.run_command(cmd, cwd=os.path.expanduser(app_path))
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
else:
if "Unknown command:" in err:
_fail(module, cmd, err, "Unknown django command: %s" % command)
_fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command + "_filter_output", None)
if filt:
        filtered_output = filter(filt, lines)
if len(filtered_output):
changed = filtered_output
module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
settings=module.params['settings'], pythonpath=module.params['pythonpath'])
# import module snippets
from ansible.module_utils.basic import *
main()
|
mgogoulos/libcloud
|
refs/heads/trunk
|
docs/examples/container/ecs/instantiate_driver.py
|
30
|
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
cls = get_driver(Provider.ECS)
conn = cls(access_id='SDHFISJDIFJSIDFJ',
secret='THIS_IS)+_MY_SECRET_KEY+I6TVkv68o4H',
region='ap-southeast-2')
for container in conn.list_containers():
print(container.name)
for cluster in conn.list_clusters():
print(cluster.name)
|
tedye/leetcode
|
refs/heads/master
|
tools/leetcode.076.Minimum Window Substring/leetcode.076.Minimum Window Substring.submission7.py
|
2
|
class Solution:
    # Sliding-window approach: grow the right edge until the window covers
    # every character of t (with multiplicity), then repeatedly shrink from
    # the left and slide it, tracking the smallest valid window seen so far.
    # @param {string} s
    # @param {string} t
    # @return {string}
    def minWindow(self, s, t):
if not s: return ''
d = {}
book = set()
for i in t:
if i not in d:
d[i] = 1
book.add(i)
else:
d[i] += 1
i = 0
l = len(s)
while book and i < l:
if s[i] in d:
d[s[i]] -= 1
if d[s[i]] == 0:
book.remove(s[i])
i += 1
if i == l and book:
return ''
lb = 0
while 1:
if s[lb] not in d:
lb += 1
elif d[s[lb]] < 0:
d[s[lb]] += 1
lb += 1
else:
break
res = s[lb:i]
length = i - lb
while i < l:
if s[i] in d:
d[s[i]] -= 1
else:
i += 1
continue
if s[i] == s[lb]:
while 1:
if s[lb] not in d:
lb += 1
elif d[s[lb]] < 0:
d[s[lb]] += 1
lb += 1
else:
break
if length > i - lb+1:
length = i - lb+1
res = s[lb:i+1]
i+=1
return res
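# Minimal usage sketch (not part of the original submission): "ADOBECODEBANC"
# and "ABC" are illustrative inputs; the smallest window covering "ABC" is "BANC".
if __name__ == '__main__':
    print(Solution().minWindow("ADOBECODEBANC", "ABC"))  # expected: BANC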
|
minhphung171093/GreenERP_V9
|
refs/heads/master
|
openerp/addons/website_gengo/controllers/__init__.py
|
7372
|
import main
|
mhcrnl/PmwTkEx
|
refs/heads/master
|
java2s/bestway.py
|
1
|
import Tkinter as tk
from alarm import Alarm
class Demo1:
def __init__(self, master):
self.master= master
self.frame= tk.Frame(self.master)
        self.button1=tk.Button(self.frame, text="New Window", width=25,
command=self.new_window)
self.button1.pack()
self.frame.pack()
def new_window(self):
self.newWindow = tk.Toplevel(self.master)
self.app = Demo2(self.newWindow)
class Demo2:
def __init__(self, master):
self.master = master
self.frame = tk.Frame(self.master)
self.quitButton = tk.Button(self.frame, text="Quit", width=25,
command=self.close_window)
self.quitButton.pack()
self.frame.pack()
def close_window(self):
self.master.destroy()
class Demo3():
def __init__(self, master):
self.master=master
self.frame = tk.Frame(self.master)
self.demo2 = Demo1(self.frame)
#self.alarm= Alarm(self.frame, msecs=1700)
self.addBtn = tk.Button(self.master, text="BTN",
command=self.demo2.new_window)
self.addBtn.pack()
self.addBtn1 = tk.Button(self.master, text="BTN1",
command=self.runAlarm)
self.addBtn1.pack()
self.frame.pack()
def runAlarm(self):
self.alarm = Alarm(self.frame, msecs=1700)
self.alarm.master.destroy()
def main():
root=tk.Tk()
app=Demo3(root)
root.mainloop()
if __name__ == "__main__":
main()
|
balloob/home-assistant
|
refs/heads/dev
|
tests/util/test_ruamel_yaml.py
|
21
|
"""Test Home Assistant ruamel.yaml loader."""
import os
from tempfile import mkdtemp
import pytest
from ruamel.yaml import YAML
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util.ruamel_yaml as util_yaml
TEST_YAML_A = """\
title: My Awesome Home
# Include external resources
resources:
- url: /local/my-custom-card.js
type: js
- url: /local/my-webfont.css
type: css
# Exclude entities from "Unused entities" view
excluded_entities:
- weblink.router
views:
# View tab title.
- title: Example
# Optional unique id for direct access /lovelace/${id}
id: example
# Optional background (overwrites the global background).
background: radial-gradient(crimson, skyblue)
# Each view can have a different theme applied.
theme: dark-mode
# The cards to show on this view.
cards:
# The filter card will filter entities for their state
- type: entity-filter
entities:
- device_tracker.paulus
- device_tracker.anne_there
state_filter:
- 'home'
card:
type: glance
title: People that are home
# The picture entity card will represent an entity with a picture
- type: picture-entity
image: https://www.home-assistant.io/images/default-social.png
entity: light.bed_light
# Specify a tab icon if you want the view tab to be an icon.
- icon: mdi:home-assistant
# Title of the view. Will be used as the tooltip for tab icon
title: Second view
cards:
- id: test
type: entities
title: Test card
# Entities card will take a list of entities and show their state.
- type: entities
# Title of the entities card
title: Example
# The entities here will be shown in the same order as specified.
# Each entry is an entity ID or a map with extra options.
entities:
- light.kitchen
- switch.ac
- entity: light.living_room
# Override the name to use
name: LR Lights
# The markdown card will render markdown text.
- type: markdown
title: Lovelace
content: >
Welcome to your **Lovelace UI**.
"""
TEST_YAML_B = """\
title: Home
views:
- title: Dashboard
id: dashboard
icon: mdi:home
cards:
- id: testid
type: vertical-stack
cards:
- type: picture-entity
entity: group.sample
name: Sample
image: /local/images/sample.jpg
tap_action: toggle
"""
# Test data that can not be loaded as YAML
TEST_BAD_YAML = """\
title: Home
views:
- title: Dashboard
icon: mdi:home
cards:
- id: testid
type: vertical-stack
"""
# Test unsupported YAML
TEST_UNSUP_YAML = """\
title: Home
views:
- title: Dashboard
icon: mdi:home
cards: !include cards.yaml
"""
TMP_DIR = None
def setup():
"""Set up for tests."""
global TMP_DIR
TMP_DIR = mkdtemp()
def teardown():
"""Clean up after tests."""
for fname in os.listdir(TMP_DIR):
os.remove(os.path.join(TMP_DIR, fname))
os.rmdir(TMP_DIR)
def _path_for(leaf_name):
return os.path.join(TMP_DIR, f"{leaf_name}.yaml")
def test_save_and_load():
"""Test saving and loading back."""
yaml = YAML(typ="rt")
fname = _path_for("test1")
open(fname, "w+").close()
util_yaml.save_yaml(fname, yaml.load(TEST_YAML_A))
data = util_yaml.load_yaml(fname, True)
assert data == yaml.load(TEST_YAML_A)
def test_overwrite_and_reload():
"""Test that we can overwrite an existing file and read back."""
yaml = YAML(typ="rt")
fname = _path_for("test2")
open(fname, "w+").close()
util_yaml.save_yaml(fname, yaml.load(TEST_YAML_A))
util_yaml.save_yaml(fname, yaml.load(TEST_YAML_B))
data = util_yaml.load_yaml(fname, True)
assert data == yaml.load(TEST_YAML_B)
def test_load_bad_data():
"""Test error from trying to load unserialisable data."""
fname = _path_for("test3")
with open(fname, "w") as fh:
fh.write(TEST_BAD_YAML)
with pytest.raises(HomeAssistantError):
util_yaml.load_yaml(fname, True)
|
RexFuzzle/sfepy
|
refs/heads/master
|
examples/linear_elasticity/prestress_fibres.py
|
1
|
r"""
Linear elasticity with a given prestress in one subdomain and a (pre)strain
fibre reinforcement in the other.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
+ \int_{\Omega_1} \sigma_{ij} e_{ij}(\ul{v})
+ \int_{\Omega_2} D^f_{ijkl} e_{ij}(\ul{v}) \left(d_k d_l\right)
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
The stiffness of fibres :math:`D^f_{ijkl}` is defined analogously,
:math:`\ul{d}` is the unit fibre direction vector and :math:`\sigma_{ij}` is
the prestress.
Visualization
-------------
Use the following to see the deformed structure with 10x magnified
displacements::
$ ./postproc.py block.vtk -b --vector-mode=warp_norm -s 1 --wireframe
"""
import numpy as nm
from sfepy.mechanics.matcoefs import stiffness_from_lame
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/block.mesh'
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < -4.99)', 'facet'),
'Omega1' : 'vertices in (x < 0.001)',
'Omega2' : 'vertices in (x > -0.001)',
}
materials = {
'solid' : ({
'D' : stiffness_from_lame(3, lam=1e2, mu=1e1),
'prestress' : 0.1 * nm.array([[1.0], [1.0], [1.0],
[0.5], [0.5], [0.5]],
dtype=nm.float64),
'DF' : stiffness_from_lame(3, lam=8e0, mu=8e-1),
'nu' : nm.array([[-0.5], [0.0], [0.5]], dtype=nm.float64),
},),
}
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
ebcs = {
'Fixed' : ('Left', {'u.all' : 0.0}),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.2.Omega( solid.D, v, u )
+ dw_lin_prestress.2.Omega1( solid.prestress, v )
+ dw_lin_strain_fib.2.Omega2( solid.DF, solid.nu, v )
= 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
|
WASPACDC/hmdsm.repository
|
refs/heads/master
|
plugin.video.acdcIPTV/CustomPlayer.py
|
88
|
# -*- coding: utf-8 -*-
import xbmc
class MyXBMCPlayer(xbmc.Player):
def __init__( self, *args, **kwargs ):
self.is_active = True
self.urlplayed = False
self.pdialogue=None
print "#XBMCPlayer#"
#def play(self, url, listitem):
# print 'Now im playing... %s' % url
# self.is_active = False
# self.urlplayed = False
# xbmc.Player().play(url, listitem)
#def setdialogue( self, pdialogue ):
# self.pdialogue=pdialogue
def onPlayBackStarted( self ):
print "#Playback Started#"
try:
print "#Im playing :: "
except:
print "#I failed get what Im playing#"
if (self.pdialogue):
self.pdialogue.close()
self.urlplayed = True
def onPlayBackEnded( self ):
print "#Playback Ended#"
self.is_active = False
def onPlayBackStopped( self ):
print "## Playback Stopped ##"
self.is_active = False
|
xinst/NoahGameFrame
|
refs/heads/master
|
Dependencies/protobuf/gtest/test/gtest_output_test.py
|
1733
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
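# For example, 'src/foo_test.cc:123: Failure' and 'src\foo_test.cc(123): Failure'
# both normalize to 'foo_test.cc:#: Failure'.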
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
# A mis-configured source control system can cause \r appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
|
trezorg/django
|
refs/heads/master
|
django/contrib/gis/maps/google/overlays.py
|
405
|
from django.utils.safestring import mark_safe
from django.contrib.gis.geos import fromstr, Point, LineString, LinearRing, Polygon
class GEvent(object):
"""
A Python wrapper for the Google GEvent object.
Events can be attached to any object derived from GOverlayBase with the
add_event() call.
For more information please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GEvent
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline
def sample_request(request):
polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
polyline.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(polylines=[polyline])})
"""
def __init__(self, event, action):
"""
Initializes a GEvent object.
Parameters:
event:
string for the event, such as 'click'. The event must be a valid
event for the object in the Google Maps API.
There is no validation of the event type within Django.
action:
string containing a Javascript function, such as
'function() { location.href = "newurl";}'
The string must be a valid Javascript function. Again there is no
          validation of the function within Django.
"""
self.event = event
self.action = action
def __unicode__(self):
"Returns the parameter part of a GEvent."
return mark_safe('"%s", %s' %(self.event, self.action))
class GOverlayBase(object):
def __init__(self):
self.events = []
def latlng_from_coords(self, coords):
"Generates a JavaScript array of GLatLng objects for the given coordinates."
return '[%s]' % ','.join(['new GLatLng(%s,%s)' % (y, x) for x, y in coords])
def add_event(self, event):
"Attaches a GEvent to the overlay object."
self.events.append(event)
def __unicode__(self):
"The string representation is the JavaScript API call."
return mark_safe('%s(%s)' % (self.__class__.__name__, self.js_params))
class GPolygon(GOverlayBase):
"""
A Python wrapper for the Google GPolygon object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolygon
"""
def __init__(self, poly,
stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
fill_color='#0000ff', fill_opacity=0.4):
"""
The GPolygon object initializes on a GEOS Polygon or a parameter that
may be instantiated into GEOS Polygon. Please note that this will not
depict a Polygon's internal rings.
Keyword Options:
stroke_color:
The color of the polygon outline. Defaults to '#0000ff' (blue).
stroke_weight:
The width of the polygon outline, in pixels. Defaults to 2.
stroke_opacity:
The opacity of the polygon outline, between 0 and 1. Defaults to 1.
fill_color:
The color of the polygon fill. Defaults to '#0000ff' (blue).
fill_opacity:
The opacity of the polygon fill. Defaults to 0.4.
"""
if isinstance(poly, basestring): poly = fromstr(poly)
if isinstance(poly, (tuple, list)): poly = Polygon(poly)
if not isinstance(poly, Polygon):
raise TypeError('GPolygon may only initialize on GEOS Polygons.')
# Getting the envelope of the input polygon (used for automatically
# determining the zoom level).
self.envelope = poly.envelope
# Translating the coordinates into a JavaScript array of
# Google `GLatLng` objects.
self.points = self.latlng_from_coords(poly.shell.coords)
# Stroke settings.
self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
# Fill settings.
self.fill_color, self.fill_opacity = fill_color, fill_opacity
super(GPolygon, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
self.fill_color, self.fill_opacity)
class GPolyline(GOverlayBase):
"""
A Python wrapper for the Google GPolyline object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolyline
"""
def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
"""
        The GPolyline object may be initialized on GEOS LineString, LinearRing,
        and Polygon objects (internal rings not supported) or a parameter that
        may be instantiated into one of the above geometries.
Keyword Options:
color:
The color to use for the polyline. Defaults to '#0000ff' (blue).
weight:
The width of the polyline, in pixels. Defaults to 2.
opacity:
The opacity of the polyline, between 0 and 1. Defaults to 1.
"""
        # If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, basestring): geom = fromstr(geom)
if isinstance(geom, (tuple, list)): geom = Polygon(geom)
# Generating the lat/lng coordinate pairs.
if isinstance(geom, (LineString, LinearRing)):
self.latlngs = self.latlng_from_coords(geom.coords)
elif isinstance(geom, Polygon):
self.latlngs = self.latlng_from_coords(geom.shell.coords)
else:
raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
self.color, self.weight, self.opacity = color, weight, opacity
super(GPolyline, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
class GIcon(object):
"""
Creates a GIcon object to pass into a Gmarker object.
The keyword arguments map to instance attributes of the same name. These,
in turn, correspond to a subset of the attributes of the official GIcon
javascript object:
http://code.google.com/apis/maps/documentation/reference.html#GIcon
Because a Google map often uses several different icons, a name field has
been added to the required arguments.
Required Arguments:
varname:
        A string which will become the basis for the js variable name of
        the marker; for this reason, your code should assign a unique
        name for each GIcon you instantiate, otherwise there will be
        namespace collisions in your javascript.
Keyword Options:
image:
The url of the image to be used as the icon on the map defaults
to 'G_DEFAULT_ICON'
iconsize:
a tuple representing the pixel size of the foreground (not the
shadow) image of the icon, in the format: (width, height) ex.:
GIcon('fast_food',
image="/media/icon/star.png",
iconsize=(15,10))
Would indicate your custom icon was 15px wide and 10px height.
shadow:
the url of the image of the icon's shadow
shadowsize:
a tuple representing the pixel size of the shadow image, format is
the same as ``iconsize``
iconanchor:
a tuple representing the pixel coordinate relative to the top left
corner of the icon image at which this icon is anchored to the map.
In (x, y) format. x increases to the right in the Google Maps
coordinate system and y increases downwards in the Google Maps
        coordinate system.
infowindowanchor:
The pixel coordinate relative to the top left corner of the icon
image at which the info window is anchored to this icon.
"""
def __init__(self, varname, image=None, iconsize=None,
shadow=None, shadowsize=None, iconanchor=None,
infowindowanchor=None):
self.varname = varname
self.image = image
self.iconsize = iconsize
self.shadow = shadow
self.shadowsize = shadowsize
self.iconanchor = iconanchor
self.infowindowanchor = infowindowanchor
def __cmp__(self, other):
return cmp(self.varname, other.varname)
def __hash__(self):
# XOR with hash of GIcon type so that hash('varname') won't
# equal hash(GIcon('varname')).
return hash(self.__class__) ^ hash(self.varname)
class GMarker(GOverlayBase):
"""
A Python wrapper for the Google GMarker object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GMarker
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google.overlays import GMarker, GEvent
def sample_request(request):
marker = GMarker('POINT(101 26)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
marker.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(markers=[marker])})
"""
def __init__(self, geom, title=None, draggable=False, icon=None):
"""
The GMarker object may initialize on GEOS Points or a parameter
that may be instantiated into a GEOS point. Keyword options map to
GMarkerOptions -- so far only the title option is supported.
Keyword Options:
title:
Title option for GMarker, will be displayed as a tooltip.
draggable:
Draggable option for GMarker, disabled by default.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, basestring): geom = fromstr(geom)
if isinstance(geom, (tuple, list)): geom = Point(geom)
if isinstance(geom, Point):
self.latlng = self.latlng_from_coords(geom.coords)
else:
raise TypeError('GMarker may only initialize on GEOS Point geometry.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
# TODO: Add support for more GMarkerOptions
self.title = title
self.draggable = draggable
self.icon = icon
super(GMarker, self).__init__()
def latlng_from_coords(self, coords):
return 'new GLatLng(%s,%s)' %(coords[1], coords[0])
def options(self):
result = []
if self.title: result.append('title: "%s"' % self.title)
if self.icon: result.append('icon: %s' % self.icon.varname)
if self.draggable: result.append('draggable: true')
return '{%s}' % ','.join(result)
@property
def js_params(self):
return '%s, %s' % (self.latlng, self.options())
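# Illustrative sketch (not part of the original module): pairing a GIcon with
# a GMarker so the marker is drawn with a custom image. It needs a configured
# Django settings module and GEOS at runtime, so it is guarded from import.
if __name__ == '__main__':
    icon = GIcon('star', image='/media/icon/star.png', iconsize=(15, 10))
    marker = GMarker('POINT(101 26)', title='Star', icon=icon)
    print marker.js_params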
|
jenalgit/django
|
refs/heads/master
|
tests/view_tests/default_urls.py
|
405
|
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
# This is the same as in the default project template
url(r'^admin/', admin.site.urls),
]
|
spthaolt/VTK
|
refs/heads/5.10.1_vs2013
|
Examples/Modelling/Python/faultLines.py
|
42
|
#!/usr/bin/env python
# Create a constrained Delaunay triangulation following fault lines. The
# fault lines serve as constraint edges in the Delaunay triangulation.
import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.colors import *
VTK_DATA_ROOT = vtkGetDataRoot()
# Generate some points by reading a VTK data file. The data file also
# has edges that represent constraint lines. This is originally from a
# geologic horizon.
reader = vtk.vtkPolyDataReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/faults.vtk")
# Perform a 2D triangulation with constraint edges.
delny = vtk.vtkDelaunay2D()
delny.SetInputConnection(reader.GetOutputPort())
delny.SetSourceConnection(reader.GetOutputPort())
delny.SetTolerance(0.00001)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(delny.GetOutputPort())
mapMesh = vtk.vtkPolyDataMapper()
mapMesh.SetInputConnection(normals.GetOutputPort())
meshActor = vtk.vtkActor()
meshActor.SetMapper(mapMesh)
meshActor.GetProperty().SetColor(beige)
# Now pretty up the mesh with tubed edges and balls at the vertices.
tuber = vtk.vtkTubeFilter()
tuber.SetInputConnection(reader.GetOutputPort())
tuber.SetRadius(25)
mapLines = vtk.vtkPolyDataMapper()
mapLines.SetInputConnection(tuber.GetOutputPort())
linesActor = vtk.vtkActor()
linesActor.SetMapper(mapLines)
linesActor.GetProperty().SetColor(tomato)
# Create graphics objects
# Create the rendering window, renderer, and interactive renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(linesActor)
ren.AddActor(meshActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(350, 250)
cam1 = vtk.vtkCamera()
cam1.SetClippingRange(2580, 129041)
cam1.SetFocalPoint(461550, 6.58e+006, 2132)
cam1.SetPosition(463960, 6.559e+06, 16982)
cam1.SetViewUp(-0.321899, 0.522244, 0.78971)
light = vtk.vtkLight()
light.SetPosition(0, 0, 1)
light.SetFocalPoint(0, 0, 0)
ren.SetActiveCamera(cam1)
ren.AddLight(light)
ren.GetActiveCamera().Zoom(1.5)
iren.LightFollowCameraOff()
iren.Initialize()
renWin.Render()
iren.Start()
|
mawimawi/datadjables
|
refs/heads/master
|
datadjables/datadjable_testing/tests/__init__.py
|
12133432
| |
shinvdu/SmartQQBot
|
refs/heads/master
|
old_QQBot.py
|
13
|
# -*- coding: utf-8 -*-
# Code by Yinzo: https://github.com/Yinzo
# Origin repository: https://github.com/Yinzo/SmartQQBot
import re
import random
import json
import os
import sys
import datetime
import time
import threading
import logging
import ConfigParser
from HttpClient import HttpClient
reload(sys)
sys.setdefaultencoding("utf-8")
HttpClient_Ist = HttpClient()
ClientID = int(random.uniform(111111, 888888))
PTWebQQ = ''
APPID = 0
msgId = 0
FriendList = {}
GroupList = {}
ThreadList = []
GroupThreadList = []
GroupWatchList = []
PSessionID = ''
Referer = 'http://d.web2.qq.com/proxy.html?v=20130916001&callback=1&id=2'
SmartQQUrl = 'http://w.qq.com/login.html'
VFWebQQ = ''
AdminQQ = '0'
initTime = time.time()
conf = ConfigParser.ConfigParser()
if not os.path.isdir("./config"):
os.mkdir("./config")
print "已建立config文件夹"
if not os.path.exists("./config/QQBot_default.conf"):
open("./config/groupCheckList", "w")
print "已建立配置文件QQBot_default.conf"
else:
conf.read('./config/QQBot_default.conf')
print "读取QQBot_default.conf配置"
# pm config set
QA_activated = bool(conf.getint("pm", "QA_module_activated"))
# group config set
tucao_activated = bool(conf.getint("group", "tucao_module_activated"))
repeat_activated = bool(conf.getint("group", "repeat_module_activated"))
follow_activated = bool(conf.getint("group", "follow_module_activated"))
callout_activated = bool(conf.getint("group", "callout_module_activated"))
# -----------------
# Function declarations
# -----------------
def pass_time():
global initTime
rs = (time.time() - initTime)
initTime = time.time()
return str(round(rs, 3))
def getReValue(html, rex, er, ex):
v = re.search(rex, html)
if v is None:
logging.error(er)
if ex:
raise Exception, er
else:
print er
return ''
return v.group(1)
def date_to_millis(d):
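    # Convert a datetime to milliseconds since the Unix epoch (uses the
    # local timezone via mktime)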
return int(time.mktime(d.timetuple())) * 1000
# Look up a QQ number; the first lookup usually takes about 0.2s, later ones hit the cache and cost almost nothing
def uin_to_account(tuin):
    # If the sender's real QQ number is not in FriendList, fetch it automatically and cache it
global FriendList
if tuin not in FriendList:
try:
info = json.loads(HttpClient_Ist.Get('http://s.web2.qq.com/api/get_friend_uin2?tuin={0}&type=1&vfwebqq={1}'.format(tuin, VFWebQQ), Referer))
logging.info("Get uin to account info:" + str(info))
if info['retcode'] != 0:
raise ValueError, info
info = info['result']
FriendList[tuin] = info['account']
except Exception as e:
logging.error(e)
logging.info("Now FriendList:" + str(FriendList))
return FriendList[tuin]
def command_handler(inputText):
global GroupWatchList
pattern = re.compile(r'^(group|ungroup) (\d+)$')
match = pattern.match(inputText)
if match and match.group(1) == 'group':
GroupWatchList.append(str(match.group(2)))
print "当前群关注列表:", GroupWatchList
elif match and match.group(1) == 'ungroup':
GroupWatchList.remove(str(match.group(2)))
print "当前群关注列表:", GroupWatchList
else:
pattern = re.compile(r'^(g)(\d+) (learn|delete) (.+) (.+)')
match = pattern.match(inputText)
if match and str(match.group(3)) == 'learn' and group_thread_exist(match.group(2)):
group_thread_exist(match.group(2)).learn(str(match.group(4)), str(match.group(5)), False)
elif match and match.group(3) == 'delete' and group_thread_exist(match.group(2)):
group_thread_exist(match.group(2)).delete(str(match.group(4)), str(match.group(5)), False)
def msg_handler(msgObj):
for msg in msgObj:
msgType = msg['poll_type']
        # QQ private messages
        if msgType == 'message' or msgType == 'sess_message':  # private chat or temporary session
txt = combine_msg(msg['value']['content'])
tuin = msg['value']['from_uin']
msgid = msg['value']['msg_id2']
msgTime = msg['value']['time']
from_account = uin_to_account(tuin)
isSess = 0
group_sig = ''
service_type = ''
# print "{0}:{1}".format(from_account, txt)
targetThread = thread_exist(from_account)
if targetThread:
targetThread.push(txt, msgid, msgTime)
else:
if msgType == 'sess_message':
isSess = 1
service_type = msg['value']['service_type']
myid = msg['value']['id']
ts = time.time()
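                    # Scale the float timestamp up to a 13-digit (millisecond)
                    # integer, which the WebQQ endpoint appears to expect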
while ts < 1000000000000:
ts = ts * 10
ts = int(ts)
info = json.loads(HttpClient_Ist.Get('http://d.web2.qq.com/channel/get_c2cmsg_sig2?id={0}&to_uin={1}&clientid={2}&psessionid={3}&service_type={4}&t={5}'.format(myid, tuin, ClientID, PSessionID, service_type, ts), Referer))
logging.info("Getting group sig :" + str(info))
if info['retcode'] != 0:
raise ValueError, info
info = info['result']
group_sig = info['value']
logging.info("Group sig: " + str(group_sig))
tmpThread = pmchat_thread(tuin, service_type, group_sig, isSess)
tmpThread.start()
ThreadList.append(tmpThread)
tmpThread.push(txt, msgid, msgTime)
# print "{0}:{1}".format(self.FriendList.get(tuin, 0), txt)
            # if FriendList.get(tuin, 0) == AdminQQ:  # ignore the message unless its sender is AdminQQ
# if txt[0] == '#':
# thread.start_new_thread(self.runCommand, (tuin, txt[1:].strip(), msgId))
# msgId += 1
# if txt[0:4] == 'exit':
# logging.info(self.Get('http://d.web2.qq.com/channel/logout2?ids=&clientid={0}&psessionid={1}'.format(self.ClientID, self.PSessionID), Referer))
# exit(0)
        # Group messages
if msgType == 'group_message':
global GroupList, GroupWatchList
txt = combine_msg(msg['value']['content'])
guin = msg['value']['from_uin']
gid = msg['value']['info_seq']
tuin = msg['value']['send_uin']
seq = msg['value']['seq']
msgTime = msg['value']['time']
timeStr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msgTime))
GroupList[guin] = gid
if str(gid) in GroupWatchList:
g_exist = group_thread_exist(gid)
if g_exist:
print "%s 群(%s)的消息: %s" % (timeStr, str(gid), txt)
g_exist.handle(tuin, txt, seq, msgTime)
else:
tmpThread = group_thread(guin)
tmpThread.start()
GroupThreadList.append(tmpThread)
tmpThread.handle(tuin, txt, seq, msgTime)
print "群线程已生成"
else:
#print str(gid) + "群有动态,但是没有被监控"
pass
# from_account = get_account(tuin)
# print "{0}:{1}".format(from_account, txt)
        # The QQ account logged in elsewhere and was kicked offline
if msgType == 'kick_message':
logging.error(msg['value']['reason'])
            raise Exception, msg['value']['reason']  # raise to restart WebQQ; the QR code must be rescanned to log in again
def combine_msg(content):
msgTXT = ""
for part in content:
# print type(part)
if type(part) == type(u'\u0000'):
msgTXT += part
elif len(part) > 1:
            # If this part is a picture
if str(part[0]) == "offpic" or str(part[0]) == "cface":
msgTXT += "[图片]"
return msgTXT
def send_msg(tuin, content, service_type, group_sig, isSess, failTimes=0):
lastFailTimes = failTimes
try:
if isSess:
reqURL = "http://d.web2.qq.com/channel/send_sess_msg2"
data = (
('r', '{{"to":{0}, "face":594, "content":"[\\"{4}\\", [\\"font\\", {{\\"name\\":\\"Arial\\", \\"size\\":\\"10\\", \\"style\\":[0, 0, 0], \\"color\\":\\"000000\\"}}]]", "clientid":"{1}", "msg_id":{2}, "psessionid":"{3}", "group_sig":"{5}", "service_type":{6}}}'.format(tuin, ClientID, msgId, PSessionID, str(content), group_sig, service_type)),
('clientid', ClientID),
('psessionid', PSessionID),
('group_sig', group_sig),
('service_type', service_type)
)
else:
reqURL = "http://d.web2.qq.com/channel/send_buddy_msg2"
data = (
('r', '{{"to":{0}, "face":594, "content":"[\\"{4}\\", [\\"font\\", {{\\"name\\":\\"Arial\\", \\"size\\":\\"10\\", \\"style\\":[0, 0, 0], \\"color\\":\\"000000\\"}}]]", "clientid":"{1}", "msg_id":{2}, "psessionid":"{3}"}}'.format(tuin, ClientID, msgId, PSessionID, str(content))),
('clientid', ClientID),
('psessionid', PSessionID)
)
rsp = HttpClient_Ist.Post(reqURL, data, Referer)
rspp = json.loads(rsp)
if rspp['retcode'] != 0:
logging.error("reply pmchat error"+str(rspp['retcode']))
return rspp
except:
if lastFailTimes < 5:
logging.error("Response Error.Wait for 2s and Retrying."+str(lastFailTimes))
logging.info(rsp)
lastFailTimes += 1
time.sleep(2)
send_msg(tuin, content, service_type, group_sig, isSess, lastFailTimes)
else:
logging.error("Response Error over 5 times.Exit.")
raise ValueError, rsp
def thread_exist(tqq):
for t in ThreadList:
if t.tqq == tqq:
return t
return False
def group_thread_exist(gid):
for t in GroupThreadList:
if str(t.gid) == str(gid):
return t
return False
# -----------------
# Class declarations
# -----------------
class Login(HttpClient):
MaxTryTime = 5
def __init__(self, vpath, qq=0):
global APPID, AdminQQ, PTWebQQ, VFWebQQ, PSessionID, msgId
        self.VPath = vpath  # path where the QR code image is saved
AdminQQ = int(qq)
print "正在获取登陆页面"
self.initUrl = getReValue(self.Get(SmartQQUrl), r'\.src = "(.+?)"', 'Get Login Url Error.', 1)
html = self.Get(self.initUrl + '0')
print "正在获取appid"
APPID = getReValue(html, r'var g_appid =encodeURIComponent\("(\d+)"\);', 'Get AppId Error', 1)
print "正在获取login_sig"
sign = getReValue(html, r'var g_login_sig=encodeURIComponent\("(.+?)"\);', 'Get Login Sign Error', 0)
logging.info('get sign : %s', sign)
print "正在获取pt_version"
JsVer = getReValue(html, r'var g_pt_version=encodeURIComponent\("(\d+)"\);', 'Get g_pt_version Error', 1)
logging.info('get g_pt_version : %s', JsVer)
print "正在获取mibao_css"
MiBaoCss = getReValue(html, r'var g_mibao_css=encodeURIComponent\("(.+?)"\);', 'Get g_mibao_css Error', 1)
        logging.info('get g_mibao_css : %s', MiBaoCss)
StarTime = date_to_millis(datetime.datetime.utcnow())
T = 0
while True:
T = T + 1
self.Download('https://ssl.ptlogin2.qq.com/ptqrshow?appid={0}&e=0&l=L&s=8&d=72&v=4'.format(APPID), self.VPath)
print "登陆二维码下载成功,请扫描"
logging.info('[{0}] Get QRCode Picture Success.'.format(T))
print "下载二维码用时" + pass_time() + "秒"
while True:
html = self.Get('https://ssl.ptlogin2.qq.com/ptqrlogin?webqq_type=10&remember_uin=1&login2qq=1&aid={0}&u1=http%3A%2F%2Fw.qq.com%2Fproxy.html%3Flogin2qq%3D1%26webqq_type%3D10&ptredirect=0&ptlang=2052&daid=164&from_ui=1&pttype=1&dumy=&fp=loginerroralert&action=0-0-{1}&mibao_css={2}&t=undefined&g=1&js_type=0&js_ver={3}&login_sig={4}'.format(APPID, date_to_millis(datetime.datetime.utcnow()) - StarTime, MiBaoCss, JsVer, sign), self.initUrl)
# logging.info(html)
ret = html.split("'")
                if ret[1] == '65' or ret[1] == '0':  # 65: QR code expired, 0: verified OK, 66: not expired yet, 67: verifying
break
time.sleep(2)
if ret[1] == '0' or T > self.MaxTryTime:
break
logging.info(ret)
if ret[1] != '0':
return
print "二维码已扫描,正在登陆"
pass_time()
        # Delete the QR code file
if os.path.exists(self.VPath):
os.remove(self.VPath)
        # Record the nickname of the logged-in account
tmpUserName = ret[11]
html = self.Get(ret[5])
url = getReValue(html, r' src="(.+?)"', 'Get mibao_res Url Error.', 0)
if url != '':
html = self.Get(url.replace('&', '&'))
url = getReValue(html, r'location\.href="(.+?)"', 'Get Redirect Url Error', 1)
html = self.Get(url)
PTWebQQ = self.getCookie('ptwebqq')
logging.info('PTWebQQ: {0}'.format(PTWebQQ))
LoginError = 1
while LoginError > 0:
try:
html = self.Post('http://d.web2.qq.com/channel/login2', {
'r': '{{"ptwebqq":"{0}","clientid":{1},"psessionid":"{2}","status":"online"}}'.format(PTWebQQ, ClientID, PSessionID)
}, Referer)
ret = json.loads(html)
LoginError = 0
except:
LoginError += 1
print "登录失败,正在重试"
if ret['retcode'] != 0:
return
VFWebQQ = ret['result']['vfwebqq']
PSessionID = ret['result']['psessionid']
print "QQ号:{0} 登陆成功, 用户名:{1}".format(ret['result']['uin'], tmpUserName)
logging.info('Login success')
print "登陆二维码用时" + pass_time() + "秒"
msgId = int(random.uniform(20000, 50000))
class check_msg(threading.Thread):
# try:
# pass
    # except KeyboardInterrupt:
# try:
# user_input = (raw_input("回复系统:(输入格式:{群聊2or私聊1}, {群号or账号}, {内容})\n")).split(",")
# if (user_input[0] == 1):
# for kv in self.FriendList :
# if str(kv[1]) == str(user_input[1]):
# tuin == kv[0]
# self.send_msg(tuin, user_input[2])
    # except KeyboardInterrupt:
# exit(0)
# except Exception, e:
# print Exception, e
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global PTWebQQ
E = 0
        # Heartbeat polling loop
while 1:
if E > 5:
break
try:
ret = self.check()
except:
E += 1
continue
# logging.info(ret)
            # Malformed response data
if ret == "":
E += 1
continue
            # Bad POST data
if ret['retcode'] == 100006:
break
            # No new messages
if ret['retcode'] == 102:
E = 0
continue
            # Update the PTWebQQ value
if ret['retcode'] == 116:
PTWebQQ = ret['p']
E = 0
continue
            # Message received
if ret['retcode'] == 0:
                # Dispatch the messages
msg_handler(ret['result'])
E = 0
continue
print "轮询错误超过五次"
    # Poll the server for new messages
def check(self):
html = HttpClient_Ist.Post('http://d.web2.qq.com/channel/poll2', {
'r': '{{"ptwebqq":"{1}","clientid":{2},"psessionid":"{0}","key":""}}'.format(PSessionID, PTWebQQ, ClientID)
}, Referer)
logging.info("Check html: " + str(html))
try:
ret = json.loads(html)
except Exception as e:
logging.error(e)
print "Check error occured, retrying."
return self.check()
return ret
class pmchat_thread(threading.Thread):
replys = [
'对话激活,请输入第一项:',
'第一项输入完毕,输入2:',
'2输完,输3:',
'输完了',
]
inputs = []
# con = threading.Condition()
stage = 0
# newIp = ''
def __init__(self, tuin, service_type, group_sig, isSess):
threading.Thread.__init__(self)
self.tuin = tuin
self.tqq = uin_to_account(tuin)
self.inputs = []
self.stage = 0
self.isSess = isSess
self.service_type = service_type
self.group_sig = group_sig
self.lastMsgId = 0
def run(self):
while 1:
self.stage = 0
time.sleep(1800)
def reply(self, content):
send_msg(self.tuin, str(content), self.service_type, self.group_sig, self.isSess)
logging.info("Reply to " + str(self.tqq) + ":" + str(content))
def push(self, ipContent, msgid, msgTime):
if not QA_activated:
return False
if msgid != self.lastMsgId:
self.reply(self.replys[self.stage])
self.inputs.append(ipContent)
logging.info(str(self.tqq) + " :" + str(ipContent))
self.stage += 1
if self.stage == len(self.replys):
self.reply(self.inputs)
self.stage = 0
self.inputs = []
else:
logging.info("pm message repeat detected.")
self.lastMsgId = msgid
class group_thread(threading.Thread):
last1 = ''
lastseq = 0
replyList = {}
followList = []
    # Attributes
repeatPicture = False
def __init__(self, guin):
threading.Thread.__init__(self)
self.guin = guin
self.gid = GroupList[guin]
self.load()
if not os.path.isdir("./groupReplys"):
os.makedirs("./groupReplys")
def learn(self, key, value, needreply=True):
if not tucao_activated:
return False
if key in self.replyList:
self.replyList[key].append(value)
else:
self.replyList[key] = [value]
if needreply:
self.reply("学习成功!快对我说" + str(key) + "试试吧!")
self.save()
def delete(self, key, value, needreply=True):
if not tucao_activated:
return False
if key in self.replyList and self.replyList[key].count(value):
self.replyList[key].remove(value)
if needreply:
self.reply("呜呜呜我再也不说" + str(value) + "了")
self.save()
else:
if needreply:
self.reply("没找到你说的那句话哦")
def reply(self, content):
fix_content = str(content.replace("\\", "\\\\\\\\").replace("\n", "\\\\n").replace("\t", "\\\\t")).decode("utf-8")
reqURL = "http://d.web2.qq.com/channel/send_qun_msg2"
data = (
('r', '{{"group_uin":{0}, "face":564,"content":"[\\"{4}\\",[\\"font\\",{{\\"name\\":\\"Arial\\",\\"size\\":\\"10\\",\\"style\\":[0,0,0],\\"color\\":\\"000000\\"}}]]","clientid":"{1}","msg_id":{2},"psessionid":"{3}"}}'.format(self.guin, ClientID, msgId, PSessionID, fix_content)),
('clientid', ClientID),
('psessionid', PSessionID)
)
# print data
logging.info("Reply package: " + str(data))
rsp = HttpClient_Ist.Post(reqURL, data, Referer)
if rsp:
print "[reply content]:", content, "[rsp]:", rsp
logging.info("[Reply to group " + str(self.gid) + "]:" + str(content))
return rsp
def handle(self, send_uin, content, seq, msgTime):
        # Avoid handling the same message twice
if seq != self.lastseq:
pattern = re.compile(r'^(?:!|!)(learn|delete) {(.+)}{(.+)}')
match = pattern.match(content)
if match:
if match.group(1) == 'learn':
self.learn(str(match.group(2)).decode('UTF-8'), str(match.group(3)).decode('UTF-8'))
print self.replyList
if match.group(1) == 'delete':
self.delete(str(match.group(2)).decode('UTF-8'), str(match.group(3)).decode('UTF-8'))
print self.replyList
else:
# if not self.follow(send_uin, content):
# if not self.tucao(content):
# if not self.repeat(content):
# if not self.callout(content):
# pass
if self.follow(send_uin, content):
return
if self.tucao(content):
return
if self.repeat(content):
return
if self.callout(content):
return
else:
print "message seq repeat detected."
self.lastseq = seq
def tucao(self, content):
if not tucao_activated:
return False
for key in self.replyList:
if str(key) in content and self.replyList[key]:
rd = random.randint(0, len(self.replyList[key]) - 1)
self.reply(self.replyList[key][rd])
print str(self.replyList[key][rd])
return True
return False
def repeat(self, content):
if not repeat_activated:
return False
if self.last1 == str(content) and content != '' and content != ' ':
if self.repeatPicture or "[图片]" not in content:
self.reply(content)
print "已复读:{" + str(content) + "}"
return True
self.last1 = content
return False
def follow(self, send_uin, content):
if not follow_activated:
return False
pattern = re.compile(r'^(?:!|!)(follow|unfollow) (\d+|me)')
match = pattern.match(content)
if match:
target = str(match.group(2))
if target == 'me':
target = str(uin_to_account(send_uin))
if match.group(1) == 'follow' and target not in self.followList:
self.followList.append(target)
self.reply("正在关注" + target)
return True
if match.group(1) == 'unfollow' and target in self.followList:
self.followList.remove(target)
self.reply("我不关注" + target + "了!")
return True
else:
if str(uin_to_account(send_uin)) in self.followList:
self.reply(content)
return True
return False
def save(self):
with open("./groupReplys/" + str(self.gid) + ".save", "w+") as savefile:
savefile.write(json.dumps(self.replyList))
def load(self):
try:
with open("./groupReplys/" + str(self.gid) + ".save", "r") as savefile:
saves = savefile.read()
if saves:
self.replyList = json.loads(saves)
except Exception, e:
print "读取存档出错", e, Exception
def callout(self, content):
if "智障机器人" in content and callout_activated:
self.reply("干嘛(‘·д·)")
print str(self.gid) + "有人叫我"
return True
return False
# -----------------
# Main program
# -----------------
if __name__ == "__main__":
vpath = './v.jpg'
qq = 0
if len(sys.argv) > 1:
vpath = sys.argv[1]
if len(sys.argv) > 2:
qq = sys.argv[2]
try:
pass_time()
qqLogin = Login(vpath, qq)
except Exception, e:
print e
t_check = check_msg()
t_check.setDaemon(True)
t_check.start()
while 1:
tmpList = []
if not os.path.isdir("./config"):
os.mkdir("./config")
print "已建立config文件夹"
if not os.path.exists("./config/groupCheckList"):
open("./config/groupCheckList", "w")
print "已建立群关注列表文件groupCheckList"
with open("./config/groupCheckList") as groupListFile:
for group in groupListFile:
tmpList.append(str(int(group)))
if GroupWatchList != tmpList:
GroupWatchList = tmpList
print "当前群关注列表:", GroupWatchList
        # Reload the config
conf.read('./config/QQBot_default.conf')
# pm config set
QA_activated = conf.getint("pm", "QA_module_activated")
# group config set
tucao_activated = conf.getint("group", "tucao_module_activated")
repeat_activated = conf.getint("group", "repeat_module_activated")
follow_activated = conf.getint("group", "follow_module_activated")
callout_activated = conf.getint("group", "callout_module_activated")
# print callout_activated
time.sleep(5)
|
Suwings/Suwings.github.io
|
refs/heads/master
|
mine/parallel time/PapaProject/PythonGet/note/pyquery_save.py
|
1
|
import os
import time
from urllib.parse import urlparse
import pymysql
from pyquery import PyQuery as pquery
import requests
# A table along these lines:
# title            text, NOT NULL
# link             text, NOT NULL
# time             time
# crawl time       time
# site title
# site encoding
# summary          text(24)
# content pointer  int
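# A hypothetical DDL matching the sketch above (column names inferred from
# insert_pa_data below):
#   CREATE TABLE news_a1 (
#       id INT AUTO_INCREMENT PRIMARY KEY,
#       title TEXT NOT NULL,
#       domain VARCHAR(255),
#       link TEXT NOT NULL,
#       time VARCHAR(64),
#       get_time DATETIME,
#       Introduction TEXT,
#       context INT
#   );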
# try:
# BD_coon = pymysql.connect(
# host='127.0.0.1', user='root', passwd='toortoor',
# port=3306, db='papapa', charset='utf8'
# )
# except:
# print("数据库连接失败,程序停止.")
# exit(0)
# cursor = BD_coon.cursor()
def insert_pa_data(data):
# INSERT INTO
sql = "insert into `news_a1` (`title`, `domain`, `link`, `time`, `get_time`, `Introduction`, `context`) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '0');" % (
data['title'],
data['netloc'],
data['href'],
data['time'],
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())),
""
)
try:
cursor.execute(sql)
        # Commit the transaction to the database
BD_coon.commit()
except Exception as err:
        # Roll back if an error occurred
BD_coon.rollback()
print(err)
def init_reptile(tar_url):
web_res_context = requests.get(tar_url)
web_res_context.encoding = 'utf-8'
document = pquery(web_res_context.text)
    # Attach attributes to the reptile dict
reptile = {}
reptile['ext_tar_url'] = tar_url
reptile['document'] = document
return reptile
def get_context_website(reptile, configs):
"""
只要页面匹配,即可抓取,用于文章匹配
Use: get_context_website({
"时间":"a>time",
"标题":"#titile"
})
"""
document = reptile['document']
result = {}
for k, v in configs.items():
jq_elems = document.find(v).items()
tmp_context = ""
for je in jq_elems:
tmp_context += je.text()
result[k] = tmp_context
print(result)
return result
def get_one_webstie(reptile, mainElem, linkElem, TimeElem, titleElem=None):
"""仅仅用于抓取新闻标题"""
document = reptile['document']
objs = document.find(mainElem).items()
results = []
for v in objs:
tmps = {}
        # Parsed result, e.g. ParseResult(scheme='http', netloc='www.chenxm.cc', path='/post/719.html', params='', query='', fragment='')
url = reptile['ext_tar_url']
url_obj = urlparse(url)
        # Title
        if titleElem is None:
tmps['title'] = v.children(linkElem).text()
else:
tmps['title'] = v.children(titleElem).text()
        # Link
        href_url = v.children(linkElem).attr('href')
        if href_url.startswith('http'):
tmps['href'] = href_url
else:
tmps['href'] = url_obj.scheme + "://" + url_obj.netloc + os.path.normpath(os.path.join(
os.path.dirname(url_obj.path), href_url)).replace("\\", "/")
        # Time as plain text
tmps['time'] = v.children(TimeElem).text()
        # Original URL
tmps['original_url'] = url
        # The URL kept for the database is the normalized path
tmps['url'] = os.path.normpath(url_obj.path)
        # Hostname
tmps['netloc'] = url_obj.netloc
results.append(tmps)
return results
# get_context_website('http://www.moj.gov.cn/news/content/2019-03/15/zfyw_230595.html', {
# "title": ".con_bt",
# "context": "#content"
# })
# get_context_website(init_reptile('http://www.miit.gov.cn/n1146290/n1146392/c6669125/content.html'), {
# "title": "#con_title",
# "context": "div#con_con"
# })
news_center = []
news_center += get_one_webstie(init_reptile("http://www.gov.cn/zhengce/zuixin.htm"),
".news_box>.list h4", 'a', 'span.date')
news_center += get_one_webstie(init_reptile("http://www.miit.gov.cn/n1146295/n1652858/n1653018/index.html"),
".clist_con li", 'a', 'span>a')
# news_center += get_one_webstie("http://www.mohrss.gov.cn/gkml/zcjd/index.html",
# "#documentContainer>.row", '.mc a', '.fbrq>font',
# )
# news_center += get_one_webstie("http://www.moj.gov.cn/news/node_zfyw.html",
# "ul.font_black_16>li", 'dt>a', 'dd',)
news_count = 1
for news in news_center:
print(str(news_count) + "." + news['title'] +
"\n 时间 "+news['time']+" | 链接:" + news['href'])
news_count += 1
# insert_pa_data(news)
# if news_count > 10:
# break
"""
Possible issues
URL handling, e.g.:
../../../n1146295/n1652858/n1653018/c6635223/content.html
http://www.gov.cn/zhengce/content/2018-12/29/content.html
/zhengce/2019-01/25/content_5361054.htm
xxx/xxxx.html
"""
|
hzwjava/mongo-connector
|
refs/heads/master
|
mongo_connector/locking_dict.py
|
99
|
import threading
class LockingDict():
def __init__(self):
self.dict = {}
self.lock = threading.Lock()
def __enter__(self):
self.acquire_lock()
return self
def __exit__(self, type, value, traceback):
self.release_lock()
def get_dict(self):
return self.dict
def acquire_lock(self):
self.lock.acquire()
def release_lock(self):
self.lock.release()
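# Minimal usage sketch (illustrative): the class doubles as a context
# manager, so the lock is held for the duration of the "with" block.
#
#     ld = LockingDict()
#     with ld as locked:
#         locked.get_dict()['checkpoint'] = 42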
|
PearsonIOKI/compose-forum
|
refs/heads/master
|
askbot/migrations/0040_delete_old_tag_filter_strategies.py
|
20
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from askbot import const
class Migration(SchemaMigration):
def forwards(self, orm):
        # Drop the obsolete tag-filter columns from auth_user
db.delete_column(u'auth_user', 'hide_ignored_questions')
db.delete_column(u'auth_user', 'tag_filter_setting')
def backwards(self, orm):
db.add_column(
u'auth_user',
'hide_ignored_questions',
self.gf(
'django.db.models.fields.BooleanField'
)(default = False)
)
db.add_column(
u'auth_user',
'tag_filter_setting',
self.gf(
'django.db.models.fields.CharField'
)(default = 'ignored', max_length = 16)
)
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'through': "'ActivityAuditStatus'", 'to': "orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.answerrevision': {
'Meta': {'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionrevision': {
'Meta': {'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True', 'null': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'has_custom_avatar': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
nullishzero/Portage
|
refs/heads/master
|
pym/_emerge/Task.py
|
16
|
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.util.SlotObject import SlotObject
class Task(SlotObject):
__slots__ = ("_hash_key", "_hash_value")
def __eq__(self, other):
try:
return self._hash_key == other._hash_key
except AttributeError:
# depgraph._pkg() generates _hash_key
# for lookups here, so handle that
return self._hash_key == other
def __ne__(self, other):
try:
return self._hash_key != other._hash_key
except AttributeError:
return True
def __hash__(self):
return self._hash_value
def __len__(self):
return len(self._hash_key)
def __getitem__(self, key):
return self._hash_key[key]
def __iter__(self):
return iter(self._hash_key)
def __contains__(self, key):
return key in self._hash_key
def __str__(self):
"""
Emulate tuple.__repr__, but don't show 'foo' as u'foo' for unicode
strings.
"""
return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
def __repr__(self):
if self._hash_key is None:
# triggered by python-trace
return SlotObject.__repr__(self)
return "<%s (%s)>" % (self.__class__.__name__,
", ".join(("'%s'" % x for x in self._hash_key)))
|
eSedano/hoplite
|
refs/heads/master
|
1.0/lib/metis.py
|
1
|
#
# --------------------------------------------------------------------------------------------------
# __ ______ ____ __ ________________
# / / / / __ \/ __ \/ / / _/_ __/ ____/
# / /_/ / / / / /_/ / / / / / / / __/
# / __ / /_/ / ____/ /____/ / / / / /___
# /_/ /_/\____/_/ /_____/___/ /_/ /_____/ (v1.0 . Achilles)
#
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# --------------------------------------------------------------------------------------------------
"""
Interface to METIS partitioner
"""
# ------------------------------------------
# Imports section
# ------------------------------------------
import sys
# -------------------------
sys.dont_write_bytecode = True
# -------------------------
import os
import subprocess
import hoplitebase
# ------------------------------------------
class Metis(hoplitebase.HopliteBase):
""" SystemGraph class
"""
def __init__(self, parent=None, work_path=None, log=None):
super(Metis, self).__init__('metis', parent, work_path, log)
self._check_install()
self._sg = parent.systemgraph
self._nodes = None
self._edges = None
def get_partitions(self, nodes=None):
self.debug('metis.get_partitions start')
self._get_nodes_edges(nodes)
self._systemgraph_to_metis()
self._call_metis()
self._metis_to_systemgraph()
self.debug('metis.get_partitions end')
def _check_install(self):
pass
def _get_nodes_edges(self, nodes):
""" Fill the class _nodes and _edges information with the data provided
"""
nodes = self._sg.keys() if nodes is None else nodes
self._nodes = nodes
# Count the number of unique edges in the graph. If edges A->B and B->A appear in the
# graph, they will be only counted as one.
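        # e.g. the directed edges a->b and b->a both normalize to the single
        # key ('a', 'b'), so the set keeps one entry per undirected edge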
        self._edges = set(tuple(sorted((n, s))) for n in nodes for s in self._sg[n].get('successors', []))
    def _systemgraph_to_metis(self):
        metis_path = os.path.join(self._work_path, self._config['input_file'])
        # Header
        with open(metis_path, 'w') as metis:
            metis.write('% Header\n')
            vertices = len(self._nodes)
            edges = len(self._edges)
            metis.write('%s %s\n' % (vertices, edges))
def _call_metis(self):
pass
def _metis_to_systemgraph(self):
pass
# --------------------------------------------------------------------------------------------------
# 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 0
# 5 0 5 0 5 0 5 0 5 0 5 0 5 0 5 0 5 0 5 0
|
jjlee9/openthread
|
refs/heads/master
|
tests/scripts/thread-cert/Cert_5_3_04_AddressMapCache.py
|
5
|
#!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER = 1
ROUTER1 = 2
SED1 = 3
ED2 = 4
ED3 = 5
ED4 = 6
ED5 = 7
class Cert_5_3_4_AddressMapCache(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,8):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED2].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED3].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED4].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED5].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[SED1].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[SED1].set_panid(0xface)
self.nodes[SED1].set_mode('rsn')
self.nodes[SED1].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[SED1].enable_whitelist()
self.nodes[ED2].set_panid(0xface)
self.nodes[ED2].set_mode('rsn')
self.nodes[ED2].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED2].enable_whitelist()
self.nodes[ED3].set_panid(0xface)
self.nodes[ED3].set_mode('rsn')
self.nodes[ED3].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED3].enable_whitelist()
self.nodes[ED4].set_panid(0xface)
self.nodes[ED4].set_mode('rsn')
self.nodes[ED4].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED4].enable_whitelist()
self.nodes[ED5].set_panid(0xface)
self.nodes[ED5].set_mode('rsn')
self.nodes[ED5].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED5].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[SED1].start()
time.sleep(5)
self.assertEqual(self.nodes[SED1].get_state(), 'child')
self.nodes[ED2].start()
time.sleep(5)
self.assertEqual(self.nodes[ED2].get_state(), 'child')
self.nodes[ED3].start()
time.sleep(5)
self.assertEqual(self.nodes[ED3].get_state(), 'child')
self.nodes[ED4].start()
time.sleep(5)
self.assertEqual(self.nodes[ED4].get_state(), 'child')
self.nodes[ED5].start()
time.sleep(5)
self.assertEqual(self.nodes[ED5].get_state(), 'child')
for i in range(4, 8):
addrs = self.nodes[i].get_addrs()
for addr in addrs:
if addr[0:4] != 'fe80':
self.assertTrue(self.nodes[SED1].ping(addr))
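        # Ping the same addresses a second time; per the test name, these
        # replies should now be resolved from SED1's address map cache
        # instead of triggering fresh address queries.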
for i in range(4, 8):
addrs = self.nodes[i].get_addrs()
for addr in addrs:
if addr[0:4] != 'fe80':
self.assertTrue(self.nodes[SED1].ping(addr))
if __name__ == '__main__':
unittest.main()
|
eleonrk/SickRage
|
refs/heads/master
|
lib/pbr/options.py
|
99
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import os
TRUE_VALUES = ('true', '1', 'yes')
def get_boolean_option(option_dict, option_name, env_name):
return ((option_name in option_dict
and option_dict[option_name][1].lower() in TRUE_VALUES) or
str(os.getenv(env_name)).lower() in TRUE_VALUES)
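# Minimal usage sketch (hypothetical option and variable names):
#   option_dict entries are (source, value) pairs, as produced by distutils,
#   so get_boolean_option({'skip_changelog': ('setup.cfg', 'True')},
#                         'skip_changelog', 'SKIP_WRITE_GIT_CHANGELOG')
#   returns True; setting the environment variable to 'true', '1' or 'yes'
#   enables the option even when it is absent from the dict.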
|
cschnei3/forseti-security
|
refs/heads/master
|
google/cloud/security/common/data_access/sql_queries/create_tables.py
|
2
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQL queries to create Cloud SQL tables."""
CREATE_APPENGINE_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`app_id` varchar(255) DEFAULT NULL,
`dispatch_rules` json DEFAULT NULL,
`auth_domain` varchar(255) DEFAULT NULL,
`location_id` varchar(255) DEFAULT NULL,
`code_bucket` varchar(255) DEFAULT NULL,
`default_cookie_expiration` varchar(255) DEFAULT NULL,
`serving_status` varchar(255) DEFAULT NULL,
`default_hostname` varchar(255) DEFAULT NULL,
`default_bucket` varchar(255) DEFAULT NULL,
`iap` json DEFAULT NULL,
`gcr_domain` varchar(255) DEFAULT NULL,
`raw_application` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BACKEND_SERVICES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`affinity_cookie_ttl_sec` int DEFAULT NULL,
`backends` json DEFAULT NULL,
`cdn_policy` json DEFAULT NULL,
`connection_draining` json DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`enable_cdn` bool DEFAULT NULL,
`health_checks` json DEFAULT NULL,
`iap` json DEFAULT NULL,
`load_balancing_scheme` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`port_name` varchar(255) DEFAULT NULL,
`port` int DEFAULT NULL,
`protocol` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`session_affinity` varchar(255) DEFAULT NULL,
`timeout_sec` varchar(255) DEFAULT NULL,
`raw_backend_service` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BIGQUERY_DATASETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`dataset_id` varchar(255) DEFAULT NULL,
`access_domain` varchar(255) DEFAULT NULL,
`access_user_by_email` varchar(255) DEFAULT NULL,
`access_special_group` varchar(255) DEFAULT NULL,
`access_group_by_email` varchar(255) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`access_view_project_id` varchar(255) DEFAULT NULL,
`access_view_table_id` varchar(255) DEFAULT NULL,
`access_view_dataset_id` varchar(255) DEFAULT NULL,
`raw_access_map` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BUCKETS_ACL_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`bucket` varchar(255) DEFAULT NULL,
`domain` varchar(255) DEFAULT NULL,
`email` varchar(255) DEFAULT NULL,
`entity` varchar(255) DEFAULT NULL,
`entity_id` varchar(255) DEFAULT NULL,
`acl_id` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`project_team` json DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`bucket_acl_selflink` varchar(255) DEFAULT NULL,
`raw_bucket_acl` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_BUCKETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`bucket_id` varchar(255) DEFAULT NULL,
`bucket_name` varchar(255) DEFAULT NULL,
`bucket_kind` varchar(255) DEFAULT NULL,
`bucket_storage_class` varchar(255) DEFAULT NULL,
`bucket_location` varchar(255) DEFAULT NULL,
`bucket_create_time` datetime DEFAULT NULL,
`bucket_update_time` datetime DEFAULT NULL,
`bucket_selflink` varchar(255) DEFAULT NULL,
`bucket_lifecycle_raw` json DEFAULT NULL,
`raw_bucket` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_INSTANCES_TABLE = """
    CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`name` varchar(255) DEFAULT NULL,
`project` varchar(255) DEFAULT NULL,
`backend_type` varchar(255) DEFAULT NULL,
`connection_name` varchar(255) DEFAULT NULL,
`current_disk_size` bigint DEFAULT NULL,
`database_version` varchar(255) DEFAULT NULL,
`failover_replica_available` varchar(255) DEFAULT NULL,
`failover_replica_name` varchar(255) DEFAULT NULL,
`instance_type` varchar(255) DEFAULT NULL,
`ipv6_address` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`master_instance_name` varchar(255) DEFAULT NULL,
`max_disk_size` bigint DEFAULT NULL,
`on_premises_configuration_host_port` varchar(255) DEFAULT NULL,
`on_premises_configuration_kind` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`replica_configuration` json DEFAULT NULL,
`replica_names` json DEFAULT NULL,
`self_link` varchar(255) DEFAULT NULL,
`server_ca_cert` json DEFAULT NULL,
`service_account_email_address` varchar(255) DEFAULT NULL,
`settings_activation_policy` varchar(255) DEFAULT NULL,
`settings_authorized_gae_applications` json DEFAULT NULL,
`settings_availability_type` varchar(255) DEFAULT NULL,
`settings_backup_configuration_binary_log_enabled` varchar(255) DEFAULT NULL,
`settings_backup_configuration_enabled` varchar(255) DEFAULT NULL,
`settings_backup_configuration_kind` varchar(255) DEFAULT NULL,
`settings_backup_configuration_start_time` varchar(255) DEFAULT NULL,
`settings_crash_safe_replication_enabled` varchar(255) DEFAULT NULL,
`settings_data_disk_size_gb` bigint DEFAULT NULL,
`settings_data_disk_type` varchar(255) DEFAULT NULL,
`settings_database_flags` json DEFAULT NULL,
`settings_database_replication_enabled` varchar(255) DEFAULT NULL,
`settings_ip_configuration_ipv4_enabled` varchar(255) DEFAULT NULL,
`settings_ip_configuration_require_ssl` varchar(255) DEFAULT NULL,
`settings_kind` varchar(255) DEFAULT NULL,
`settings_labels` json DEFAULT NULL,
`settings_location_preference_follow_gae_application` varchar(255) DEFAULT NULL,
`settings_location_preference_kind` varchar(255) DEFAULT NULL,
`settings_location_preference_zone` varchar(255) DEFAULT NULL,
`settings_maintenance_window` json DEFAULT NULL,
`settings_pricing_plan` varchar(255) DEFAULT NULL,
`settings_replication_type` varchar(255) DEFAULT NULL,
`settings_settings_version` bigint DEFAULT NULL,
`settings_storage_auto_resize` varchar(255) DEFAULT NULL,
`settings_storage_auto_resize_limit` bigint DEFAULT NULL,
`settings_tier` varchar(255) DEFAULT NULL,
`state` varchar(255) DEFAULT NULL,
`suspension_reason` json DEFAULT NULL,
`raw_cloudsql_instance` json DEFAULT NULL,
        PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_IPADDRESSES_TABLE = """
    CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`instance_name` varchar(255) DEFAULT NULL,
`type` varchar(255) DEFAULT NULL,
`ip_address` varchar(255) DEFAULT NULL,
`time_to_retire` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_CLOUDSQL_IPCONFIGURATION_AUTHORIZEDNETWORKS = """
    CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`instance_name` varchar(255) DEFAULT NULL,
`kind` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`value` varchar(255) DEFAULT NULL,
`expiration_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FIREWALL_RULES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`firewall_rule_id` bigint(20) unsigned NOT NULL,
`project_id` varchar(255) NOT NULL,
`firewall_rule_name` varchar(255) DEFAULT NULL,
`firewall_rule_description` varchar(512) DEFAULT NULL,
`firewall_rule_kind` varchar(255) DEFAULT NULL,
`firewall_rule_network` varchar(255) DEFAULT NULL,
`firewall_rule_priority` smallint(5) unsigned,
`firewall_rule_direction` varchar(255) DEFAULT NULL,
`firewall_rule_source_ranges` json DEFAULT NULL,
`firewall_rule_destination_ranges` json DEFAULT NULL,
`firewall_rule_source_tags` json DEFAULT NULL,
`firewall_rule_target_tags` json DEFAULT NULL,
`firewall_rule_allowed` json DEFAULT NULL,
`firewall_rule_denied` json DEFAULT NULL,
`firewall_rule_self_link` varchar(255) DEFAULT NULL,
`firewall_rule_create_time` datetime(3) DEFAULT NULL,
`raw_firewall_rule` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FOLDER_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`folder_id` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FOLDERS_TABLE = """
CREATE TABLE `{0}` (
`folder_id` bigint(20) unsigned NOT NULL,
`name` varchar(255) NOT NULL,
`display_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('ACTIVE','DELETE_REQUESTED',
'DELETED','LIFECYCLE_STATE_UNSPECIFIED') DEFAULT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_folder` json DEFAULT NULL,
`create_time` datetime DEFAULT NULL,
PRIMARY KEY (`folder_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_FORWARDING_RULES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL,
`project_id` varchar(255) NOT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`ip_address` varchar(255) DEFAULT NULL,
`ip_protocol` enum('TCP','UDP','ESP','AH','SCTP','ICMP') DEFAULT NULL,
`port_range` varchar(255) DEFAULT NULL,
`ports` json DEFAULT NULL,
`target` varchar(255) DEFAULT NULL,
`load_balancing_scheme` enum('INTERNAL','EXTERNAL') DEFAULT NULL,
`subnetwork` varchar(255) DEFAULT NULL,
`network` varchar(255) DEFAULT NULL,
`backend_service` varchar(255) DEFAULT NULL,
`raw_forwarding_rule` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
# TODO: Add a RAW_GROUP_MEMBERS_TABLE.
CREATE_GROUP_MEMBERS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`group_id` varchar(255) DEFAULT NULL,
`member_kind` varchar(255) DEFAULT NULL,
`member_role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_status` varchar(255) DEFAULT NULL,
`member_id` varchar(255) DEFAULT NULL,
`member_email` varchar(255) DEFAULT NULL,
`raw_member` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_GROUPS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`group_id` varchar(255) DEFAULT NULL,
`group_email` varchar(255) DEFAULT NULL,
`group_kind` varchar(255) DEFAULT NULL,
`direct_member_count` bigint(20) DEFAULT NULL,
`raw_group` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_GROUPS_VIOLATIONS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`member_email` varchar(255) NOT NULL,
`group_email` varchar(255) NOT NULL,
`rule_name` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`can_ip_forward` bool DEFAULT NULL,
`cpu_platform` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`disks` json DEFAULT NULL,
`machine_type` varchar(255) DEFAULT NULL,
`metadata` json DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`network_interfaces` json DEFAULT NULL,
`scheduling` json DEFAULT NULL,
`service_accounts` json DEFAULT NULL,
`status` varchar(255) DEFAULT NULL,
`status_message` varchar(255) DEFAULT NULL,
`tags` json DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_GROUPS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`named_ports` json DEFAULT NULL,
`network` varchar(255) DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`size` int DEFAULT NULL,
`subnetwork` varchar(255) DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance_group` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_TEMPLATES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`properties` json DEFAULT NULL,
`raw_instance_template` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_INSTANCE_GROUP_MANAGERS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_id` varchar(255) DEFAULT NULL,
`base_instance_name` varchar(255) DEFAULT NULL,
`creation_timestamp` datetime DEFAULT NULL,
`current_actions` json DEFAULT NULL,
`description` varchar(512) DEFAULT NULL,
`instance_group` varchar(255) DEFAULT NULL,
`instance_template` varchar(255) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`named_ports` json DEFAULT NULL,
`region` varchar(255) DEFAULT NULL,
`target_pools` json DEFAULT NULL,
`target_size` int DEFAULT NULL,
`zone` varchar(255) DEFAULT NULL,
`raw_instance_group_manager` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_ORGANIZATIONS_TABLE = """
CREATE TABLE `{0}` (
`org_id` bigint(20) unsigned NOT NULL,
`name` varchar(255) NOT NULL,
`display_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('LIFECYCLE_STATE_UNSPECIFIED','ACTIVE',
'DELETE_REQUESTED', 'DELETED') NOT NULL,
`raw_org` json DEFAULT NULL,
`creation_time` datetime DEFAULT NULL,
PRIMARY KEY (`org_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_PROJECT_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`project_id` varchar(255) NOT NULL,
`project_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('LIFECYCLE_STATE_UNSPECIFIED','ACTIVE',
'DELETE_REQUESTED','DELETED') NOT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_project` json DEFAULT NULL,
`create_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `project_id_UNIQUE` (`project_id`),
UNIQUE KEY `project_number_UNIQUE` (`project_number`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_BUCKETS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`buckets` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_FOLDER_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`folder_id` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
# TODO: define the violation_type enum as a list
CREATE_VIOLATIONS_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`resource_type` varchar(255) NOT NULL,
`resource_id` varchar(255) NOT NULL,
`rule_name` varchar(255) DEFAULT NULL,
`rule_index` int DEFAULT NULL,
`violation_type` enum('UNSPECIFIED','ADDED','REMOVED',
'BIGQUERY_VIOLATION', 'BUCKET_VIOLATION',
'CLOUD_SQL_VIOLATION') NOT NULL,
`violation_data` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
|
alexhersh/kubernetes
|
refs/heads/master
|
cluster/juju/charms/trusty/kubernetes-master/hooks/setup.py
|
149
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pre_install():
"""
Do any setup required before the install hook.
"""
install_charmhelpers()
install_path()
def install_charmhelpers():
"""
Install the charmhelpers library, if not present.
"""
try:
import charmhelpers # noqa
except ImportError:
import subprocess
subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
subprocess.check_call(['pip', 'install', 'charmhelpers'])
def install_path():
"""
    Install the path.py library, if not present.
"""
try:
import path # noqa
except ImportError:
import subprocess
subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
subprocess.check_call(['pip', 'install', 'path.py'])
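# Editor's sketch (not in the original charm): the two installers above share
# a single try-import/apt-pip-fallback pattern; a generic variant might be:
def install_python_package(module_name, pip_name):
    """Import `module_name`, installing `pip_name` via pip if missing."""
    try:
        __import__(module_name)  # noqa
    except ImportError:
        import subprocess
        subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
        subprocess.check_call(['pip', 'install', pip_name])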
|
unnikrishnankgs/va
|
refs/heads/master
|
venv/lib/python3.5/site-packages/ipykernel/tests/test_jsonutil.py
|
8
|
# coding: utf-8
"""Test suite for our JSON utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import sys
if sys.version_info < (3,):
from base64 import decodestring as decodebytes
else:
from base64 import decodebytes
from datetime import datetime
import numbers
import nose.tools as nt
from .. import jsonutil
from ..jsonutil import json_clean, encode_images
from ipython_genutils.py3compat import unicode_to_str, str_to_bytes, iteritems
class MyInt(object):
def __int__(self):
return 389
numbers.Integral.register(MyInt)
class MyFloat(object):
def __float__(self):
return 3.14
numbers.Real.register(MyFloat)
def test():
# list of input/expected output. Use None for the expected output if it
# can be the same as the input.
pairs = [(1, None), # start with scalars
(1.0, None),
('a', None),
(True, None),
(False, None),
(None, None),
# Containers
([1, 2], None),
((1, 2), [1, 2]),
(set([1, 2]), [1, 2]),
(dict(x=1), None),
({'x': 1, 'y':[1,2,3], '1':'int'}, None),
# More exotic objects
((x for x in range(3)), [0, 1, 2]),
(iter([1, 2]), [1, 2]),
(datetime(1991, 7, 3, 12, 00), "1991-07-03T12:00:00.000000"),
(MyFloat(), 3.14),
(MyInt(), 389)
]
for val, jval in pairs:
if jval is None:
jval = val
out = json_clean(val)
# validate our cleanup
nt.assert_equal(out, jval)
# and ensure that what we return, indeed encodes cleanly
json.loads(json.dumps(out))
def test_encode_images():
# invalid data, but the header and footer are from real files
pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82'
jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9'
pdfdata = b'%PDF-1.\ntrailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>]>>>>>>'
fmt = {
'image/png' : pngdata,
'image/jpeg' : jpegdata,
'application/pdf' : pdfdata
}
encoded = encode_images(fmt)
for key, value in iteritems(fmt):
# encoded has unicode, want bytes
decoded = decodebytes(encoded[key].encode('ascii'))
nt.assert_equal(decoded, value)
encoded2 = encode_images(encoded)
nt.assert_equal(encoded, encoded2)
b64_str = {}
    for key, b64 in iteritems(encoded):
        b64_str[key] = unicode_to_str(b64)
encoded3 = encode_images(b64_str)
nt.assert_equal(encoded3, b64_str)
for key, value in iteritems(fmt):
# encoded3 has str, want bytes
decoded = decodebytes(str_to_bytes(encoded3[key]))
nt.assert_equal(decoded, value)
def test_lambda():
with nt.assert_raises(ValueError):
json_clean(lambda : 1)
def test_exception():
bad_dicts = [{1:'number', '1':'string'},
{True:'bool', 'True':'string'},
]
for d in bad_dicts:
nt.assert_raises(ValueError, json_clean, d)
def test_unicode_dict():
data = {u'üniço∂e': u'üniço∂e'}
clean = jsonutil.json_clean(data)
nt.assert_equal(data, clean)
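def test_roundtrip_property():
    # Editor's addition (a sketch, not an original test): the invariant the
    # `pairs` cases exercise, stated directly -- json_clean output must
    # always be encodable by the stdlib json module.
    obj = {'xs': (1, 2), 'ys': set([3]), 'when': datetime(2000, 1, 1)}
    json.loads(json.dumps(json_clean(obj)))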
|
gnieboer/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/monitors.py
|
14
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors instrument the training process.
@@get_default_monitors
@@BaseMonitor
@@CaptureVariable
@@CheckpointSaver
@@EveryN
@@ExportMonitor
@@GraphDump
@@LoggingTrainable
@@NanLoss
@@PrintTensor
@@StepCounter
@@StopAtStep
@@SummarySaver
@@ValidationMonitor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import summary_io
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
# TODO(ptucker): Split each monitor class into a separate file.
# TODO(ptucker): Fail if epoch or step does not monotonically increase?
class BaseMonitor(object):
"""Base class for Monitors.
Defines basic interfaces of Monitors.
Monitors can either be run on all workers or, more commonly, restricted
to run exclusively on the elected chief worker.
"""
@deprecation.deprecated(
"2016-12-05",
"Monitors are deprecated. Please use tf.train.SessionRunHook.")
def __init__(self):
self._begun = False
self._current_epoch = None
self._current_step = None
self._max_steps = None
self._estimator = None
@property
def run_on_all_workers(self):
return False
def set_estimator(self, estimator):
"""A setter called automatically by the target estimator.
If the estimator is locked, this method does nothing.
Args:
estimator: the estimator that this monitor monitors.
Raises:
ValueError: if the estimator is None.
"""
if estimator is None:
raise ValueError("Missing estimator.")
# TODO(mdan): This should fail if called twice with the same estimator.
self._estimator = estimator
def begin(self, max_steps=None):
"""Called at the beginning of training.
When called, the default graph is the one we are executing.
Args:
max_steps: `int`, the maximum global step this training will run until.
Raises:
ValueError: if we've already begun a run.
"""
if self._begun:
raise ValueError("begin called twice without end.")
self._max_steps = max_steps
self._begun = True
def end(self, session=None):
"""Callback at the end of training/evaluation.
Args:
session: A `tf.Session` object that can be used to run ops.
Raises:
ValueError: if we've not begun a run.
"""
_ = session
if not self._begun:
raise ValueError("end called without begin.")
self._max_steps = None
self._begun = False
def epoch_begin(self, epoch):
"""Begin epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've already begun an epoch, or `epoch` < 0.
"""
if self._current_epoch is not None:
raise ValueError("epoch_begin called twice without epoch_end.")
if epoch < 0:
raise ValueError("Invalid epoch %s." % epoch)
self._current_epoch = epoch
def epoch_end(self, epoch):
"""End epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've not begun an epoch, or `epoch` number does not match.
"""
if self._current_epoch != epoch:
      raise ValueError(
          "epoch_end expected %s but got %s." % (self._current_epoch, epoch))
self._current_epoch = None
def step_begin(self, step):
"""Callback before training step begins.
You may use this callback to request evaluation of additional tensors
in the graph.
Args:
step: `int`, the current value of the global step.
Returns:
List of `Tensor` objects or string tensor names to be run.
Raises:
ValueError: if we've already begun a step, or `step` < 0, or
`step` > `max_steps`.
"""
if (step < 0) or (
(self._max_steps is not None) and (step > self._max_steps)):
raise ValueError("Invalid step %s." % step)
self._current_step = step
return []
def step_end(self, step, output): # pylint: disable=unused-argument
"""Callback after training step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Note that this method is not called if the call to `Session.run()` that
followed the last call to `step_begin()` failed.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
Raises:
ValueError: if we've not begun a step, or `step` number does not match.
"""
if self._current_step != step:
      raise ValueError(
          "step_end expected %s but got %s." % (self._current_step, step))
self._current_step = None
return False
def post_step(self, step, session): # pylint: disable=unused-argument
"""Callback after the step is finished.
    Called after step_end and receives a session to perform extra session.run
    calls. This callback is invoked even if a failure occurred during the step.
Args:
step: `int`, global step of the model.
session: `Session` object.
"""
_ = step, session
def _extract_output(outputs, request):
if request in outputs:
return outputs[request]
return outputs[request.name]
class EveryN(BaseMonitor):
"""Base class for monitors that execute callbacks every N steps.
This class adds three new callbacks:
- every_n_step_begin
- every_n_step_end
- every_n_post_step
The callbacks are executed every n steps, or optionally every step for the
first m steps, where m and n can both be user-specified.
When extending this class, note that if you wish to use any of the
`BaseMonitor` callbacks, you must call their respective super implementation:
def step_begin(self, step):
super(ExampleMonitor, self).step_begin(step)
return []
Failing to call the super implementation will cause unpredictable behavior.
The `every_n_post_step()` callback is also called after the last step if it
was not already called through the regular conditions. Note that
`every_n_step_begin()` and `every_n_step_end()` do not receive that special
treatment.
"""
# TODO(ipolosukhin): Add also every n seconds.
def __init__(self, every_n_steps=100, first_n_steps=1):
"""Initializes an `EveryN` monitor.
Args:
every_n_steps: `int`, the number of steps to allow between callbacks.
first_n_steps: `int`, specifying the number of initial steps during
which the callbacks will always be executed, regardless of the value
        of `every_n_steps`. Note that this value is relative to the global step.
"""
super(EveryN, self).__init__()
self._every_n_steps = every_n_steps
self._first_n_steps = first_n_steps
    # Last step for which post_step completed.
    self._last_successful_step = None
    # Last step at which one of the every_n callbacks ran.
    self._last_active_step = 0
self._every_n_step_begin_called = False
def every_n_step_begin(self, step): # pylint: disable=unused-argument
"""Callback before every n'th step begins.
Args:
step: `int`, the current value of the global step.
Returns:
A `list` of tensors that will be evaluated at this step.
"""
return []
def every_n_step_end(self, step, outputs): # pylint: disable=unused-argument
"""Callback after every n'th step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Args:
step: `int`, the current value of the global step.
outputs: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
"""
return False
def every_n_post_step(self, step, session):
"""Callback after a step is finished or `end()` is called.
Args:
step: `int`, the current value of the global step.
session: `Session` object.
"""
pass
def step_begin(self, step):
"""Overrides `BaseMonitor.step_begin`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
Returns:
A `list`, the result of every_n_step_begin, if that was called this step,
or an empty list otherwise.
Raises:
ValueError: if called more than once during a step.
"""
super(EveryN, self).step_begin(step)
if (step <= self._first_n_steps or
step >= (self._every_n_steps + self._last_active_step) or
step == self._max_steps): # Note: max_steps can be None here.
self._every_n_step_begin_called = True
return self.every_n_step_begin(step)
self._every_n_step_begin_called = False
return []
def step_end(self, step, output):
"""Overrides `BaseMonitor.step_end`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`, the result of every_n_step_end, if that was called this step,
or `False` otherwise.
"""
super(EveryN, self).step_end(step, output)
if self._every_n_step_begin_called:
return self.every_n_step_end(step, output)
return False
def post_step(self, step, session):
super(EveryN, self).post_step(step, session)
if self._every_n_step_begin_called:
self.every_n_post_step(step, session)
self._last_active_step = step
self._last_successful_step = step
def end(self, session=None):
super(EveryN, self).end(session=session)
if self._last_successful_step != self._last_active_step:
self.every_n_post_step(self._last_successful_step, session)
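# Editor's sketch (illustrative, not part of this module): a minimal EveryN
# subclass that follows the "call the super implementation" contract
# documented above; the class and tensor names are hypothetical.
class _ExampleTensorLogger(EveryN):
  """Logs one tensor's value every N steps."""
  def __init__(self, tensor_name, every_n_steps=50):
    super(_ExampleTensorLogger, self).__init__(every_n_steps=every_n_steps)
    self._tensor_name = tensor_name
  def every_n_step_begin(self, step):
    super(_ExampleTensorLogger, self).every_n_step_begin(step)
    return [self._tensor_name]  # request this tensor for the next run
  def every_n_step_end(self, step, outputs):
    super(_ExampleTensorLogger, self).every_n_step_end(step, outputs)
    logging.info("step %d: %s = %s", step, self._tensor_name,
                 str(_extract_output(outputs, self._tensor_name)))
    return False  # never request early stopping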
class StopAtStep(BaseMonitor):
"""Monitor to request stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Create a StopAtStep monitor.
This monitor requests stop after either a number of steps have been
    executed or a last step has been reached. Only one of the two options can
    be specified.
    If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `step_begin()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
super(StopAtStep, self).__init__()
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
@property
def run_on_all_workers(self):
return True
def step_begin(self, step):
super(StopAtStep, self).step_begin(step)
if self._last_step is None:
self._last_step = step + self._num_steps - 1
return []
def step_end(self, step, output):
super(StopAtStep, self).step_end(step, output)
return step >= self._last_step
# TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout.
class PrintTensor(EveryN):
"""Prints given tensors every N steps.
  This is an `EveryN` monitor and has consistent semantics for `every_n`
and `first_n`.
The tensors will be printed to the log, with `INFO` severity.
"""
def __init__(self, tensor_names, every_n=100, first_n=1):
"""Initializes a PrintTensor monitor.
Args:
tensor_names: `dict` of tag to tensor names or
`iterable` of tensor names (strings).
      every_n: `int`, print every N steps. See `EveryN`.
      first_n: `int`, also print the first N steps. See `EveryN`.
"""
super(PrintTensor, self).__init__(every_n, first_n)
if not isinstance(tensor_names, dict):
tensor_names = {item: item for item in tensor_names}
self._tensor_names = tensor_names
def every_n_step_begin(self, step):
super(PrintTensor, self).every_n_step_begin(step)
return list(self._tensor_names.values())
def every_n_step_end(self, step, outputs):
super(PrintTensor, self).every_n_step_end(step, outputs)
stats = []
for tag, tensor_name in six.iteritems(self._tensor_names):
if tensor_name in outputs:
stats.append("%s = %s" % (tag,
str(_extract_output(outputs, tensor_name))))
logging.info("Step %d: %s", step, ", ".join(stats))
class LoggingTrainable(EveryN):
"""Writes trainable variable values into log every N steps.
Write the tensors in trainable variables `every_n` steps,
starting with the `first_n`th step.
"""
def __init__(self, scope=None, every_n=100, first_n=1):
"""Initializes LoggingTrainable monitor.
Args:
scope: An optional string to match variable names using re.match.
every_n: Print every N steps.
first_n: Print first N steps.
"""
super(LoggingTrainable, self).__init__(every_n, first_n)
self._scope = scope
def every_n_step_begin(self, step):
super(LoggingTrainable, self).every_n_step_begin(step)
# Get a list of trainable variables at the beginning of every N steps.
# We cannot get this in __init__ because train_op has not been generated.
trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
scope=self._scope)
self._names = {}
for var in trainables:
self._names[var.name] = var.value().name
return list(self._names.values())
def every_n_step_end(self, step, outputs):
super(LoggingTrainable, self).every_n_step_end(step, outputs)
stats = []
for tag, tensor_name in six.iteritems(self._names):
if tensor_name in outputs:
stats.append("%s = %s" % (tag,
str(_extract_output(outputs, tensor_name))))
logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats))
class SummarySaver(EveryN):
"""Saves summaries every N steps."""
def __init__(self,
summary_op,
save_steps=100,
output_dir=None,
summary_writer=None,
scaffold=None):
"""Initializes a `SummarySaver` monitor.
Args:
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `summary.scalar` or
`summary.merge_all`.
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
"""
# TODO(ipolosukhin): Implement every N seconds.
super(SummarySaver, self).__init__(every_n_steps=save_steps)
self._summary_op = summary_op
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = summary_io.SummaryWriter(output_dir)
self._scaffold = scaffold
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def set_estimator(self, estimator):
super(SummarySaver, self).set_estimator(estimator)
# TODO(mdan): This line looks redundant.
if self._summary_writer is None:
self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)
def every_n_step_begin(self, step):
super(SummarySaver, self).every_n_step_begin(step)
if self._summary_op is None and self._scaffold is not None:
self._summary_op = self._scaffold.summary_op
if self._summary_op is not None:
return [self._summary_op]
return []
def every_n_step_end(self, step, outputs):
super(SummarySaver, self).every_n_step_end(step, outputs)
if self._summary_op is not None:
summary_strs = _extract_output(outputs, self._summary_op)
if self._summary_writer:
self._summary_writer.add_summary(summary_strs, step)
return False
def end(self, session=None):
super(SummarySaver, self).end(session=session)
if self._summary_writer:
self._summary_writer.flush()
class ValidationMonitor(EveryN):
"""Runs evaluation of a given estimator, at most every N steps.
Note that the evaluation is done based on the saved checkpoint, which will
usually be older than the current step.
Can do early stopping on validation metrics if `early_stopping_rounds` is
provided.
"""
def __init__(self, x=None, y=None, input_fn=None, batch_size=None,
eval_steps=None,
every_n_steps=100, metrics=None, hooks=None,
early_stopping_rounds=None,
early_stopping_metric="loss",
early_stopping_metric_minimize=True, name=None):
"""Initializes a ValidationMonitor.
Args:
x: See `BaseEstimator.evaluate`.
y: See `BaseEstimator.evaluate`.
input_fn: See `BaseEstimator.evaluate`.
batch_size: See `BaseEstimator.evaluate`.
eval_steps: See `BaseEstimator.evaluate`.
every_n_steps: Check for new checkpoints to evaluate every N steps. If a
new checkpoint is found, it is evaluated. See `EveryN`.
metrics: See `BaseEstimator.evaluate`.
hooks: A list of `SessionRunHook` hooks to pass to the
`Estimator`'s `evaluate` function.
early_stopping_rounds: `int`. If the metric indicated by
`early_stopping_metric` does not change according to
`early_stopping_metric_minimize` for this many steps, then training
will be stopped.
early_stopping_metric: `string`, name of the metric to check for early
stopping.
early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is
expected to decrease (thus early stopping occurs when this metric
stops decreasing), False if `early_stopping_metric` is expected to
increase. Typically, `early_stopping_metric_minimize` is True for
loss metrics like mean squared error, and False for performance
metrics like accuracy.
name: See `BaseEstimator.evaluate`.
Raises:
ValueError: If both x and input_fn are provided.
"""
super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,
first_n_steps=-1)
# TODO(mdan): Checks like this are already done by evaluate.
if x is None and input_fn is None:
raise ValueError("Either x or input_fn should be provided.")
self.x = x
self.y = y
self.input_fn = input_fn
self.batch_size = batch_size
self.eval_steps = eval_steps
self.metrics = metrics
self.hooks = hooks
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_metric = early_stopping_metric
self.early_stopping_metric_minimize = early_stopping_metric_minimize
self.name = name
self._best_value_step = None
self._best_value = None
self._early_stopped = False
self._latest_path = None
self._latest_path_step = None
@property
def early_stopped(self):
"""Returns True if this monitor caused an early stop."""
return self._early_stopped
@property
def best_step(self):
"""Returns the step at which the best early stopping metric was found."""
return self._best_value_step
@property
def best_value(self):
"""Returns the best early stopping metric value found so far."""
return self._best_value
def _evaluate_estimator(self):
if isinstance(self._estimator, core_estimator.Estimator):
if any((x is not None for x in
[self.x, self.y, self.batch_size, self.metrics])):
raise ValueError(
"tf.estimator.Estimator does not support following "
"arguments: x, y, batch_size, metrics. Should set as `None` "
"in ValidationMonitor")
return self._estimator.evaluate(
input_fn=self.input_fn, steps=self.eval_steps, hooks=self.hooks,
name=self.name)
else:
return self._estimator.evaluate(
x=self.x, y=self.y, input_fn=self.input_fn,
batch_size=self.batch_size, steps=self.eval_steps,
metrics=self.metrics, hooks=self.hooks, name=self.name)
def every_n_step_end(self, step, outputs):
super(ValidationMonitor, self).every_n_step_end(step, outputs)
# TODO(mdan): The use of step below is probably misleading.
# The code should probably use the step from the checkpoint, because
# that's what is being evaluated.
if self._estimator is None:
raise ValueError("Missing call to set_estimator.")
# Check that we are not running evaluation on the same checkpoint.
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
if latest_path is None:
logging.debug("Skipping evaluation since model has not been saved yet "
"at step %d.", step)
return False
if latest_path is not None and latest_path == self._latest_path:
logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
"as for step %d.", latest_path, step,
self._latest_path_step)
return False
self._latest_path = latest_path
self._latest_path_step = step
# Run evaluation and log it.
validation_outputs = self._evaluate_estimator()
stats = []
for name in validation_outputs:
stats.append("%s = %s" % (name, str(validation_outputs[name])))
logging.info("Validation (step %d): %s", step, ", ".join(stats))
# Early stopping logic.
if self.early_stopping_rounds is not None:
if self.early_stopping_metric not in validation_outputs:
raise ValueError("Metric %s missing from outputs %s." % (
self.early_stopping_metric, set(validation_outputs.keys())))
current_value = validation_outputs[self.early_stopping_metric]
if (self._best_value is None or (self.early_stopping_metric_minimize and
(current_value < self._best_value)) or
(not self.early_stopping_metric_minimize and
(current_value > self._best_value))):
self._best_value = current_value
self._best_value_step = step
stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
if stop_now:
logging.info("Stopping. Best step: {} with {} = {}."
.format(self._best_value_step,
self.early_stopping_metric, self._best_value))
self._early_stopped = True
return True
return False
# TODO(ptucker): This really reads any tensor, not just vars, and requires the
# ':0' suffix on var_name.
class CaptureVariable(EveryN):
"""Captures a variable's values into a collection.
This monitor is useful for unit testing. You should exercise caution when
using this monitor in production, since it never discards values.
  This is an `EveryN` monitor and has consistent semantics for `every_n`
and `first_n`.
"""
def __init__(self, var_name, every_n=100, first_n=1):
"""Initializes a CaptureVariable monitor.
Args:
var_name: `string`. The variable name, including suffix (typically ":0").
      every_n: `int`, capture every N steps. See `EveryN`.
      first_n: `int`, also capture the first N steps. See `EveryN`.
"""
super(CaptureVariable, self).__init__(every_n, first_n)
self._var_name = var_name
self._var_values = {}
@property
def values(self):
"""Returns the values captured so far.
Returns:
      `dict` mapping `int` step numbers to the values of the variable at the
      respective steps.
"""
return self._var_values
def every_n_step_begin(self, step):
super(CaptureVariable, self).every_n_step_begin(step)
return [self._var_name]
def every_n_step_end(self, step, outputs):
super(CaptureVariable, self).every_n_step_end(step, outputs)
self._var_values[step] = _extract_output(outputs, self._var_name)
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
output_dir=None, summary_writer=None):
"""Returns a default set of typically-used monitors.
Args:
loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`
at the default interval.
summary_op: See `SummarySaver`.
save_summary_steps: See `SummarySaver`.
output_dir: See `SummarySaver`.
summary_writer: See `SummarySaver`.
Returns:
`list` of monitors.
"""
monitors = []
if loss_op is not None:
monitors.append(PrintTensor(tensor_names={"loss": loss_op.name}))
if summary_op is not None:
monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps,
output_dir=output_dir,
summary_writer=summary_writer))
return monitors
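# Editor's sketch (hypothetical wiring, not part of this module): how these
# monitors were typically handed to a tf.contrib.learn estimator.
def _fit_with_default_monitors(estimator, input_fn, loss_op, output_dir):
  """Runs estimator.fit with the stock loss-printing/summary monitors."""
  estimator.fit(input_fn=input_fn, steps=1000,
                monitors=get_default_monitors(loss_op=loss_op,
                                              output_dir=output_dir))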
class GraphDump(BaseMonitor):
"""Dumps almost all tensors in the graph at every step.
  Note: this is very expensive; prefer `PrintTensor` in production.
"""
IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder",
"RandomUniform", "Cast", "RestoreSlice"]
def __init__(self, ignore_ops=None):
"""Initializes GraphDump monitor.
Args:
ignore_ops: `list` of `string`. Names of ops to ignore.
If None, `GraphDump.IGNORE_OPS` is used.
"""
super(GraphDump, self).__init__()
self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS
self._data = {}
def begin(self, max_steps=None):
super(GraphDump, self).begin(max_steps=max_steps)
self._tensors = []
graph = ops.get_default_graph()
graph_def = graph.as_graph_def()
for node in graph_def.node:
if node.op in self._ignore_ops:
continue
logging.info("op=%s name=%s.", node.op, node.name)
try:
self._tensors.append(graph.get_tensor_by_name(node.name + ":0"))
except KeyError:
pass
def step_begin(self, step):
super(GraphDump, self).step_begin(step)
return self._tensors
def step_end(self, step, output):
super(GraphDump, self).step_end(step, output)
self._data[step] = output
@property
def data(self):
return self._data
# TODO(ptucker): Handle keys that are in one but not the other.
def compare(self, other_dump, step, atol=1e-06):
"""Compares two `GraphDump` monitors and returns differences.
Args:
other_dump: Another `GraphDump` monitor.
step: `int`, step to compare on.
atol: `float`, absolute tolerance in comparison of floating arrays.
Returns:
Returns tuple:
matched: `list` of keys that matched.
non_matched: `dict` of keys to tuple of 2 mismatched values.
Raises:
ValueError: if a key in `data` is missing from `other_dump` at `step`.
"""
non_matched = {}
matched = []
this_output = self.data[step] if step in self.data else {}
other_output = other_dump.data[step] if step in other_dump.data else {}
for key in this_output:
      if not isinstance(key, six.string_types):
continue
if key not in other_output:
raise ValueError("%s missing at step %s.", (key, step))
value1 = _extract_output(this_output, key)
value2 = _extract_output(other_output, key)
if isinstance(value1, str):
continue
if isinstance(value1, np.ndarray):
if not np.allclose(value1, value2, atol=atol):
non_matched[key] = value1 - value2
else:
matched.append(key)
else:
if value1 != value2:
non_matched[key] = (value1, value2)
else:
matched.append(key)
return matched, non_matched
class ExportMonitor(EveryN):
"""Monitor that exports Estimator every N steps."""
@deprecated("2017-03-25",
"ExportMonitor is deprecated. Please pass an "
"ExportStrategy to Experiment instead.")
def __init__(self,
every_n_steps,
export_dir,
input_fn=None,
input_feature_key=None,
exports_to_keep=5,
signature_fn=None,
default_batch_size=1):
"""Initializes ExportMonitor.
Args:
every_n_steps: Run monitor every N steps.
export_dir: str, folder to export.
input_fn: A function that takes no argument and returns a tuple of
(features, labels), where features is a dict of string key to `Tensor`
and labels is a `Tensor` that's currently not used (and so can be
`None`).
input_feature_key: String key into the features dict returned by
`input_fn` that corresponds to the raw `Example` strings `Tensor` that
the exported model will take as input. Should be `None` if and only if
you're passing in a `signature_fn` that does not use the first arg
(`Tensor` of `Example` strings).
exports_to_keep: int, number of exports to keep.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `dict` of `Tensor`s for predictions.
default_batch_size: Default batch size of the `Example` placeholder.
Raises:
ValueError: If `input_fn` and `input_feature_key` are not both defined or
are not both `None`.
"""
super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
self._export_dir = export_dir
self._input_fn = input_fn
self._input_feature_key = input_feature_key
self._use_deprecated_input_fn = input_fn is None
self._exports_to_keep = exports_to_keep
self._signature_fn = signature_fn
self._default_batch_size = default_batch_size
self._last_export_dir = None
@property
def export_dir(self):
return self._export_dir
@property
def exports_to_keep(self):
return self._exports_to_keep
@property
def signature_fn(self):
return self._signature_fn
@property
def last_export_dir(self):
"""Returns the directory containing the last completed export.
Returns:
The string path to the exported directory. NB: this functionality was
added on 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because the
estimator being fitted does not yet return a value during export.
"""
return self._last_export_dir
def every_n_step_end(self, step, outputs):
super(ExportMonitor, self).every_n_step_end(step, outputs)
try:
if isinstance(self._estimator, core_estimator.Estimator):
raise ValueError(
"ExportMonitor does not support `tf.estimator.Estimator. `. "
"Please pass an ExportStrategy to Experiment instead.")
self._last_export_dir = self._estimator.export(
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
input_fn=self._input_fn,
default_batch_size=self._default_batch_size,
input_feature_key=self._input_feature_key,
use_deprecated_input_fn=self._use_deprecated_input_fn)
except RuntimeError:
      # Currently we are not synchronized with checkpoint saving, which leads
      # to runtime errors when export is called at the same global step.
      # Exports depend on saved checkpoints for constructing the graph and
      # getting the global step from the graph instance saved in the
      # checkpoint. If the checkpoint is stale with respect to the current
      # step, the global step is taken to be the last saved checkpoint's
      # global step, and the exporter refuses to export the same checkpoint
      # again, raising the RuntimeError caught here.
logging.info("Skipping exporting because the existing checkpoint has "
"already been exported. "
"Consider exporting less frequently.")
def end(self, session=None):
super(ExportMonitor, self).end(session=session)
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
if latest_path is None:
logging.info("Skipping export at the end since model has not been saved "
"yet.")
return
if isinstance(self._estimator, core_estimator.Estimator):
raise ValueError(
"ExportMonitor does not support `tf.estimator.Estimator. `. "
"Please pass an ExportStrategy to Experiment instead.")
try:
self._last_export_dir = self._estimator.export(
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
input_fn=self._input_fn,
default_batch_size=self._default_batch_size,
input_feature_key=self._input_feature_key,
use_deprecated_input_fn=self._use_deprecated_input_fn)
except RuntimeError:
logging.info("Skipping exporting for the same step.")
class CheckpointSaver(BaseMonitor):
"""Saves checkpoints every N steps or N seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
"""Initialize CheckpointSaver monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: If both `save_steps` and `save_secs` are not `None`.
ValueError: If both `save_steps` and `save_secs` are `None`.
"""
logging.info("Create CheckpointSaver.")
super(CheckpointSaver, self).__init__()
self._saver = saver
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
    if save_steps is None and save_secs is None:
      raise ValueError("Either save_steps or save_secs should be provided.")
    if (save_steps is not None) and (save_secs is not None):
      raise ValueError("Cannot provide both save_steps and save_secs.")
def begin(self, max_steps=None):
super(CheckpointSaver, self).begin(max_steps)
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
def step_begin(self, step):
super(CheckpointSaver, self).step_begin(step)
self._last_begin_step = step
def post_step(self, step, session):
super(CheckpointSaver, self).post_step(step, session)
if self._last_saved_time is None:
self._save(step, session)
if self._save_steps is not None:
if step >= self._last_saved_step + self._save_steps:
self._save(step, session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(step, session)
def end(self, session=None):
super(CheckpointSaver, self).end(session)
self._save(self._last_begin_step, session)
def _save(self, step, session):
"""Saves the latest checkpoint."""
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
class StepCounter(EveryN):
"""Steps per second monitor."""
def __init__(self, every_n_steps=100, output_dir=None,
summary_writer=None):
super(StepCounter, self).__init__(every_n_steps=every_n_steps)
self._summary_tag = "global_step/sec"
self._last_reported_step = None
self._last_reported_time = None
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def set_estimator(self, estimator):
super(StepCounter, self).set_estimator(estimator)
if self._summary_writer is None:
self._summary_writer = SummaryWriterCache.get(estimator.model_dir)
def every_n_step_end(self, current_step, outputs):
current_time = time.time()
if self._last_reported_time is not None and self._summary_writer:
added_steps = current_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, current_step)
self._last_reported_step = current_step
self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanLoss(EveryN):
"""NaN Loss monitor.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
"""Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
every_n_steps: `int`, run check every this many steps.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
super(NanLoss, self).__init__(every_n_steps=every_n_steps)
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def every_n_step_begin(self, step):
super(NanLoss, self).every_n_step_begin(step)
return [self._loss_tensor]
def every_n_step_end(self, step, outputs):
super(NanLoss, self).every_n_step_end(step, outputs)
if np.isnan(_extract_output(outputs, self._loss_tensor)):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
        # We don't raise an error; instead we return True ("should stop") so
        # training stops without an exception.
return True
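# Hedged usage sketch (added for illustration; `estimator`, `input_fn` and
# `my_loss` are hypothetical). With fail_on_nan_loss=False the monitor logs
# a warning and requests a stop instead of raising NanLossDuringTrainingError:
#
#   loss_monitor = NanLoss(loss_tensor=my_loss, every_n_steps=50,
#                          fail_on_nan_loss=False)
#   estimator.fit(input_fn=input_fn, steps=1000, monitors=[loss_monitor])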
class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
"""Wraps monitors into a SessionRunHook."""
def __init__(self, monitors):
self._monitors = monitors
def begin(self):
self._last_step = None
self._global_step_tensor = contrib_variables.get_global_step()
for m in self._monitors:
m.begin(max_steps=None)
def before_run(self, run_context):
if self._last_step is None:
self._last_step = run_context.session.run(self._global_step_tensor) + 1
request = {self._global_step_tensor: self._global_step_tensor}
monitor_fetches = []
for m in self._monitors:
monitor_requests = m.step_begin(self._last_step)
if monitor_requests:
if not isinstance(monitor_requests, list):
raise ValueError("Monitor.step_begin should return a list.")
monitor_fetches.extend(monitor_requests)
if monitor_fetches:
request["monitors"] = dict(
zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))
return session_run_hook.SessionRunArgs(request)
def after_run(self, run_context, run_values):
result = run_values.results[
"monitors"] if "monitors" in run_values.results else {}
for m in self._monitors:
induce_stop = m.step_end(self._last_step, result)
if induce_stop:
run_context.request_stop()
for m in self._monitors:
m.post_step(self._last_step, run_context.session)
self._last_step = run_values.results[self._global_step_tensor] + 1
def end(self, session):
self._last_step = None
for m in self._monitors:
if "session" in tf_inspect.getargspec(m.end).args:
m.end(session=session)
else:
m.end()
def replace_monitors_with_hooks(monitors_or_hooks, estimator):
"""Wraps monitors with a hook.
  `Monitor` is deprecated in favor of `SessionRunHook`. If you're using a
  monitor, you can use this function to wrap it as a hook. It is recommended
  to implement a hook version of your monitor.
Args:
    monitors_or_hooks: A `list` that may contain both monitors and hooks.
    estimator: An `Estimator` that the monitors will be used with.
Returns:
    A list of hooks. Any monitor in the given list is replaced by a hook.
"""
monitors_or_hooks = monitors_or_hooks or []
hooks = [
m for m in monitors_or_hooks
if isinstance(m, session_run_hook.SessionRunHook)
]
deprecated_monitors = [
m for m in monitors_or_hooks
if not isinstance(m, session_run_hook.SessionRunHook)
]
if not estimator.config.is_chief:
    # Prune the list of monitors to the ones runnable on all workers.
deprecated_monitors = [
m for m in deprecated_monitors if m.run_on_all_workers
]
# Setup monitors.
for monitor in deprecated_monitors:
monitor.set_estimator(estimator)
if deprecated_monitors:
hooks.append(RunHookAdapterForMonitors(deprecated_monitors))
return hooks
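# Hedged usage sketch (added for illustration; `my_estimator` and `stop_hook`
# are hypothetical). A mixed list is normalized so that only hooks remain:
#
#   mixed = [StepCounter(every_n_steps=10),  # deprecated Monitor
#            stop_hook]                      # any SessionRunHook instance
#   hooks = replace_monitors_with_hooks(mixed, my_estimator)
#   # `hooks` is now [stop_hook, RunHookAdapterForMonitors([<StepCounter>])]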
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
  # Check that there is no :1 (i.e. the op has a single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
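# Hedged example (added for illustration; assumes a default graph containing
# a single-output op named "my_loss"). Bare names resolve by appending ":0":
#
#   t = _as_graph_element("my_loss")        # same as "my_loss:0"
#   assert t is _as_graph_element("my_loss:0")
#   # An op with two or more outputs raises ValueError ("Name ... ambiguous").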
|
jinluyuan/osf.io
|
refs/heads/develop
|
website/addons/wiki/tests/config.py
|
66
|
EXAMPLE_DOCS = [ # Collection stored as "docs"
{
"_data": "one two",
"_type": "http://sharejs.org/types/textv1",
"_v": 8,
"_m": {
"mtime": 1415654366808,
"ctime": 1415654358668
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"_data": "XXX",
"_type": "http://sharejs.org/types/textv1",
"_v": 4,
"_m": {
"mtime": 1415654385628,
"ctime": 1415654381131
},
"_id": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a"
}
]
EXAMPLE_OPS = [ # Collection stored as "docs_ops"
{
"op": None,
"v": 0,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 1,
"create": {
"type": "http://sharejs.org/types/textv1",
"data": None
},
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654358667
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v0",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": [
"o"
],
"v": 1,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 2,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654363751
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v1",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": [
1,
"n"
],
"v": 2,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 3,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654363838
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v2",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": [
2,
"e"
],
"v": 3,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 4,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654364007
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v3",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": [
3,
" "
],
"v": 4,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 5,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654366367
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v4",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": [
4,
"t"
],
"v": 5,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 6,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654366542
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v5",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": [
5,
"w"
],
"v": 6,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 7,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654366678
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v6",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": [
6,
"o"
],
"v": 7,
"src": "94ae709f9736c24d821301de2dfc71df",
"seq": 8,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654366808
},
"_id": "26aabd89-541b-5c02-9e6a-ad332ba43118 v7",
"name": "26aabd89-541b-5c02-9e6a-ad332ba43118"
},
{
"op": None,
"v": 0,
"src": "166028c1b14818475eec6fab9720af7b",
"seq": 1,
"create": {
"type": "http://sharejs.org/types/textv1",
"data": None
},
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654381130
},
"_id": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a v0",
"name": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a"
},
{
"op": [
"X"
],
"v": 1,
"src": "166028c1b14818475eec6fab9720af7b",
"seq": 2,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654384929
},
"_id": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a v1",
"name": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a"
},
{
"op": [
1,
"X"
],
"v": 2,
"src": "166028c1b14818475eec6fab9720af7b",
"seq": 3,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654385266
},
"_id": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a v2",
"name": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a"
},
{
"op": [
2,
"X"
],
"v": 3,
"src": "166028c1b14818475eec6fab9720af7b",
"seq": 4,
"preValidate": None,
"validate": None,
"m": {
"ts": 1415654385626
},
"_id": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a v3",
"name": "9a68120a-d3c5-5ba6-b399-fe39e8f2028a"
}
]
EXAMPLE_DOCS_6 = {
"_id": "9a247ce9-b219-5f7d-b2c8-ef31661b38d7",
"data": {
"v": 20,
"meta": {
"mtime": 1413229471447.0,
"ctime": 1413229471447.0,
},
"snapshot": "one two three four! ",
"type": "text",
}
}
|
tuxfux-hlp-notes/python-batches
|
refs/heads/master
|
archieves/batch-59/files/myvenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langthaimodel.py
|
2929
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage Return / Line Feed
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
|
has2k1/numpy
|
refs/heads/master
|
numpy/distutils/command/build_src.py
|
141
|
""" Build swig, f2py, pyrex sources.
"""
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import shlex
import copy
from distutils.command import build_ext
from distutils.dep_util import newer_group, newer
from distutils.util import get_platform
from distutils.errors import DistutilsError, DistutilsSetupError
def have_pyrex():
try:
import Pyrex.Compiler.Main
return True
except ImportError:
return False
# this import can't be done here, as it uses numpy stuff only available
# after it's installed
#import numpy.f2py
from numpy.distutils import log
from numpy.distutils.misc_util import fortran_ext_match, \
appendpath, is_string, is_sequence, get_cmd
from numpy.distutils.from_template import process_file as process_f_file
from numpy.distutils.conv_template import process_file as process_c_file
def subst_vars(target, source, d):
"""Substitute any occurence of @foo@ by d['foo'] from source file into
target."""
var = re.compile('@([a-zA-Z_]+)@')
fs = open(source, 'r')
try:
ft = open(target, 'w')
try:
for l in fs:
m = var.search(l)
if m:
ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
else:
ft.write(l)
finally:
ft.close()
finally:
fs.close()
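def _demo_subst_vars():
    """Hedged example added for illustration (not part of the original file).

    Shows subst_vars replacing an @version@ marker; uses a temp directory so
    it is self-contained and has no effect on a real build.
    """
    import tempfile
    tmpdir = tempfile.mkdtemp()
    source = os.path.join(tmpdir, 'config.h.in')
    target = os.path.join(tmpdir, 'config.h')
    with open(source, 'w') as f:
        f.write('#define NPY_VERSION "@version@"\n')
    subst_vars(target, source, {'version': '1.0'})
    with open(target) as f:
        return f.read()  # -> '#define NPY_VERSION "1.0"\n'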
class build_src(build_ext.build_ext):
description = "build sources from SWIG, F2PY files or a function"
user_options = [
('build-src=', 'd', "directory to \"build\" sources to"),
('f2py-opts=', None, "list of f2py command line options"),
('swig=', None, "path to the SWIG executable"),
('swig-opts=', None, "list of SWIG command line options"),
('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
('force', 'f', "forcibly build everything (ignore file timestamps)"),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
]
boolean_options = ['force', 'inplace']
help_options = []
def initialize_options(self):
self.extensions = None
self.package = None
self.py_modules = None
self.py_modules_dict = None
self.build_src = None
self.build_lib = None
self.build_base = None
self.force = None
self.inplace = None
self.package_dir = None
self.f2pyflags = None # obsolete
self.f2py_opts = None
self.swigflags = None # obsolete
self.swig_opts = None
self.swig_cpp = None
self.swig = None
def finalize_options(self):
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'),
('force', 'force'))
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
self.libraries = self.distribution.libraries or []
self.py_modules = self.distribution.py_modules or []
self.data_files = self.distribution.data_files or []
if self.build_src is None:
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
# py_modules_dict is used in build_py.find_package_modules
self.py_modules_dict = {}
if self.f2pyflags:
if self.f2py_opts:
log.warn('ignoring --f2pyflags as --f2py-opts already used')
else:
self.f2py_opts = self.f2pyflags
self.f2pyflags = None
if self.f2py_opts is None:
self.f2py_opts = []
else:
self.f2py_opts = shlex.split(self.f2py_opts)
if self.swigflags:
if self.swig_opts:
log.warn('ignoring --swigflags as --swig-opts already used')
else:
self.swig_opts = self.swigflags
self.swigflags = None
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = shlex.split(self.swig_opts)
# use options from build_ext command
build_ext = self.get_finalized_command('build_ext')
if self.inplace is None:
self.inplace = build_ext.inplace
if self.swig_cpp is None:
self.swig_cpp = build_ext.swig_cpp
for c in ['swig', 'swig_opt']:
o = '--'+c.replace('_', '-')
v = getattr(build_ext, c, None)
if v:
if getattr(self, c):
log.warn('both build_src and build_ext define %s option' % (o))
else:
log.info('using "%s=%s" option from build_ext command' % (o, v))
setattr(self, c, v)
def run(self):
log.info("build_src")
if not (self.extensions or self.libraries):
return
self.build_sources()
def build_sources(self):
if self.inplace:
self.get_package_dir = \
self.get_finalized_command('build_py').get_package_dir
self.build_py_modules_sources()
for libname_info in self.libraries:
self.build_library_sources(*libname_info)
if self.extensions:
self.check_extensions_list(self.extensions)
for ext in self.extensions:
self.build_extension_sources(ext)
self.build_data_files_sources()
self.build_npy_pkg_config()
def build_data_files_sources(self):
if not self.data_files:
return
log.info('building data_files sources')
from numpy.distutils.misc_util import get_data_files
new_data_files = []
for data in self.data_files:
if isinstance(data, str):
new_data_files.append(data)
elif isinstance(data, tuple):
d, files = data
if self.inplace:
build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
else:
build_dir = os.path.join(self.build_src, d)
funcs = [f for f in files if hasattr(f, '__call__')]
files = [f for f in files if not hasattr(f, '__call__')]
for f in funcs:
if f.__code__.co_argcount==1:
s = f(build_dir)
else:
s = f()
if s is not None:
if isinstance(s, list):
files.extend(s)
elif isinstance(s, str):
files.append(s)
else:
raise TypeError(repr(s))
filenames = get_data_files((d, files))
new_data_files.append((d, filenames))
else:
raise TypeError(repr(data))
self.data_files[:] = new_data_files
def _build_npy_pkg_config(self, info, gd):
import shutil
template, install_dir, subst_dict = info
template_dir = os.path.dirname(template)
for k, v in gd.items():
subst_dict[k] = v
if self.inplace == 1:
generated_dir = os.path.join(template_dir, install_dir)
else:
generated_dir = os.path.join(self.build_src, template_dir,
install_dir)
generated = os.path.basename(os.path.splitext(template)[0])
generated_path = os.path.join(generated_dir, generated)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
subst_vars(generated_path, template, subst_dict)
# Where to install relatively to install prefix
full_install_dir = os.path.join(template_dir, install_dir)
return full_install_dir, generated_path
def build_npy_pkg_config(self):
log.info('build_src: building npy-pkg config files')
        # XXX: another ugly workaround to circumvent distutils brain damage. We
        # need the install prefix here, but finalizing the options of the
        # install command when only building sources causes an error. Instead,
        # we copy the install command instance, and finalize the copy so that
        # it does not disrupt how distutils wants to do things with the
        # original install command instance.
install_cmd = copy.copy(get_cmd('install'))
if not install_cmd.finalized == 1:
install_cmd.finalize_options()
build_npkg = False
gd = {}
if self.inplace == 1:
top_prefix = '.'
build_npkg = True
elif hasattr(install_cmd, 'install_libbase'):
top_prefix = install_cmd.install_libbase
build_npkg = True
if build_npkg:
for pkg, infos in self.distribution.installed_pkg_config.items():
pkg_path = self.distribution.package_dir[pkg]
prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
d = {'prefix': prefix}
for info in infos:
install_dir, generated = self._build_npy_pkg_config(info, d)
self.distribution.data_files.append((install_dir,
[generated]))
def build_py_modules_sources(self):
if not self.py_modules:
return
log.info('building py_modules sources')
new_py_modules = []
for source in self.py_modules:
if is_sequence(source) and len(source)==3:
package, module_base, source = source
if self.inplace:
build_dir = self.get_package_dir(package)
else:
build_dir = os.path.join(self.build_src,
os.path.join(*package.split('.')))
if hasattr(source, '__call__'):
target = os.path.join(build_dir, module_base + '.py')
source = source(target)
if source is None:
continue
modules = [(package, module_base, source)]
if package not in self.py_modules_dict:
self.py_modules_dict[package] = []
self.py_modules_dict[package] += modules
else:
new_py_modules.append(source)
self.py_modules[:] = new_py_modules
def build_library_sources(self, lib_name, build_info):
sources = list(build_info.get('sources', []))
if not sources:
return
log.info('building library "%s" sources' % (lib_name))
sources = self.generate_sources(sources, (lib_name, build_info))
sources = self.template_sources(sources, (lib_name, build_info))
sources, h_files = self.filter_h_files(sources)
if h_files:
log.info('%s - nothing done with h_files = %s',
self.package, h_files)
#for f in h_files:
# self.distribution.headers.append((lib_name,f))
build_info['sources'] = sources
return
def build_extension_sources(self, ext):
sources = list(ext.sources)
log.info('building extension "%s" sources' % (ext.name))
fullname = self.get_ext_fullname(ext.name)
modpath = fullname.split('.')
package = '.'.join(modpath[0:-1])
if self.inplace:
self.ext_target_dir = self.get_package_dir(package)
sources = self.generate_sources(sources, ext)
sources = self.template_sources(sources, ext)
sources = self.swig_sources(sources, ext)
sources = self.f2py_sources(sources, ext)
sources = self.pyrex_sources(sources, ext)
sources, py_files = self.filter_py_files(sources)
if package not in self.py_modules_dict:
self.py_modules_dict[package] = []
modules = []
for f in py_files:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
self.py_modules_dict[package] += modules
sources, h_files = self.filter_h_files(sources)
if h_files:
log.info('%s - nothing done with h_files = %s',
package, h_files)
#for f in h_files:
# self.distribution.headers.append((package,f))
ext.sources = sources
def generate_sources(self, sources, extension):
new_sources = []
func_sources = []
for source in sources:
if is_string(source):
new_sources.append(source)
else:
func_sources.append(source)
if not func_sources:
return new_sources
if self.inplace and not is_sequence(extension):
build_dir = self.ext_target_dir
else:
if is_sequence(extension):
name = extension[0]
# if 'include_dirs' not in extension[1]:
# extension[1]['include_dirs'] = []
# incl_dirs = extension[1]['include_dirs']
else:
name = extension.name
# incl_dirs = extension.include_dirs
#if self.build_src not in incl_dirs:
# incl_dirs.append(self.build_src)
build_dir = os.path.join(*([self.build_src]\
+name.split('.')[:-1]))
self.mkpath(build_dir)
for func in func_sources:
source = func(extension, build_dir)
if not source:
continue
if is_sequence(source):
[log.info(" adding '%s' to sources." % (s,)) for s in source]
new_sources.extend(source)
else:
log.info(" adding '%s' to sources." % (source,))
new_sources.append(source)
return new_sources
def filter_py_files(self, sources):
return self.filter_files(sources, ['.py'])
def filter_h_files(self, sources):
return self.filter_files(sources, ['.h', '.hpp', '.inc'])
def filter_files(self, sources, exts = []):
new_sources = []
files = []
for source in sources:
(base, ext) = os.path.splitext(source)
if ext in exts:
files.append(source)
else:
new_sources.append(source)
return new_sources, files
def template_sources(self, sources, extension):
new_sources = []
if is_sequence(extension):
depends = extension[1].get('depends')
include_dirs = extension[1].get('include_dirs')
else:
depends = extension.depends
include_dirs = extension.include_dirs
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.src': # Template file
if self.inplace:
target_dir = os.path.dirname(base)
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
self.mkpath(target_dir)
target_file = os.path.join(target_dir, os.path.basename(base))
if (self.force or newer_group([source] + depends, target_file)):
if _f_pyf_ext_match(base):
log.info("from_template:> %s" % (target_file))
outstr = process_f_file(source)
else:
log.info("conv_template:> %s" % (target_file))
outstr = process_c_file(source)
fid = open(target_file, 'w')
fid.write(outstr)
fid.close()
if _header_ext_match(target_file):
d = os.path.dirname(target_file)
if d not in include_dirs:
log.info(" adding '%s' to include_dirs." % (d))
include_dirs.append(d)
new_sources.append(target_file)
else:
new_sources.append(source)
return new_sources
def pyrex_sources(self, sources, extension):
new_sources = []
ext_name = extension.name.split('.')[-1]
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.pyx':
target_file = self.generate_a_pyrex_source(base, ext_name,
source,
extension)
new_sources.append(target_file)
else:
new_sources.append(source)
return new_sources
def generate_a_pyrex_source(self, base, ext_name, source, extension):
if self.inplace or not have_pyrex():
target_dir = os.path.dirname(base)
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
target_file = os.path.join(target_dir, ext_name + '.c')
depends = [source] + extension.depends
if self.force or newer_group(depends, target_file, 'newer'):
if have_pyrex():
import Pyrex.Compiler.Main
log.info("pyrexc:> %s" % (target_file))
self.mkpath(target_dir)
options = Pyrex.Compiler.Main.CompilationOptions(
defaults=Pyrex.Compiler.Main.default_options,
include_path=extension.include_dirs,
output_file=target_file)
pyrex_result = Pyrex.Compiler.Main.compile(source,
options=options)
if pyrex_result.num_errors != 0:
raise DistutilsError("%d errors while compiling %r with Pyrex" \
% (pyrex_result.num_errors, source))
elif os.path.isfile(target_file):
log.warn("Pyrex required for compiling %r but not available,"\
" using old target %r"\
% (source, target_file))
else:
raise DistutilsError("Pyrex required for compiling %r"\
" but notavailable" % (source,))
return target_file
def f2py_sources(self, sources, extension):
new_sources = []
f2py_sources = []
f_sources = []
f2py_targets = {}
target_dirs = []
ext_name = extension.name.split('.')[-1]
skip_f2py = 0
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.pyf': # F2PY interface file
if self.inplace:
target_dir = os.path.dirname(base)
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
if os.path.isfile(source):
name = get_f2py_modulename(source)
if name != ext_name:
raise DistutilsSetupError('mismatch of extension names: %s '
'provides %r but expected %r' % (
source, name, ext_name))
target_file = os.path.join(target_dir, name+'module.c')
else:
log.debug(' source %s does not exist: skipping f2py\'ing.' \
% (source))
name = ext_name
skip_f2py = 1
target_file = os.path.join(target_dir, name+'module.c')
if not os.path.isfile(target_file):
log.warn(' target %s does not exist:\n '\
'Assuming %smodule.c was generated with '\
'"build_src --inplace" command.' \
% (target_file, name))
target_dir = os.path.dirname(base)
target_file = os.path.join(target_dir, name+'module.c')
if not os.path.isfile(target_file):
raise DistutilsSetupError("%r missing" % (target_file,))
log.info(' Yes! Using %r as up-to-date target.' \
% (target_file))
target_dirs.append(target_dir)
f2py_sources.append(source)
f2py_targets[source] = target_file
new_sources.append(target_file)
elif fortran_ext_match(ext):
f_sources.append(source)
else:
new_sources.append(source)
if not (f2py_sources or f_sources):
return new_sources
for d in target_dirs:
self.mkpath(d)
f2py_options = extension.f2py_options + self.f2py_opts
if self.distribution.libraries:
for name, build_info in self.distribution.libraries:
if name in extension.libraries:
f2py_options.extend(build_info.get('f2py_options', []))
log.info("f2py options: %s" % (f2py_options))
if f2py_sources:
if len(f2py_sources) != 1:
raise DistutilsSetupError(
'only one .pyf file is allowed per extension module but got'\
' more: %r' % (f2py_sources,))
source = f2py_sources[0]
target_file = f2py_targets[source]
target_dir = os.path.dirname(target_file) or '.'
depends = [source] + extension.depends
if (self.force or newer_group(depends, target_file, 'newer')) \
and not skip_f2py:
log.info("f2py: %s" % (source))
import numpy.f2py
numpy.f2py.run_main(f2py_options
+ ['--build-dir', target_dir, source])
else:
log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
else:
#XXX TODO: --inplace support for sdist command
if is_sequence(extension):
name = extension[0]
else: name = extension.name
target_dir = os.path.join(*([self.build_src]\
+name.split('.')[:-1]))
target_file = os.path.join(target_dir, ext_name + 'module.c')
new_sources.append(target_file)
depends = f_sources + extension.depends
if (self.force or newer_group(depends, target_file, 'newer')) \
and not skip_f2py:
log.info("f2py:> %s" % (target_file))
self.mkpath(target_dir)
import numpy.f2py
numpy.f2py.run_main(f2py_options + ['--lower',
'--build-dir', target_dir]+\
['-m', ext_name]+f_sources)
else:
log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
% (target_file))
if not os.path.isfile(target_file):
raise DistutilsError("f2py target file %r not generated" % (target_file,))
target_c = os.path.join(self.build_src, 'fortranobject.c')
target_h = os.path.join(self.build_src, 'fortranobject.h')
log.info(" adding '%s' to sources." % (target_c))
new_sources.append(target_c)
if self.build_src not in extension.include_dirs:
log.info(" adding '%s' to include_dirs." \
% (self.build_src))
extension.include_dirs.append(self.build_src)
if not skip_f2py:
import numpy.f2py
d = os.path.dirname(numpy.f2py.__file__)
source_c = os.path.join(d, 'src', 'fortranobject.c')
source_h = os.path.join(d, 'src', 'fortranobject.h')
if newer(source_c, target_c) or newer(source_h, target_h):
self.mkpath(os.path.dirname(target_c))
self.copy_file(source_c, target_c)
self.copy_file(source_h, target_h)
else:
if not os.path.isfile(target_c):
raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
if not os.path.isfile(target_h):
raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
filename = os.path.join(target_dir, ext_name + name_ext)
if os.path.isfile(filename):
log.info(" adding '%s' to sources." % (filename))
f_sources.append(filename)
return new_sources + f_sources
def swig_sources(self, sources, extension):
# Assuming SWIG 1.3.14 or later. See compatibility note in
# http://www.swig.org/Doc1.3/Python.html#Python_nn6
new_sources = []
swig_sources = []
swig_targets = {}
target_dirs = []
py_files = [] # swig generated .py files
target_ext = '.c'
if '-c++' in extension.swig_opts:
typ = 'c++'
is_cpp = True
extension.swig_opts.remove('-c++')
elif self.swig_cpp:
typ = 'c++'
is_cpp = True
else:
typ = None
is_cpp = False
skip_swig = 0
ext_name = extension.name.split('.')[-1]
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.i': # SWIG interface file
                # the code below assumes that the sources list
                # contains no more than one .i SWIG interface file
if self.inplace:
target_dir = os.path.dirname(base)
py_target_dir = self.ext_target_dir
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
py_target_dir = target_dir
if os.path.isfile(source):
name = get_swig_modulename(source)
if name != ext_name[1:]:
raise DistutilsSetupError(
'mismatch of extension names: %s provides %r'
' but expected %r' % (source, name, ext_name[1:]))
if typ is None:
typ = get_swig_target(source)
is_cpp = typ=='c++'
else:
typ2 = get_swig_target(source)
if typ2 is None:
log.warn('source %r does not define swig target, assuming %s swig target' \
% (source, typ))
elif typ!=typ2:
log.warn('expected %r but source %r defines %r swig target' \
% (typ, source, typ2))
if typ2=='c++':
log.warn('resetting swig target to c++ (some targets may have .c extension)')
is_cpp = True
else:
log.warn('assuming that %r has c++ swig target' % (source))
if is_cpp:
target_ext = '.cpp'
target_file = os.path.join(target_dir, '%s_wrap%s' \
% (name, target_ext))
else:
log.warn(' source %s does not exist: skipping swig\'ing.' \
% (source))
name = ext_name[1:]
skip_swig = 1
target_file = _find_swig_target(target_dir, name)
if not os.path.isfile(target_file):
log.warn(' target %s does not exist:\n '\
'Assuming %s_wrap.{c,cpp} was generated with '\
'"build_src --inplace" command.' \
% (target_file, name))
target_dir = os.path.dirname(base)
target_file = _find_swig_target(target_dir, name)
if not os.path.isfile(target_file):
raise DistutilsSetupError("%r missing" % (target_file,))
log.warn(' Yes! Using %r as up-to-date target.' \
% (target_file))
target_dirs.append(target_dir)
new_sources.append(target_file)
py_files.append(os.path.join(py_target_dir, name+'.py'))
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
if skip_swig:
return new_sources + py_files
for d in target_dirs:
self.mkpath(d)
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"] + extension.swig_opts
if is_cpp:
swig_cmd.append('-c++')
for d in extension.include_dirs:
swig_cmd.append('-I'+d)
for source in swig_sources:
target = swig_targets[source]
depends = [source] + extension.depends
if self.force or newer_group(depends, target, 'newer'):
log.info("%s: %s" % (os.path.basename(swig) \
+ (is_cpp and '++' or ''), source))
self.spawn(swig_cmd + self.swig_opts \
+ ["-o", target, '-outdir', py_target_dir, source])
else:
log.debug(" skipping '%s' swig interface (up-to-date)" \
% (source))
return new_sources + py_files
_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match
#### SWIG related auxiliary functions ####
_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
re.I).match
_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search
_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search
def get_swig_target(source):
f = open(source, 'r')
result = None
line = f.readline()
if _has_cpp_header(line):
result = 'c++'
if _has_c_header(line):
result = 'c'
f.close()
return result
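# Hedged example (added for illustration): get_swig_target only inspects the
# first line of the interface file for an Emacs-style mode marker, e.g.
#
#   // -*- c++ -*-   on the first line  -> returns 'c++'
#   /* -*- c -*- */  on the first line  -> returns 'c'
#   no marker on the first line         -> returns None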
def get_swig_modulename(source):
f = open(source, 'r')
name = None
for line in f:
m = _swig_module_name_match(line)
if m:
name = m.group('name')
break
f.close()
return name
def _find_swig_target(target_dir, name):
for ext in ['.cpp', '.c']:
target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
if os.path.isfile(target):
break
return target
#### F2PY related auxiliary functions ####
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
re.I).match
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
                                          r'__user__[\w_]*)', re.I).match
def get_f2py_modulename(source):
name = None
f = open(source)
for line in f:
m = _f2py_module_name_match(line)
if m:
if _f2py_user_module_name_match(line): # skip *__user__* names
continue
name = m.group('name')
break
f.close()
return name
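# Hedged example (added for illustration; "mymod" is a made-up module name).
# The regexes above can be exercised directly on a line from a .pyf file:
#
#   m = _f2py_module_name_match('python module mymod')
#   assert m.group('name') == 'mymod'
#   # *__user__* modules match the second regex and are skipped:
#   assert _f2py_user_module_name_match('python module mymod__user__routines')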
##########################################
|
aam-at/tensorflow
|
refs/heads/master
|
tensorflow/python/lib/io/file_io.py
|
3
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File IO methods that wrap the C++ FileSystem API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import os
import uuid
import six
from tensorflow.python import _pywrap_file_io
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# A good default block size depends on the system in question.
# A somewhat conservative default chosen here.
_DEFAULT_BLOCK_SIZE = 16 * 1024 * 1024
class FileIO(object):
"""FileIO class that exposes methods to read / write to / from files.
The constructor takes the following arguments:
name: [path-like object](https://docs.python.org/3/glossary.html#term-path-like-object)
giving the pathname of the file to be opened.
mode: one of `r`, `w`, `a`, `r+`, `w+`, `a+`. Append `b` for bytes mode.
Can be used as an iterator to iterate over lines in the file.
  The default buffer size for the BufferedInputStream used to read
  the file line by line is 1024 * 512 bytes.
"""
def __init__(self, name, mode):
self.__name = name
self.__mode = mode
self._read_buf = None
self._writable_file = None
self._binary_mode = "b" in mode
mode = mode.replace("b", "")
if mode not in ("r", "w", "a", "r+", "w+", "a+"):
raise errors.InvalidArgumentError(
None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
self._read_check_passed = mode in ("r", "r+", "a+", "w+")
self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")
@property
def name(self):
"""Returns the file name."""
return self.__name
@property
def mode(self):
"""Returns the mode in which the file was opened."""
return self.__mode
def _preread_check(self):
if not self._read_buf:
if not self._read_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for reading")
self._read_buf = _pywrap_file_io.BufferedInputStream(
compat.path_to_str(self.__name), 1024 * 512)
def _prewrite_check(self):
if not self._writable_file:
if not self._write_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for writing")
self._writable_file = _pywrap_file_io.WritableFile(
compat.path_to_bytes(self.__name), compat.as_bytes(self.__mode))
def _prepare_value(self, val):
if self._binary_mode:
return compat.as_bytes(val)
else:
return compat.as_str_any(val)
def size(self):
"""Returns the size of the file."""
return stat(self.__name).length
def write(self, file_content):
"""Writes file_content to the file. Appends to the end of the file."""
self._prewrite_check()
self._writable_file.append(compat.as_bytes(file_content))
def read(self, n=-1):
"""Returns the contents of a file as a string.
Starts reading from current position in file.
Args:
n: Read `n` bytes if `n != -1`. If `n = -1`, reads to end of file.
Returns:
`n` bytes of the file (or whole file) in bytes mode or `n` bytes of the
string if in string (regular) mode.
"""
self._preread_check()
if n == -1:
length = self.size() - self.tell()
else:
length = n
return self._prepare_value(self._read_buf.read(length))
@deprecation.deprecated_args(
None, "position is deprecated in favor of the offset argument.",
"position")
def seek(self, offset=None, whence=0, position=None):
# TODO(jhseu): Delete later. Used to omit `position` from docs.
# pylint: disable=g-doc-args
"""Seeks to the offset in the file.
Args:
offset: The byte count relative to the whence argument.
whence: Valid values for whence are:
0: start of the file (default)
1: relative to the current position of the file
2: relative to the end of file. `offset` is usually negative.
"""
# pylint: enable=g-doc-args
self._preread_check()
# We needed to make offset a keyword argument for backwards-compatibility.
# This check exists so that we can convert back to having offset be a
# positional argument.
# TODO(jhseu): Make `offset` a positional argument after `position` is
# deleted.
if offset is None and position is None:
raise TypeError("seek(): offset argument required")
if offset is not None and position is not None:
raise TypeError("seek(): offset and position may not be set "
"simultaneously.")
if position is not None:
offset = position
if whence == 0:
pass
elif whence == 1:
offset += self.tell()
elif whence == 2:
offset += self.size()
else:
raise errors.InvalidArgumentError(
None, None,
"Invalid whence argument: {}. Valid values are 0, 1, or 2.".format(
whence))
self._read_buf.seek(offset)
def readline(self):
r"""Reads the next line, keeping \n. At EOF, returns ''."""
self._preread_check()
return self._prepare_value(self._read_buf.readline())
def readlines(self):
"""Returns all lines from the file in a list."""
self._preread_check()
lines = []
while True:
s = self.readline()
if not s:
break
lines.append(s)
return lines
def tell(self):
"""Returns the current position in the file."""
if self._read_check_passed:
self._preread_check()
return self._read_buf.tell()
else:
self._prewrite_check()
return self._writable_file.tell()
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def __iter__(self):
return self
def __next__(self):
retval = self.readline()
if not retval:
raise StopIteration()
return retval
def next(self):
return self.__next__()
def flush(self):
"""Flushes the Writable file.
This only ensures that the data has made its way out of the process without
any guarantees on whether it's written to disk. This means that the
data would survive an application crash but not necessarily an OS crash.
"""
if self._writable_file:
self._writable_file.flush()
def close(self):
"""Closes FileIO. Should be called for the WritableFile to be flushed."""
self._read_buf = None
if self._writable_file:
self._writable_file.close()
self._writable_file = None
def seekable(self):
"""Returns True as FileIO supports random access ops of seek()/tell()"""
return True
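# Hedged usage sketch (added for illustration; the path is hypothetical).
# FileIO supports the context-manager and iterator protocols defined above:
#
#   with FileIO("/tmp/example.txt", mode="w") as f:
#     f.write("line 1\nline 2\n")
#   with FileIO("/tmp/example.txt", mode="r") as f:
#     for line in f:          # __iter__/__next__ delegate to readline()
#       print(line, end="")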
@tf_export(v1=["gfile.Exists"])
def file_exists(filename):
"""Determines whether a path exists or not.
Args:
filename: string, a path
Returns:
True if the path exists, whether it's a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
return file_exists_v2(filename)
@tf_export("io.gfile.exists")
def file_exists_v2(path):
"""Determines whether a path exists or not.
Args:
path: string, a path
Returns:
True if the path exists, whether it's a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
try:
_pywrap_file_io.FileExists(compat.path_to_bytes(path))
except errors.NotFoundError:
return False
return True
@tf_export(v1=["gfile.Remove"])
def delete_file(filename):
"""Deletes the file located at 'filename'.
Args:
filename: string, a filename
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
`NotFoundError` if the file does not exist.
"""
delete_file_v2(filename)
@tf_export("io.gfile.remove")
def delete_file_v2(path):
"""Deletes the path located at 'path'.
Args:
path: string, a path
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
`NotFoundError` if the path does not exist.
"""
_pywrap_file_io.DeleteFile(compat.path_to_bytes(path))
def read_file_to_string(filename, binary_mode=False):
"""Reads the entire contents of a file to a string.
Args:
filename: string, path to a file
binary_mode: whether to open the file in binary mode or not. This changes
the type of the object returned.
Returns:
contents of the file as a string or bytes.
Raises:
    errors.OpError: Raises a variety of subtype errors, e.g.
    `NotFoundError`.
"""
if binary_mode:
f = FileIO(filename, mode="rb")
else:
f = FileIO(filename, mode="r")
return f.read()
def write_string_to_file(filename, file_content):
"""Writes a string to a given file.
Args:
filename: string, path to a file
file_content: string, contents that need to be written to the file
Raises:
errors.OpError: If there are errors during the operation.
"""
with FileIO(filename, mode="w") as f:
f.write(file_content)
@tf_export(v1=["gfile.Glob"])
def get_matching_files(filename):
"""Returns a list of files that match the given pattern(s).
Args:
filename: string or iterable of strings. The glob pattern(s).
Returns:
A list of strings containing filenames that match the given pattern(s).
Raises:
* errors.OpError: If there are filesystem / directory listing errors.
* errors.NotFoundError: If pattern to be matched is an invalid directory.
"""
return get_matching_files_v2(filename)
@tf_export("io.gfile.glob")
def get_matching_files_v2(pattern):
r"""Returns a list of files that match the given pattern(s).
  The patterns are defined as strings. Supported patterns are defined
  below. Note that the pattern can be a Python iterable of string patterns.
The format definition of the pattern is:
**pattern**: `{ term }`
**term**:
* `'*'`: matches any sequence of non-'/' characters
* `'?'`: matches a single non-'/' character
* `'[' [ '^' ] { match-list } ']'`: matches any single
character (not) on the list
* `c`: matches character `c` where `c != '*', '?', '\\', '['`
* `'\\' c`: matches character `c`
**character range**:
* `c`: matches character `c` while `c != '\\', '-', ']'`
* `'\\' c`: matches character `c`
* `lo '-' hi`: matches character `c` for `lo <= c <= hi`
Examples:
>>> tf.io.gfile.glob("*.py")
... # For example, ['__init__.py']
>>> tf.io.gfile.glob("__init__.??")
... # As above
>>> files = {"*.py"}
>>> the_iterator = iter(files)
>>> tf.io.gfile.glob(the_iterator)
... # As above
See the C++ function `GetMatchingPaths` in
[`core/platform/file_system.h`]
(../../../core/platform/file_system.h)
for implementation details.
Args:
pattern: string or iterable of strings. The glob pattern(s).
Returns:
A list of strings containing filenames that match the given pattern(s).
Raises:
errors.OpError: If there are filesystem / directory listing errors.
errors.NotFoundError: If pattern to be matched is an invalid directory.
"""
if isinstance(pattern, six.string_types):
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for matching_filename in _pywrap_file_io.GetMatchingFiles(
compat.as_bytes(pattern))
]
else:
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename) # pylint: disable=g-complex-comprehension
for single_filename in pattern
for matching_filename in _pywrap_file_io.GetMatchingFiles(
compat.as_bytes(single_filename))
]
@tf_export(v1=["gfile.MkDir"])
def create_dir(dirname):
"""Creates a directory with the name `dirname`.
Args:
dirname: string, name of the directory to be created
Notes: The parent directories need to exist. Use `tf.io.gfile.makedirs`
instead if there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
create_dir_v2(dirname)
@tf_export("io.gfile.mkdir")
def create_dir_v2(path):
"""Creates a directory with the name given by `path`.
Args:
path: string, name of the directory to be created
Notes: The parent directories need to exist. Use `tf.io.gfile.makedirs`
instead if there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
_pywrap_file_io.CreateDir(compat.path_to_bytes(path))
@tf_export(v1=["gfile.MakeDirs"])
def recursive_create_dir(dirname):
"""Creates a directory and all parent/intermediate directories.
It succeeds if dirname already exists and is writable.
Args:
dirname: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
recursive_create_dir_v2(dirname)
@tf_export("io.gfile.makedirs")
def recursive_create_dir_v2(path):
"""Creates a directory and all parent/intermediate directories.
It succeeds if path already exists and is writable.
Args:
path: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
_pywrap_file_io.RecursivelyCreateDir(compat.path_to_bytes(path))
@tf_export(v1=["gfile.Copy"])
def copy(oldpath, newpath, overwrite=False):
"""Copies data from `oldpath` to `newpath`.
Args:
    oldpath: string, name of the file whose contents need to be copied
    newpath: string, name of the file to copy to
overwrite: boolean, if false it's an error for `newpath` to be occupied by
an existing file.
Raises:
errors.OpError: If the operation fails.
"""
copy_v2(oldpath, newpath, overwrite)
@tf_export("io.gfile.copy")
def copy_v2(src, dst, overwrite=False):
"""Copies data from `src` to `dst`.
Args:
src: string, name of the file whose contents need to be copied
    dst: string, name of the file to copy to
overwrite: boolean, if false it's an error for `dst` to be occupied by an
existing file.
Raises:
errors.OpError: If the operation fails.
"""
_pywrap_file_io.CopyFile(
compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)
@tf_export(v1=["gfile.Rename"])
def rename(oldname, newname, overwrite=False):
"""Rename or move a file / directory.
Args:
oldname: string, pathname for a file
newname: string, pathname to which the file needs to be moved
overwrite: boolean, if false it's an error for `newname` to be occupied by
an existing file.
Raises:
errors.OpError: If the operation fails.
"""
rename_v2(oldname, newname, overwrite)
@tf_export("io.gfile.rename")
def rename_v2(src, dst, overwrite=False):
"""Rename or move a file / directory.
Args:
src: string, pathname for a file
dst: string, pathname to which the file needs to be moved
overwrite: boolean, if false it's an error for `dst` to be occupied by an
existing file.
Raises:
errors.OpError: If the operation fails.
"""
_pywrap_file_io.RenameFile(
compat.path_to_bytes(src), compat.path_to_bytes(dst), overwrite)
def atomic_write_string_to_file(filename, contents, overwrite=True):
"""Writes to `filename` atomically.
This means that when `filename` appears in the filesystem, it will contain
all of `contents`. With write_string_to_file, it is possible for the file
to appear in the filesystem with `contents` only partially written.
Accomplished by writing to a temp file and then renaming it.
Args:
filename: string, pathname for a file
contents: string, contents that need to be written to the file
overwrite: boolean, if false it's an error for `filename` to be occupied by
an existing file.
"""
if not has_atomic_move(filename):
write_string_to_file(filename, contents)
else:
temp_pathname = filename + ".tmp" + uuid.uuid4().hex
write_string_to_file(temp_pathname, contents)
try:
rename(temp_pathname, filename, overwrite)
except errors.OpError:
delete_file(temp_pathname)
raise
@tf_export(v1=["gfile.DeleteRecursively"])
def delete_recursively(dirname):
"""Deletes everything under dirname recursively.
Args:
dirname: string, a path to a directory
Raises:
errors.OpError: If the operation fails.
"""
delete_recursively_v2(dirname)
@tf_export("io.gfile.rmtree")
def delete_recursively_v2(path):
"""Deletes everything under path recursively.
Args:
path: string, a path
Raises:
errors.OpError: If the operation fails.
"""
_pywrap_file_io.DeleteRecursively(compat.path_to_bytes(path))
@tf_export(v1=["gfile.IsDirectory"])
def is_directory(dirname):
"""Returns whether the path is a directory or not.
Args:
dirname: string, path to a potential directory
Returns:
True, if the path is a directory; False otherwise
"""
return is_directory_v2(dirname)
@tf_export("io.gfile.isdir")
def is_directory_v2(path):
"""Returns whether the path is a directory or not.
Args:
path: string, path to a potential directory
Returns:
True, if the path is a directory; False otherwise
"""
try:
return _pywrap_file_io.IsDirectory(compat.path_to_bytes(path))
except errors.OpError:
return False
def has_atomic_move(path):
"""Checks whether the file system supports atomic moves.
Returns whether or not the file system of the given path supports the atomic
move operation for a file or folder. If atomic move is supported, it is
recommended to use a temp location for writing and then move to the final
location.
Args:
path: string, path to a file
Returns:
True, if the path is on a file system that supports atomic move
False, if the file system does not support atomic move. In such cases
we need to be careful about using moves; it may be safer not to use
temporary locations at all.
"""
try:
return _pywrap_file_io.HasAtomicMove(compat.path_to_bytes(path))
except errors.OpError:
# defaults to True
return True
@tf_export(v1=["gfile.ListDirectory"])
def list_directory(dirname):
"""Returns a list of entries contained within a directory.
The list is in arbitrary order. It does not contain the special entries "."
and "..".
Args:
dirname: string, path to a directory
Returns:
[filename1, filename2, ... filenameN] as strings
Raises:
errors.NotFoundError if directory doesn't exist
"""
return list_directory_v2(dirname)
@tf_export("io.gfile.listdir")
def list_directory_v2(path):
"""Returns a list of entries contained within a directory.
The list is in arbitrary order. It does not contain the special entries "."
and "..".
Args:
path: string, path to a directory
Returns:
[filename1, filename2, ... filenameN] as strings
Raises:
errors.NotFoundError if directory doesn't exist
"""
if not is_directory(path):
raise errors.NotFoundError(
node_def=None,
op=None,
message="Could not find directory {}".format(path))
# Convert each element to string, since the return values of the
# vector of string should be interpreted as strings, not bytes.
return [
compat.as_str_any(filename)
for filename in _pywrap_file_io.GetChildren(compat.path_to_bytes(path))
]
@tf_export(v1=["gfile.Walk"])
def walk(top, in_order=True):
"""Recursive directory tree generator for directories.
Args:
top: string, a Directory name
in_order: bool, Traverse in order if True, post order if False. Errors that
happen while listing directories are ignored.
Yields:
Each yield is a 3-tuple: the pathname of a directory, followed by lists of
all its subdirectories and leaf files. That is, each yield looks like:
`(dirname, [subdirname, subdirname, ...], [filename, filename, ...])`.
Each item is a string.
"""
return walk_v2(top, in_order)
@tf_export("io.gfile.walk")
def walk_v2(top, topdown=True, onerror=None):
"""Recursive directory tree generator for directories.
Args:
top: string, a Directory name
topdown: bool, Traverse pre order if True, post order if False.
onerror: optional handler for errors. Should be a function, it will be
called with the error as argument. Rethrowing the error aborts the walk.
Errors that happen while listing directories are ignored.
Yields:
Each yield is a 3-tuple: the pathname of a directory, followed by lists of
all its subdirectories and leaf files. That is, each yield looks like:
`(dirname, [subdirname, subdirname, ...], [filename, filename, ...])`.
Each item is a string.
"""
def _make_full_path(parent, item):
# Since `os.path.join` discards paths before one that starts with the path
# separator (https://docs.python.org/3/library/os.path.html#os.path.join),
# we have to manually handle that case as `/` is a valid character on GCS.
if item[0] == os.sep:
return "".join([os.path.join(parent, ""), item])
return os.path.join(parent, item)
top = compat.as_str_any(compat.path_to_str(top))
try:
listing = list_directory(top)
except errors.NotFoundError as err:
if onerror:
onerror(err)
else:
return
files = []
subdirs = []
for item in listing:
full_path = _make_full_path(top, item)
if is_directory(full_path):
subdirs.append(item)
else:
files.append(item)
here = (top, subdirs, files)
if topdown:
yield here
for subdir in subdirs:
for subitem in walk_v2(
_make_full_path(top, subdir), topdown, onerror=onerror):
yield subitem
if not topdown:
yield here
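# Example (an illustrative sketch; the directory name is made up): printing
# every file under a tree with `walk_v2`.
#
#   for dirname, subdirs, files in walk_v2("/tmp/data"):
#     for filename in files:
#       print(os.path.join(dirname, filename))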
@tf_export(v1=["gfile.Stat"])
def stat(filename):
"""Returns file statistics for a given path.
Args:
filename: string, path to a file
Returns:
FileStatistics struct that contains information about the path
Raises:
errors.OpError: If the operation fails.
"""
return stat_v2(filename)
@tf_export("io.gfile.stat")
def stat_v2(path):
"""Returns file statistics for a given path.
Args:
path: string, path to a file
Returns:
FileStatistics struct that contains information about the path
Raises:
errors.OpError: If the operation fails.
"""
return _pywrap_file_io.Stat(compat.path_to_str(path))
def filecmp(filename_a, filename_b):
"""Compare two files, returning True if they are the same, False otherwise.
We check size first and return False quickly if the files are different sizes.
If they are the same size, we continue by generating a crc for the whole file.
You might wonder: why not use Python's `filecmp.cmp()` instead? The answer is
that the builtin library is not robust to the many different filesystems
TensorFlow runs on, so here we perform a similar comparison with
the more robust FileIO.
Args:
filename_a: string path to the first file.
filename_b: string path to the second file.
Returns:
True if the files are the same, False otherwise.
"""
size_a = FileIO(filename_a, "rb").size()
size_b = FileIO(filename_b, "rb").size()
if size_a != size_b:
return False
# Size is the same. Do a full check.
crc_a = file_crc32(filename_a)
crc_b = file_crc32(filename_b)
return crc_a == crc_b
def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):
"""Get the crc32 of the passed file.
The crc32 of a file can be used for error checking; two files with the same
crc32 are considered equivalent. Note that the entire file must be read
to produce the crc32.
Args:
filename: string, path to a file
block_size: Integer, process the files by reading blocks of `block_size`
bytes. Use -1 to read the file at once.
Returns:
hexadecimal as string, the crc32 of the passed file.
"""
crc = 0
with FileIO(filename, mode="rb") as f:
chunk = f.read(n=block_size)
while chunk:
crc = binascii.crc32(chunk, crc)
chunk = f.read(n=block_size)
return hex(crc & 0xFFFFFFFF)
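# Example (an illustrative sketch; the paths are made up): the two helpers
# above compose into a cheap integrity check that works across any filesystem
# FileIO supports.
#
#   if filecmp("gs://bucket/a.bin", "/tmp/a.bin"):
#     print("files match, crc32 =", file_crc32("/tmp/a.bin"))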
|
sserrot/champion_relationships
|
refs/heads/master
|
venv/Lib/site-packages/nbconvert/tests/test_nbconvertapp.py
|
1
|
# -*- coding: utf-8 -*-
"""Test NbConvertApp"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import io
from .base import TestsBase
from ..postprocessors import PostProcessorBase
from ..tests.utils import onlyif_cmds_exist
from nbconvert import nbconvertapp
from nbconvert.exporters import Exporter
from traitlets.tests.utils import check_help_all_output
from testpath import tempdir
import pytest
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DummyPost(PostProcessorBase):
def postprocess(self, filename):
print("Dummy:%s" % filename)
class TestNbConvertApp(TestsBase):
"""Collection of NbConvertApp tests"""
def test_notebook_help(self):
"""Will help show if no notebooks are specified?"""
with self.create_temp_cwd():
out, err = self.nbconvert('--log-level 0', ignore_return_code=True)
self.assertIn("--help-all", out)
def test_help_output(self):
"""ipython nbconvert --help-all works"""
check_help_all_output('nbconvert')
def test_glob(self):
"""
Do search patterns work for notebook names?
"""
with self.create_temp_cwd(['notebook*.ipynb']):
self.nbconvert('--to python *.ipynb --log-level 0')
assert os.path.isfile('notebook1.py')
assert os.path.isfile('notebook2.py')
def test_glob_subdir(self):
"""
Do search patterns work for subdirectory notebook names?
"""
with self.create_temp_cwd():
self.copy_files_to(['notebook*.ipynb'], 'subdir/')
self.nbconvert('--to python --log-level 0 ' +
os.path.join('subdir', '*.ipynb'))
assert os.path.isfile(os.path.join('subdir', 'notebook1.py'))
assert os.path.isfile(os.path.join('subdir', 'notebook2.py'))
def test_build_dir(self):
"""build_directory affects export location"""
with self.create_temp_cwd():
self.copy_files_to(['notebook*.ipynb'], 'subdir/')
self.nbconvert('--to python --log-level 0 --output-dir . ' +
os.path.join('subdir', '*.ipynb'))
assert os.path.isfile('notebook1.py')
assert os.path.isfile('notebook2.py')
def test_convert_full_qualified_name(self):
"""
Test that nbconvert can convert file using a full qualified name for a
package, import and use it.
"""
with self.create_temp_cwd():
self.copy_files_to(['notebook*.ipynb'], 'subdir')
self.nbconvert('--to nbconvert.tests.fake_exporters.MyExporter --log-level 0 ' +
os.path.join('subdir', '*.ipynb'))
assert os.path.isfile(os.path.join('subdir', 'notebook1.test_ext'))
assert os.path.isfile(os.path.join('subdir', 'notebook2.test_ext'))
def test_explicit(self):
"""
Do explicit notebook names work?
"""
with self.create_temp_cwd(['notebook*.ipynb']):
self.nbconvert('--log-level 0 --to python notebook2')
assert not os.path.isfile('notebook1.py')
assert os.path.isfile('notebook2.py')
def test_absolute_template_file(self):
"""--template '/path/to/template.tpl'"""
with self.create_temp_cwd(['notebook*.ipynb']), tempdir.TemporaryDirectory() as td:
template = os.path.join(td, 'mytemplate.tpl')
test_output = 'success!'
with open(template, 'w') as f:
f.write(test_output)
self.nbconvert('--log-level 0 notebook2 --template %s' % template)
assert os.path.isfile('notebook2.html')
with open('notebook2.html') as f:
text = f.read()
assert text == test_output
def test_relative_template_file(self):
"""Test --template 'relative/path.tpl'"""
with self.create_temp_cwd(['notebook*.ipynb']):
os.mkdir('relative')
template = os.path.join('relative', 'path.tpl')
test_output = 'success!'
with open(template, 'w') as f:
f.write(test_output)
self.nbconvert('--log-level 0 notebook2 --template %s' % template)
assert os.path.isfile('notebook2.html')
with open('notebook2.html') as f:
text = f.read()
assert text == test_output
@onlyif_cmds_exist('pandoc', 'xelatex')
def test_filename_spaces(self):
"""
Generate PDFs with graphics if notebooks have spaces in the name?
"""
with self.create_temp_cwd(['notebook2.ipynb']):
os.rename('notebook2.ipynb', 'notebook with spaces.ipynb')
self.nbconvert('--log-level 0 --to pdf'
' "notebook with spaces"'
' --PDFExporter.latex_count=1'
' --PDFExporter.verbose=True'
)
assert os.path.isfile('notebook with spaces.pdf')
@onlyif_cmds_exist('pandoc', 'xelatex')
def test_pdf(self):
"""
Check to see if pdfs compile, even if strikethroughs are included.
"""
with self.create_temp_cwd(['notebook2.ipynb']):
self.nbconvert('--log-level 0 --to pdf'
' "notebook2"'
' --PDFExporter.latex_count=1'
' --PDFExporter.verbose=True'
)
assert os.path.isfile('notebook2.pdf')
def test_post_processor(self):
"""Do post processors work?"""
with self.create_temp_cwd(['notebook1.ipynb']):
out, err = self.nbconvert('--log-level 0 --to python notebook1 '
'--post nbconvert.tests.test_nbconvertapp.DummyPost')
self.assertIn('Dummy:notebook1.py', out)
@onlyif_cmds_exist('pandoc')
def test_spurious_cr(self):
"""Check for extra CR characters"""
with self.create_temp_cwd(['notebook2.ipynb']):
self.nbconvert('--log-level 0 --to latex notebook2')
assert os.path.isfile('notebook2.tex')
with open('notebook2.tex') as f:
tex = f.read()
self.nbconvert('--log-level 0 --to html notebook2')
assert os.path.isfile('notebook2.html')
with open('notebook2.html') as f:
html = f.read()
self.assertEqual(tex.count('\r'), tex.count('\r\n'))
self.assertEqual(html.count('\r'), html.count('\r\n'))
@onlyif_cmds_exist('pandoc')
def test_png_base64_html_ok(self):
"""Is embedded png data well formed in HTML?"""
with self.create_temp_cwd(['notebook2.ipynb']):
self.nbconvert('--log-level 0 --to HTML '
'notebook2.ipynb --template full')
assert os.path.isfile('notebook2.html')
with open('notebook2.html') as f:
assert "data:image/png;base64,b'" not in f.read()
@onlyif_cmds_exist('pandoc')
def test_template(self):
"""
Do export templates work?
"""
with self.create_temp_cwd(['notebook2.ipynb']):
self.nbconvert('--log-level 0 --to slides '
'notebook2.ipynb')
assert os.path.isfile('notebook2.slides.html')
with open('notebook2.slides.html') as f:
assert '/reveal.css' in f.read()
def test_output_ext(self):
"""test --output=outputfile[.ext]"""
with self.create_temp_cwd(['notebook1.ipynb']):
self.nbconvert('--log-level 0 --to python '
'notebook1.ipynb --output nb.py')
assert os.path.exists('nb.py')
self.nbconvert('--log-level 0 --to python '
'notebook1.ipynb --output nb2')
assert os.path.exists('nb2.py')
def test_glob_explicit(self):
"""
Can a search pattern be used along with matching explicit notebook names?
"""
with self.create_temp_cwd(['notebook*.ipynb']):
self.nbconvert('--log-level 0 --to python '
'*.ipynb notebook1.ipynb notebook2.ipynb')
assert os.path.isfile('notebook1.py')
assert os.path.isfile('notebook2.py')
def test_explicit_glob(self):
"""
Can explicit notebook names be used and then a matching search pattern?
"""
with self.create_temp_cwd(['notebook*.ipynb']):
self.nbconvert('--log-level 0 --to=python '
'notebook1.ipynb notebook2.ipynb *.ipynb')
assert os.path.isfile('notebook1.py')
assert os.path.isfile('notebook2.py')
def test_default_config(self):
"""
Does the default config work?
"""
with self.create_temp_cwd(['notebook*.ipynb', 'jupyter_nbconvert_config.py']):
self.nbconvert('--log-level 0')
assert os.path.isfile('notebook1.py')
assert not os.path.isfile('notebook2.py')
def test_override_config(self):
"""
Can the default config be overridden?
"""
with self.create_temp_cwd(['notebook*.ipynb',
'jupyter_nbconvert_config.py',
'override.py']):
self.nbconvert('--log-level 0 --config="override.py"')
assert not os.path.isfile('notebook1.py')
assert os.path.isfile('notebook2.py')
def test_accents_in_filename(self):
"""
Can notebook names include accents?
"""
with self.create_temp_cwd():
self.create_empty_notebook(u'nb1_análisis.ipynb')
self.nbconvert('--log-level 0 --to Python nb1_*')
assert os.path.isfile(u'nb1_análisis.py')
@onlyif_cmds_exist('xelatex', 'pandoc')
def test_filename_accent_pdf(self):
"""
Generate PDFs if notebooks have an accent in their name?
"""
with self.create_temp_cwd():
self.create_empty_notebook(u'nb1_análisis.ipynb')
self.nbconvert('--log-level 0 --to pdf "nb1_*"'
' --PDFExporter.latex_count=1'
' --PDFExporter.verbose=True')
assert os.path.isfile(u'nb1_análisis.pdf')
def test_cwd_plugin(self):
"""
Verify that an extension in the cwd can be imported.
"""
with self.create_temp_cwd(['hello.py']):
self.create_empty_notebook(u'empty.ipynb')
self.nbconvert('empty --to html --NbConvertApp.writer_class=\'hello.HelloWriter\'')
assert os.path.isfile(u'hello.txt')
def test_output_suffix(self):
"""
Verify that the output suffix is applied
"""
with self.create_temp_cwd():
self.create_empty_notebook('empty.ipynb')
self.nbconvert('empty.ipynb --to notebook')
assert os.path.isfile('empty.nbconvert.ipynb')
def test_different_build_dir(self):
"""
Verify that the output suffix is not applied
"""
with self.create_temp_cwd():
self.create_empty_notebook('empty.ipynb')
os.mkdir('output')
self.nbconvert(
'empty.ipynb --to notebook '
'--FilesWriter.build_directory=output')
assert os.path.isfile('output/empty.ipynb')
def test_inplace(self):
"""
Verify that the notebook is converted in place
"""
with self.create_temp_cwd():
self.create_empty_notebook('empty.ipynb')
self.nbconvert('empty.ipynb --inplace')
assert os.path.isfile('empty.ipynb')
assert not os.path.isfile('empty.nbconvert.ipynb')
assert not os.path.isfile('empty.html')
def test_no_prompt(self):
"""
Verify that the html has no prompts when given --no-prompt.
"""
with self.create_temp_cwd(["notebook1.ipynb"]):
self.nbconvert('notebook1.ipynb --log-level 0 --no-prompt --to html')
assert os.path.isfile('notebook1.html')
with open("notebook1.html",'r') as f:
text = f.read()
assert "In [" not in text
assert "Out[" not in text
self.nbconvert('notebook1.ipynb --log-level 0 --to html')
assert os.path.isfile('notebook1.html')
with open("notebook1.html",'r') as f:
text2 = f.read()
assert "In [" in text2
assert "Out[" in text2
def test_cell_tag_output(self):
"""
Verify that the html has tags in cell attributes if they exist.
"""
with self.create_temp_cwd(["notebook_tags.ipynb"]):
self.nbconvert('notebook_tags.ipynb --log-level 0 --to html')
assert os.path.isfile('notebook_tags.html')
with open("notebook_tags.html",'r') as f:
text = f.read()
assert 'code_cell rendered celltag_mycelltag celltag_mysecondcelltag">' in text
assert 'code_cell rendered">' in text
assert 'text_cell rendered celltag_mymarkdowncelltag">' in text
assert 'text_cell rendered">' in text
def test_no_input(self):
"""
Verify that the html has no input when given --no-input.
"""
with self.create_temp_cwd(["notebook1.ipynb"]):
self.nbconvert('notebook1.ipynb --log-level 0 --no-input --to html')
assert os.path.isfile('notebook1.html')
with open("notebook1.html",'r') as f:
text = f.read()
assert "In [" not in text
assert "Out[" not in text
assert ('<span class="n">x</span>'
'<span class="p">,</span>'
'<span class="n">y</span>'
'<span class="p">,</span>'
'<span class="n">z</span> '
'<span class="o">=</span> '
'<span class="n">symbols</span>'
'<span class="p">(</span>'
'<span class="s1">&#39;x y z&#39;</span>'
'<span class="p">)</span>') not in text
self.nbconvert('notebook1.ipynb --log-level 0 --to html')
assert os.path.isfile('notebook1.html')
with open("notebook1.html",'r') as f:
text2 = f.read()
assert "In [" in text2
assert "Out[" in text2
assert ('<span class="n">x</span>'
'<span class="p">,</span>'
'<span class="n">y</span>'
'<span class="p">,</span>'
'<span class="n">z</span> '
'<span class="o">=</span> '
'<span class="n">symbols</span>'
'<span class="p">(</span>'
'<span class="s1">&#39;x y z&#39;</span>'
'<span class="p">)</span>') in text2
def test_allow_errors(self):
"""
Verify that conversion is aborted with '--execute' if an error is
encountered, but that conversion continues if '--allow-errors' is
used in addition.
"""
with self.create_temp_cwd(['notebook3*.ipynb']):
# Convert notebook containing a cell that raises an error,
# both without and with cell execution enabled.
output1, _ = self.nbconvert('--to markdown --stdout notebook3*.ipynb') # no cell execution
output2, _ = self.nbconvert('--to markdown --allow-errors --stdout notebook3*.ipynb') # no cell execution; --allow-errors should have no effect
output3, _ = self.nbconvert('--execute --allow-errors --to markdown --stdout notebook3*.ipynb') # with cell execution; errors are allowed
# Un-executed outputs should not contain either
# of the two numbers computed in the notebook.
assert '23' not in output1
assert '42' not in output1
assert '23' not in output2
assert '42' not in output2
# Executed output should contain both numbers.
assert '23' in output3
assert '42' in output3
# Executing the notebook should raise an exception if --allow-errors is not specified
with pytest.raises(OSError):
self.nbconvert('--execute --to markdown --stdout notebook3*.ipynb')
def test_errors_print_traceback(self):
"""
Verify that the stderr output contains the traceback of the cell execution exception.
"""
with self.create_temp_cwd(['notebook3_with_errors.ipynb']):
_, error_output = self.nbconvert('--execute --to markdown --stdout notebook3_with_errors.ipynb',
ignore_return_code=True)
assert 'print("Some text before the error")' in error_output
assert 'raise RuntimeError("This is a deliberate exception")' in error_output
assert 'RuntimeError: This is a deliberate exception' in error_output
def test_fenced_code_blocks_markdown(self):
"""
Verify that input cells use fenced code blocks with the language
name in nb.metadata.kernelspec.language, if that exists
"""
with self.create_temp_cwd(["notebook1*.ipynb"]):
# this notebook doesn't have nb.metadata.kernelspec, so it should
# just do a fenced code block, with no language
output1, _ = self.nbconvert('--to markdown --stdout notebook1.ipynb')
assert '```python' not in output1 # shouldn't have language
assert "```" in output1 # but should have fenced blocks
with self.create_temp_cwd(["notebook_jl*.ipynb"]):
output2, _ = self.nbconvert('--to markdown --stdout notebook_jl.ipynb')
assert '```julia' in output2 # should have the julia language
assert "```" in output2 # and plain ``` to close the cell
def test_convert_from_stdin_to_stdout(self):
"""
Verify that conversion can be done via stdin to stdout
"""
with self.create_temp_cwd(["notebook1.ipynb"]):
with io.open('notebook1.ipynb') as f:
notebook = f.read().encode()
output1, _ = self.nbconvert('--to markdown --stdin --stdout', stdin=notebook)
assert '```python' not in output1 # shouldn't have language
assert "```" in output1 # but should have fenced blocks
def test_convert_from_stdin(self):
"""
Verify that conversion can be done via stdin.
"""
with self.create_temp_cwd(["notebook1.ipynb"]):
with io.open('notebook1.ipynb') as f:
notebook = f.read().encode()
self.nbconvert('--to markdown --stdin', stdin=notebook)
assert os.path.isfile("notebook.md") # default name for stdin input
with io.open('notebook.md') as f:
output1 = f.read()
assert '```python' not in output1 # shouldn't have language
assert "```" in output1 # but should have fenced blocks
@onlyif_cmds_exist('pandoc', 'xelatex')
def test_linked_images(self):
"""
Generate PDFs with an image linked in a markdown cell
"""
with self.create_temp_cwd(['latex-linked-image.ipynb', 'testimage.png']):
self.nbconvert('--to pdf latex-linked-image.ipynb')
assert os.path.isfile('latex-linked-image.pdf')
@onlyif_cmds_exist('pandoc')
def test_embedded_jpeg(self):
"""
Verify that latex conversion succeeds
with a notebook with an embedded .jpeg
"""
with self.create_temp_cwd(['notebook4_jpeg.ipynb',
'containerized_deployments.jpeg']):
self.nbconvert('--to latex notebook4_jpeg.ipynb')
assert os.path.isfile('notebook4_jpeg.tex')
@onlyif_cmds_exist('pandoc')
def test_markdown_display_priority(self):
"""
Check to see if markdown conversion embeds PNGs,
even if an (unsupported) PDF is present.
"""
with self.create_temp_cwd(['markdown_display_priority.ipynb']):
self.nbconvert('--log-level 0 --to markdown '
'"markdown_display_priority.ipynb"')
assert os.path.isfile('markdown_display_priority.md')
with io.open('markdown_display_priority.md') as f:
markdown_output = f.read()
assert ("markdown_display_priority_files/"
"markdown_display_priority_0_1.png") in markdown_output
@onlyif_cmds_exist('pandoc')
def test_write_figures_to_custom_path(self):
"""
Check if figure files are copied to configured path.
"""
def fig_exists(path):
return (len(os.listdir(path)) > 0)
# check absolute path
with self.create_temp_cwd(['notebook4_jpeg.ipynb',
'containerized_deployments.jpeg']):
output_dir = tempdir.TemporaryDirectory()
path = os.path.join(output_dir.name, 'files')
self.nbconvert(
'--log-level 0 notebook4_jpeg.ipynb --to rst '
'--NbConvertApp.output_files_dir={}'
.format(path))
assert fig_exists(path)
output_dir.cleanup()
# check relative path
with self.create_temp_cwd(['notebook4_jpeg.ipynb',
'containerized_deployments.jpeg']):
self.nbconvert(
'--log-level 0 notebook4_jpeg.ipynb --to rst '
'--NbConvertApp.output_files_dir=output')
assert fig_exists('output')
# check default path with notebook name
with self.create_temp_cwd(['notebook4_jpeg.ipynb',
'containerized_deployments.jpeg']):
self.nbconvert(
'--log-level 0 notebook4_jpeg.ipynb --to rst')
assert fig_exists('notebook4_jpeg_files')
|
DDShadoww/grab
|
refs/heads/master
|
grab/response.py
|
12
|
# Backward compatibility
from grab.document import * # noqa
from grab.document import Document as Response # noqa
|
gojira/tensorflow
|
refs/heads/master
|
tensorflow/contrib/deprecated/__init__.py
|
67
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core alias for the deprecated tf.X_summary ops.
For TensorFlow 1.0, we have reorganized the TensorFlow summary ops into a
submodule, and made some semantic tweaks. The first thing to note is that we
moved the APIs around as follows:
```python
tf.scalar_summary -> tf.summary.scalar
tf.histogram_summary -> tf.summary.histogram
tf.audio_summary -> tf.summary.audio
tf.image_summary -> tf.summary.image
tf.merge_summary -> tf.summary.merge
tf.merge_all_summaries -> tf.summary.merge_all
```
We think this API is cleaner and will improve long-term discoverability and
clarity of the TensorFlow API. But we also took the opportunity to make an
important change to how summary "tags" work. The "tag" of a summary is the
string that is associated with the output data, i.e. the key for organizing the
generated protobufs.
Previously, the tag was allowed to be any unique string; it had no relation
to the summary op generating it, and no relation to the TensorFlow name system.
This behavior made it very difficult to write reusable code that would add
summary ops to the graph. If you had a function to add summary ops, you would
need to pass in a `tf.name_scope`, manually, to that function to create
deduplicated tags. Otherwise your program would fail with a runtime error due
to tag collision.
The new summary APIs under `tf.summary` throw away the "tag" as an independent
concept; instead, the first argument is the node name. So summary tags now
automatically inherit the surrounding `tf.name_scope`, and automatically
are deduplicated if there is a conflict. Now however, the only allowed
characters are alphanumerics, underscores, and forward slashes. To make
migration easier, the new APIs automatically convert illegal characters to
underscores.
Just as an example, consider the following "before" and "after" code snippets:
```python
# Before
def add_activation_summaries(v, scope):
tf.scalar_summary("%s/fraction_of_zero" % scope, tf.nn.fraction_of_zero(v))
tf.histogram_summary("%s/activations" % scope, v)
# After
def add_activation_summaries(v):
tf.summary.scalar("fraction_of_zero", tf.nn.fraction_of_zero(v))
tf.summary.histogram("activations", v)
```
Now, so long as the add_activation_summaries function is called from within the
right `tf.name_scope`, the behavior is the same.
Because this change does modify the behavior and could break tests, we can't
automatically migrate usage to the new APIs. That is why we are making the old
APIs temporarily available here at `tf.contrib.deprecated`.
In addition to the name change described above, there are two further changes
to the new summary ops:
- the "max_images" argument for `tf.image_summary` was renamed to
"max_outputs" for `tf.summary.image`
- `tf.scalar_summary` accepted arbitrary tensors of tags and values, but
`tf.summary.scalar` requires a single scalar name and scalar value. In most
cases, you can call `tf.summary.scalar` in a loop to get the same behavior
(see the sketch below).
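For example, where the old API accepted a batch of tags and values in a single
op, a loop gives equivalent behavior (an illustrative sketch; the tensor names
are made up):
```python
losses = {"loss_a": loss_a, "loss_b": loss_b}
for name, value in losses.items():
  tf.summary.scalar(name, value)
```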
As before, TensorBoard groups charts by the top-level `tf.name_scope`, which may
be inconvenient, since in the new summary ops the summary will inherit that
`tf.name_scope` without user control. We plan to add more grouping mechanisms
to TensorBoard, so it will be possible to specify the TensorBoard group for
each summary via the summary API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.logging_ops import audio_summary
from tensorflow.python.ops.logging_ops import histogram_summary
from tensorflow.python.ops.logging_ops import image_summary
from tensorflow.python.ops.logging_ops import merge_all_summaries
from tensorflow.python.ops.logging_ops import merge_summary
from tensorflow.python.ops.logging_ops import scalar_summary
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long
_allowed_symbols = ['audio_summary', 'histogram_summary',
'image_summary', 'merge_all_summaries',
'merge_summary', 'scalar_summary']
remove_undocumented(__name__, _allowed_symbols)
|
vitan/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex_multi_apps/__init__.py
|
12133432
| |
dhermes/google-cloud-python
|
refs/heads/master
|
redis/google/cloud/redis_v1beta1/gapic/__init__.py
|
12133432
| |
mcldev/geonode
|
refs/heads/master
|
geonode/layers/migrations/__init__.py
|
12133432
| |
piquadrat/django
|
refs/heads/master
|
django/core/management/base.py
|
21
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
import os
import sys
from argparse import ArgumentParser
from io import TextIOBase
import django
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.db import DEFAULT_DB_ALIAS, connections
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
SystemExit in several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super().__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super().parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super().error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(TextIOBase):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(style_func(msg))
class BaseCommand:
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``help``
A short description of the command, which will be printed in
help messages.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_migrations_checks``
A boolean; if ``True``, the command prints a warning if the set of
migrations on disk don't match the migrations in the database.
``requires_system_checks``
A boolean; if ``True``, the entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command: if it creates locale-sensitive
database content that shouldn't contain any translations (as happens,
e.g., with django.contrib.auth permissions), activating any locale
might cause unintended effects.
``stealth_options``
A tuple of any options the command uses which aren't defined by the
argument parser.
"""
# Metadata about this command.
help = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
requires_migrations_checks = False
requires_system_checks = True
# Arguments, common to all commands, which aren't defined by the argument
# parser.
base_stealth_options = ('skip_checks', 'stderr', 'stdout')
# Command-specific options not defined by the argument parser.
stealth_options = ()
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = CommandParser(
self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None,
)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument(
'-v', '--verbosity', action='store', dest='verbosity', default=1,
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output',
)
parser.add_argument(
'--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument(
'--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".',
)
parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions')
parser.add_argument(
'--no-color', action='store_true', dest='no_color',
help="Don't colorize the command output.",
)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
try:
connections.close_all()
except ImproperlyConfigured:
# Ignore if connections aren't setup at this point (e.g. no
# configured settings).
pass
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options['no_color']:
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options['stderr'], self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if self.requires_system_checks and not options.get('skip_checks'):
self.check()
if self.requires_migrations_checks:
self.check_migrations()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
output = '%s\n%s\n%s' % (
self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()),
output,
self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()),
)
self.stdout.write(output)
finally:
if saved_locale is not None:
translation.activate(saved_locale)
return output
def _run_checks(self, **kwargs):
return checks.run_checks(**kwargs)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False, fail_level=checks.ERROR):
"""
Use the system check framework to validate the entire Django project.
Raise CommandError for any serious message (error or critical level).
If there are only light messages (like warnings), print them to stderr
and don't raise an exception.
"""
all_issues = self._run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(str(e))
if e.is_serious()
else self.style.WARNING(str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def check_migrations(self):
"""
Print a warning if the set of migrations on disk don't match the
migrations in the database.
"""
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted({migration.app_label for migration, backwards in plan})
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unapplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s." % {
"unapplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
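# A minimal subclass (an illustrative sketch, not part of this module):
#
#   class GreetCommand(BaseCommand):
#       help = 'Prints a greeting for each given name.'
#
#       def add_arguments(self, parser):
#           parser.add_argument('names', nargs='+')
#
#       def handle(self, *args, **options):
#           return '\n'.join('Hello, %s!' % name for name in options['names'])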
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application labels.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
raise NotImplementedError(
"Subclasses of AppCommand must provide "
"a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
|
SerCeMan/intellij-community
|
refs/heads/master
|
python/testData/wrap/WrapInArgumentList.py
|
83
|
def foo(abracadabra1, abracadabra2, abracadabra3, abracadabra4, abracadabra5<caret>
|
denis-pitul/django
|
refs/heads/master
|
django/contrib/auth/__init__.py
|
387
|
import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
HASH_SESSION_KEY = '_auth_user_hash'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
return import_string(path)()
def _get_backends(return_tuples=False):
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
backends.append((backend, backend_path) if return_tuples else backend)
if not backends:
raise ImproperlyConfigured(
'No authentication backends have been defined. Does '
'AUTHENTICATION_BACKENDS contain anything?'
)
return backends
def get_backends():
return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def _get_user_session_key(request):
# This value in the session is always serialized to a string, so we need
# to convert it back to Python whenever we access it.
return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend, backend_path in _get_backends(return_tuples=True):
try:
inspect.getcallargs(backend.authenticate, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(**credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
return None
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = backend_path
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__,
credentials=_clean_credentials(credentials))
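# Typical usage (an illustrative sketch, e.g. in a login view; the credential
# values are made up):
#
#   user = authenticate(username='alice', password='secret')
#   if user is not None:
#       login(request, user)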
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
session_auth_hash = ''
if user is None:
user = request.user
if hasattr(user, 'get_session_auth_hash'):
session_auth_hash = user.get_session_auth_hash()
if SESSION_KEY in request.session:
if _get_user_session_key(request) != user.pk or (
session_auth_hash and
request.session.get(HASH_SESSION_KEY) != session_auth_hash):
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
request.session[BACKEND_SESSION_KEY] = user.backend
request.session[HASH_SESSION_KEY] = session_auth_hash
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated():
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
# remember language choice saved to session
language = request.session.get(LANGUAGE_SESSION_KEY)
request.session.flush()
if language is not None:
request.session[LANGUAGE_SESSION_KEY] = language
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"""
Returns the User model that is active in this project.
"""
try:
return django_apps.get_model(settings.AUTH_USER_MODEL)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL
)
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved, an instance of `AnonymousUser` is returned.
"""
from .models import AnonymousUser
user = None
try:
user_id = _get_user_session_key(request)
backend_path = request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
else:
if backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
user = backend.get_user(user_id)
# Verify the session
if ('django.contrib.auth.middleware.SessionAuthenticationMiddleware'
in settings.MIDDLEWARE_CLASSES and hasattr(user, 'get_session_auth_hash')):
session_hash = request.session.get(HASH_SESSION_KEY)
session_hash_verified = session_hash and constant_time_compare(
session_hash,
user.get_session_auth_hash()
)
if not session_hash_verified:
request.session.flush()
user = None
return user or AnonymousUser()
def get_permission_codename(action, opts):
"""
Returns the codename of the permission for the specified action.
"""
return '%s_%s' % (action, opts.model_name)
def update_session_auth_hash(request, user):
"""
Updating a user's password logs out all sessions for the user if
django.contrib.auth.middleware.SessionAuthenticationMiddleware is enabled.
This function takes the current request and the updated user object from
which the new session hash will be derived and updates the session hash
appropriately to prevent a password change from logging out the session
from which the password was changed.
"""
if hasattr(user, 'get_session_auth_hash') and request.user == user:
request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
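# Typical usage in a password-change view (an illustrative sketch; the form
# class is the stock django.contrib.auth.forms.PasswordChangeForm):
#
#   form = PasswordChangeForm(user=request.user, data=request.POST)
#   if form.is_valid():
#       form.save()
#       update_session_auth_hash(request, form.user)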
default_app_config = 'django.contrib.auth.apps.AuthConfig'
|
sergio-incaser/odoo
|
refs/heads/8.0
|
openerp/report/print_xml.py
|
338
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import openerp
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval
import print_fnc
from openerp.osv.orm import BaseModel
class InheritDict(dict):
# Might be useful when we're doing name lookup for call or eval.
def __init__(self, parent=None):
self.parent = parent
def __getitem__(self, name):
if name in self:
return super(InheritDict, self).__getitem__(name)
else:
if not self.parent:
raise KeyError
else:
return self.parent[name]
def tounicode(val):
if isinstance(val, str):
unicode_val = unicode(val, 'utf-8')
elif isinstance(val, unicode):
unicode_val = val
else:
unicode_val = unicode(val)
return unicode_val
class document(object):
def __init__(self, cr, uid, datas, func=False):
# create a new document
self.cr = cr
self.pool = openerp.registry(cr.dbname)
self.func = func or {}
self.datas = datas
self.uid = uid
self.bin_datas = {}
def node_attrs_get(self, node):
if len(node.attrib):
return node.attrib
return {}
def get_value(self, browser, field_path):
fields = field_path.split('.')
if not len(fields):
return ''
value = browser
for f in fields:
if isinstance(value, (BaseModel, list)):
if not value:
return ''
value = value[0]
value = value[f]
return value or ''
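# Example (illustrative field names): resolving a dotted path such as
#
#   code = self.get_value(record, 'partner_id.country_id.code')
#
# walks one segment at a time; recordsets/lists collapse to their first
# record, and a falsy final value is returned as ''.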
def get_value2(self, browser, field_path):
value = self.get_value(browser, field_path)
if isinstance(value, BaseModel):
return value.id
else:
return value
def eval(self, record, expr):
#TODO: support remote variables (eg address.title) in expr
# how to do that: parse the string, find dots, replace those dotted variables by temporary
# "simple ones", fetch the value of those variables and add them (temporarily) to the _data
# dictionary passed to eval
#FIXME: it won't work if the data hasn't been fetched yet... this could
# happen if the eval node is the first one using this Record
# the next line is a workaround for the problem: it causes the resource to be loaded
#Pinky: Why not this ? eval(expr, browser) ?
# name = browser.name
# data_dict = browser._data[self.get_value(browser, 'id')]
return safe_eval(expr, {}, {'obj': record})
def parse_node(self, node, parent, browser, datas=None):
attrs = self.node_attrs_get(node)
if 'type' in attrs:
if attrs['type']=='field':
value = self.get_value(browser, attrs['name'])
#TODO: test this
if value == '' and 'default' in attrs:
value = attrs['default']
el = etree.SubElement(parent, node.tag)
el.text = tounicode(value)
#TODO: test this
for key, value in attrs.iteritems():
if key not in ('type', 'name', 'default'):
el.set(key, value)
elif attrs['type']=='attachment':
model = browser._name
value = self.get_value(browser, attrs['name'])
ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model','=',model),('res_id','=',int(value))])
datas = self.pool['ir.attachment'].read(self.cr, self.uid, ids)
if len(datas):
# if there are several, pick first
datas = datas[0]
fname = str(datas['datas_fname'])
ext = fname.split('.')[-1].lower()
if ext in ('jpg','jpeg', 'png'):
import base64
from StringIO import StringIO
dt = base64.decodestring(datas['datas'])
fp = StringIO()
fp.write(dt)
i = str(len(self.bin_datas))
self.bin_datas[i] = fp
el = etree.SubElement(parent, node.tag)
el.text = i
elif attrs['type']=='data':
#TODO: test this
txt = self.datas.get('form', {}).get(attrs['name'], '')
el = etree.SubElement(parent, node.tag)
el.text = txt
elif attrs['type']=='function':
if attrs['name'] in self.func:
txt = self.func[attrs['name']](node)
else:
txt = print_fnc.print_fnc(attrs['name'], node)
el = etree.SubElement(parent, node.tag)
el.text = txt
elif attrs['type']=='eval':
value = self.eval(browser, attrs['expr'])
el = etree.SubElement(parent, node.tag)
el.text = str(value)
elif attrs['type']=='fields':
fields = attrs['name'].split(',')
vals = {}
for b in browser:
value = tuple([self.get_value2(b, f) for f in fields])
if not value in vals:
vals[value]=[]
vals[value].append(b)
keys = vals.keys()
keys.sort()
if 'order' in attrs and attrs['order']=='desc':
keys.reverse()
v_list = [vals[k] for k in keys]
for v in v_list:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
elif attrs['type']=='call':
if len(attrs['args']):
#TODO: test this
# fetches the values of the variables whose names were passed in the args attribute
args = [self.eval(browser, arg) for arg in attrs['args'].split(',')]
else:
args = []
# get the object
if 'model' in attrs:
obj = self.pool[attrs['model']]
else:
obj = browser # the record(set) is an instance of the model
# get the ids
if 'ids' in attrs:
ids = self.eval(browser, attrs['ids'])
else:
ids = browser.ids
# call the method itself
newdatas = getattr(obj, attrs['name'])(self.cr, self.uid, ids, *args)
def parse_result_tree(node, parent, datas):
if not node.tag == etree.Comment:
el = etree.SubElement(parent, node.tag)
atr = self.node_attrs_get(node)
if 'value' in atr:
if not isinstance(datas[atr['value']], (str, unicode)):
txt = str(datas[atr['value']])
else:
txt = datas[atr['value']]
el.text = txt
else:
for el_cld in node:
parse_result_tree(el_cld, el, datas)
if not isinstance(newdatas, (BaseModel, list)):
newdatas = [newdatas]
for newdata in newdatas:
parse_result_tree(node, parent, newdata)
elif attrs['type']=='zoom':
value = self.get_value(browser, attrs['name'])
if value:
if not isinstance(value, (BaseModel, list)):
v_list = [value]
else:
v_list = value
for v in v_list:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
else:
# if there is no "type" attribute in the node, copy it to the xml data and parse its children
if not node.tag == etree.Comment:
if node.tag == parent.tag:
el = parent
else:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld,el, browser)
def xml_get(self):
return etree.tostring(self.doc,encoding="utf-8",xml_declaration=True,pretty_print=True)
def parse_tree(self, ids, model, context=None):
if not context:
context={}
browser = self.pool[model].browse(self.cr, self.uid, ids, context)
self.parse_node(self.dom, self.doc, browser)
def parse_string(self, xml, ids, model, context=None):
if not context:
context={}
# parses the xml template to memory
self.dom = etree.XML(xml)
# create the xml data from the xml template
self.parse_tree(ids, model, context)
def parse(self, filename, ids, model, context=None):
if not context:
context={}
# parses the xml template to memory
src_file = tools.file_open(filename)
try:
self.dom = etree.XML(src_file.read())
self.doc = etree.Element(self.dom.tag)
self.parse_tree(ids, model, context)
finally:
src_file.close()
def close(self):
self.doc = None
self.dom = None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
droidroidz/USCC_R970_kernel
|
refs/heads/master
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
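# Hedged note: each test_opcodes entry is [status field prefix, comparison
# operator, fixed expected value]; the fixed value is only preset for the
# mutex-state checks ("M"), e.g. "blocked" asserts the selected state digit
# equals 2, while the other checks take their expected value from the line.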
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
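# Hedged worked example (values illustrative): analyse("123", ["M", "eq", 2], "1")
# divides 123 by 10**1 and takes the result modulo 10, isolating the tens
# digit (2); since it equals the expected "blocked" state 2, it returns 1.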
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
sjsucohort6/openstack
|
refs/heads/master
|
python/venv/lib/python2.7/site-packages/openstack/tests/unit/orchestration/v1/test_stack.py
|
3
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from openstack.orchestration.v1 import stack
FAKE_ID = 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf'
FAKE_NAME = 'test_stack'
FAKE = {
'capabilities': '1',
'creation_time': '2',
'description': '3',
'disable_rollback': True,
'id': FAKE_ID,
'links': '6',
'notification_topics': '7',
'outputs': '8',
'parameters': {'OS::stack_id': '9'},
'name': FAKE_NAME,
'status': '11',
'status_reason': '12',
'template_description': '13',
'template_url': 'http://www.example.com/wordpress.yaml',
'timeout_mins': '14',
'updated_time': '15',
}
FAKE_CREATE_RESPONSE = {
'stack': {
'id': FAKE_ID,
'links': [{
'href': 'stacks/%s/%s' % (FAKE_NAME, FAKE_ID),
'rel': 'self'}]}
}
class TestStack(testtools.TestCase):
def test_basic(self):
sot = stack.Stack()
self.assertEqual('stack', sot.resource_key)
self.assertEqual('stacks', sot.resources_key)
self.assertEqual('/stacks', sot.base_path)
self.assertEqual('orchestration', sot.service.service_type)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_retrieve)
self.assertFalse(sot.allow_update)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = stack.Stack(FAKE)
self.assertEqual(FAKE['capabilities'], sot.capabilities)
self.assertEqual(FAKE['creation_time'], sot.created_at)
self.assertEqual(FAKE['description'], sot.description)
self.assertEqual(FAKE['disable_rollback'], sot.disable_rollback)
self.assertEqual(FAKE['id'], sot.id)
self.assertEqual(FAKE['links'], sot.links)
self.assertEqual(FAKE['notification_topics'],
sot.notification_topics)
self.assertEqual(FAKE['outputs'], sot.outputs)
self.assertEqual(FAKE['parameters'], sot.parameters)
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['status'], sot.status)
self.assertEqual(FAKE['status_reason'],
sot.status_reason)
self.assertEqual(FAKE['template_description'],
sot.template_description)
self.assertEqual(FAKE['template_url'],
sot.template_url)
self.assertEqual(FAKE['timeout_mins'], sot.timeout_mins)
self.assertEqual(FAKE['updated_time'], sot.updated_at)
def test_create(self):
resp = mock.MagicMock()
resp.body = FAKE_CREATE_RESPONSE
sess = mock.Mock()
sess.post = mock.MagicMock()
sess.post.return_value = resp
sot = stack.Stack(FAKE)
sot.create(sess)
url = '/stacks'
body = FAKE.copy()
body.pop('id')
body.pop('name')
sess.post.assert_called_with(url, service=sot.service, json=body)
self.assertEqual(FAKE_ID, sot.id)
self.assertEqual(FAKE_NAME, sot.name)
def test_check(self):
session_mock = mock.MagicMock()
sot = stack.Stack(FAKE)
sot._action = mock.MagicMock()
body = {'check': ''}
sot.check(session_mock)
sot._action.assert_called_with(session_mock, body)
|
h3biomed/ansible
|
refs/heads/h3
|
lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_facts.py
|
13
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqlserver_facts
version_added: "2.7"
short_description: Get Azure MySQL Server facts.
description:
- Get facts of MySQL Server.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
name:
description:
- The name of the server.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of MySQL Server
azure_rm_mysqlserver_facts:
resource_group: myResourceGroup
name: server_name
- name: List instances of MySQL Server
azure_rm_mysqlserver_facts:
resource_group: myResourceGroup
'''
RETURN = '''
servers:
description: A list of dictionaries containing facts for MySQL servers.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/myabdud1223
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: myResourceGroup
name:
description:
- Resource name.
returned: always
type: str
sample: myabdud1223
location:
description:
- The location the resource resides in.
returned: always
type: str
sample: eastus
sku:
description:
- The SKU of the server.
returned: always
type: complex
contains:
name:
description:
- The name of the SKU
returned: always
type: str
sample: GP_Gen4_2
tier:
description:
- The tier of the particular SKU
returned: always
type: str
sample: GeneralPurpose
capacity:
description:
- The scale capacity.
returned: always
type: int
sample: 2
storage_mb:
description:
- The maximum storage allowed for a server.
returned: always
type: int
sample: 128000
enforce_ssl:
description:
- Enable SSL enforcement.
returned: always
type: bool
sample: False
admin_username:
description:
- "The administrator's login name of a server."
returned: always
type: str
sample: serveradmin
version:
description:
- Server version.
returned: always
type: str
sample: "9.6"
user_visible_state:
description:
- A state of a server that is visible to user.
returned: always
type: str
sample: Ready
fully_qualified_domain_name:
description:
- The fully qualified domain name of a server.
returned: always
type: str
sample: myabdud1223.mys.database.azure.com
tags:
description: Tags assigned to the resource. Dictionary of string:string pairs.
type: dict
sample: { tag1: abc }
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMySqlServerFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str'
),
tags=dict(
type='list'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.name = None
self.tags = None
super(AzureRMMySqlServerFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.resource_group is not None and
self.name is not None):
self.results['servers'] = self.get()
elif (self.resource_group is not None):
self.results['servers'] = self.list_by_resource_group()
return self.results
def get(self):
response = None
results = []
try:
response = self.mysql_client.servers.get(resource_group_name=self.resource_group,
server_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for MySQL Server.')
if response and self.has_tags(response.tags, self.tags):
results.append(self.format_item(response))
return results
def list_by_resource_group(self):
response = None
results = []
try:
response = self.mysql_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for MySQL Servers.')
if response is not None:
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'id': d['id'],
'resource_group': self.resource_group,
'name': d['name'],
'sku': d['sku'],
'location': d['location'],
'storage_mb': d['storage_profile']['storage_mb'],
'version': d['version'],
'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
'admin_username': d['administrator_login'],
'user_visible_state': d['user_visible_state'],
'fully_qualified_domain_name': d['fully_qualified_domain_name'],
'tags': d.get('tags')
}
return d
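# Hedged note (descriptive, not from the source docs): format_item() flattens
# the SDK server model with as_dict() and keeps a stable subset of keys;
# enforce_ssl is derived from the ssl_enforcement attribute being 'Enabled'.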
def main():
AzureRMMySqlServerFacts()
if __name__ == '__main__':
main()
|
austinzheng/swift
|
refs/heads/master
|
utils/swift_build_support/swift_build_support/xcrun.py
|
47
|
# swift_build_support/xcrun.py - Invoke xcrun from Python -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Python wrappers for invoking `xcrun` on the command-line.
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import
from . import cache_util
from . import shell
@cache_util.cached
def find(tool, sdk=None, toolchain=None):
"""
Return the path for the given tool, according to `xcrun --find`, using
the given sdk and toolchain.
If `xcrun --find` cannot find the tool, return None.
"""
command = ['xcrun', '--find', tool]
if sdk is not None:
command += ['--sdk', sdk]
if toolchain is not None:
command += ['--toolchain', toolchain]
# `xcrun --find` prints to stderr when it fails to find the
# given tool. We swallow that output with a pipe.
out = shell.capture(
command,
stderr=shell.DEVNULL, dry_run=False, echo=False, optional=True)
if out is None:
return None
return out.rstrip()
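# Hedged usage sketch (the returned path is illustrative):
#
#     find('clang', sdk='macosx')
#     # -> '/Applications/Xcode.app/Contents/Developer/usr/bin/clang'
#     # -> None when `xcrun --find` cannot locate the tool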
@cache_util.cached
def sdk_path(sdk):
"""
Return the path string for given SDK, according to `xcrun --show-sdk-path`.
If `xcrun --show-sdk-path` cannot find the SDK, return None.
"""
command = ['xcrun', '--sdk', sdk, '--show-sdk-path']
out = shell.capture(command, dry_run=False, echo=False, optional=True)
if out is None:
return None
return out.rstrip()
|
conejoninja/plugin.video.pelisalacarta
|
refs/heads/master
|
pelisalacarta/channels/pordede.py
|
3
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for pordede
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core import jsontools
from core.item import Item
from servers import servertools
DEBUG = config.get_setting("debug")
__category__ = "A"
__type__ = "generic"
__title__ = "Pordede"
__channel__ = "pordede"
__language__ = "ES"
__creationdate__ = "20140615"
DEFAULT_HEADERS = []
DEFAULT_HEADERS.append( ["User-Agent","Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"] )
DEFAULT_HEADERS.append( ["Referer","http://www.pordede.com"] )
def isGeneric():
return True
def login():
url = "http://www.pordede.com/site/login"
post = "LoginForm[username]="+config.get_setting("pordedeuser")+"&LoginForm[password]="+config.get_setting("pordedepassword")
headers = DEFAULT_HEADERS[:]
data = scrapertools.cache_page(url,headers=headers,post=post)
def mainlist(item):
logger.info("pelisalacarta.channels.pordede mainlist")
itemlist = []
if config.get_setting("pordedeaccount")!="true":
itemlist.append( Item( channel=__channel__ , title="Habilita tu cuenta en la configuración..." , action="openconfig" , url="" , folder=False ) )
else:
login()
itemlist.append( Item(channel=__channel__, action="menuseries" , title="Series" , url="" ))
itemlist.append( Item(channel=__channel__, action="menupeliculas" , title="Películas" , url="" ))
itemlist.append( Item(channel=__channel__, action="listas_sigues" , title="Listas que sigues" , url="http://www.pordede.com/lists/following" ))
itemlist.append( Item(channel=__channel__, action="tus_listas" , title="Tus listas" , url="http://www.pordede.com/lists/yours" ))
itemlist.append( Item(channel=__channel__, action="listas_sigues" , title="Top listas" , url="http://www.pordede.com/lists" ))
return itemlist
def openconfig(item):
if "xbmc" in config.get_platform() or "boxee" in config.get_platform():
config.open_settings( )
return []
def menuseries(item):
logger.info("pelisalacarta.channels.pordede menuseries")
itemlist = []
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Novedades" , url="http://www.pordede.com/series/loadmedia/offset/0/showlist/hot" ))
itemlist.append( Item(channel=__channel__, action="generos" , title="Por géneros" , url="http://www.pordede.com/series" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Siguiendo" , url="http://www.pordede.com/series/following" ))
itemlist.append( Item(channel=__channel__, action="siguientes" , title="Siguientes Capítulos" , url="http://www.pordede.com/index2.php" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Favoritas" , url="http://www.pordede.com/series/favorite" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Pendientes" , url="http://www.pordede.com/series/pending" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Terminadas" , url="http://www.pordede.com/series/seen" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Recomendadas" , url="http://www.pordede.com/series/recommended" ))
itemlist.append( Item(channel=__channel__, action="search" , title="Buscar..." , url="http://www.pordede.com/series" ))
return itemlist
def menupeliculas(item):
logger.info("pelisalacarta.channels.pordede menupeliculas")
itemlist = []
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Novedades" , url="http://www.pordede.com/pelis/loadmedia/offset/0/showlist/hot" ))
itemlist.append( Item(channel=__channel__, action="generos" , title="Por géneros" , url="http://www.pordede.com/pelis" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Favoritas" , url="http://www.pordede.com/pelis/favorite" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Pendientes" , url="http://www.pordede.com/pelis/pending" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Vistas" , url="http://www.pordede.com/pelis/seen" ))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Recomendadas" , url="http://www.pordede.com/pelis/recommended" ))
itemlist.append( Item(channel=__channel__, action="search" , title="Buscar..." , url="http://www.pordede.com/pelis" ))
return itemlist
def generos(item):
logger.info("pelisalacarta.channels.pordede generos")
headers = DEFAULT_HEADERS[:]
# Download the page
data = scrapertools.cache_page(item.url, headers=headers)
if (DEBUG): logger.info("data="+data)
# Extract the entries (folders)
data = scrapertools.find_single_match(data,'<div class="section genre">(.*?)</div>')
patron = '<a class="mediaFilterLink" data-value="([^"]+)" href="([^"]+)">([^<]+)<span class="num">\((\d+)\)</span></a>'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for textid,scrapedurl,scrapedtitle,cuantos in matches:
title = scrapedtitle.strip()+" ("+cuantos+")"
thumbnail = ""
plot = ""
#http://www.pordede.com/pelis/loadmedia/offset/30/genre/science%20fiction/showlist/all?popup=1
if "/pelis" in item.url:
url = "http://www.pordede.com/pelis/loadmedia/offset/0/genre/"+textid.replace(" ","%20")+"/showlist/all"
else:
url = "http://www.pordede.com/series/loadmedia/offset/0/genre/"+textid.replace(" ","%20")+"/showlist/all"
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=__channel__, action="peliculas" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title))
return itemlist
def search(item,texto):
logger.info("pelisalacarta.channels.pordede search")
if item.url=="":
item.url="http://www.pordede.com/pelis"
texto = texto.replace(" ","-")
# Put the referer in item.extra
item.extra = item.url
item.url = item.url+"/search/query/"+texto+"/years/1950/on/undefined/showlist/all"
try:
return buscar(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def buscar(item):
logger.info("pelisalacarta.channels.pordede buscar")
# Download the page
headers = DEFAULT_HEADERS[:]
#headers.append(["Referer",item.extra])
headers.append(["X-Requested-With","XMLHttpRequest"])
data = scrapertools.cache_page(item.url,headers=headers)
if (DEBUG): logger.info("data="+data)
# Extract the entries (folders)
json_object = jsontools.load_json(data)
if (DEBUG): logger.info("html="+json_object["html"])
data = json_object["html"]
return parse_mixed_results(item,data)
def parse_mixed_results(item,data):
patron = '<a class="defaultLink extended" href="([^"]+)"[^<]+'
patron += '<div class="coverMini shadow tiptip" title="([^"]+)"[^<]+'
patron += '<img class="centeredPic.*?src="([^"]+)"'
patron += '[^<]+<img[^<]+<div class="extra-info">'
patron += '<span class="year">([^<]+)</span>'
patron += '<span class="value"><i class="icon-star"></i>([^<]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedyear,scrapedvalue in matches:
title = scrapertools.htmlclean(scrapedtitle)
if scrapedyear != '':
title += " ("+scrapedyear+")"
if scrapedvalue != '':
title += " ("+scrapedvalue+")"
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
plot = ""
#http://www.pordede.com/peli/the-lego-movie
#http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1
if "/peli/" in scrapedurl:
referer = urlparse.urljoin(item.url,scrapedurl)
url = referer.replace("/peli/","/links/view/slug/")+"/what/peli"
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, viewmode="movie"))
else:
referer = item.url
url = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=__channel__, action="episodios" , title=title , extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title, viewmode="movie"))
if "offset/" in item.url:
old_offset = scrapertools.find_single_match(item.url,"offset/(\d+)/")
new_offset = int(old_offset)+30
url = item.url.replace("offset/"+old_offset,"offset/"+str(new_offset))
itemlist.append( Item(channel=__channel__, action="lista" , title=">> Página siguiente" , extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def siguientes(item):
logger.info("pelisalacarta.channels.pordede siguientes")
# Download the page
headers = DEFAULT_HEADERS[:]
#headers.append(["Referer",item.extra])
headers.append(["X-Requested-With","XMLHttpRequest"])
data = scrapertools.cache_page(item.url,headers=headers)
if (DEBUG): logger.info("data="+data)
# Extract the entries (folders)
json_object = jsontools.load_json(data)
if (DEBUG): logger.info("html2="+json_object["html"])
data = json_object["html"]
patron = ''
patron += '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
patron += '<div class="extra-info"><span class="year">[^<]+'
patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
patron += '</span></div>[^<]+'
patron += '</div>[^<]+'
patron += '</a>[^<]+'
patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
#for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
for scrapedtitle,scrapedthumbnail,scrapedurl,scrapedsession,scrapedepisode in matches:
title = scrapertools.htmlclean(scrapedtitle)
session = scrapertools.htmlclean(scrapedsession)
episode = scrapertools.htmlclean(scrapedepisode)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
plot = ""
title = session + "x" + episode + " - " + title
#http://www.pordede.com/peli/the-lego-movie
#http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1
referer = urlparse.urljoin(item.url,scrapedurl)
url = referer
#itemlist.append( Item(channel=__channel__, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title, viewmode="movie"))
itemlist.append( Item(channel=__channel__, action="episodio" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title, viewmode="movie", extra=session+"|"+episode))
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
return itemlist
def episodio(item):
logger.info("pelisalacarta.channels.pordede episodio")
itemlist = []
headers = DEFAULT_HEADERS[:]
# Download the page
data = scrapertools.cache_page(item.url, headers=headers)
if (DEBUG): logger.info("data="+data)
session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1]))
patrontemporada = '<div class="checkSeason"[^>]+>Temporada '+session+'<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
matchestemporadas = re.compile(patrontemporada,re.DOTALL).findall(data)
for bloque_episodios in matchestemporadas:
if (DEBUG): logger.info("bloque_episodios="+bloque_episodios)
# Extract the episodes
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">'+episode+' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
matches = re.compile(patron,re.DOTALL).findall(bloque_episodios)
for scrapedurl,scrapedtitle,info,visto in matches:
visto_string = "[visto] " if visto.strip()=="active" else ""
numero=episode
title = visto_string+session+"x"+numero+" "+scrapertools.htmlclean(scrapedtitle)
thumbnail = ""
plot = ""
#http://www.pordede.com/peli/the-lego-movie
#http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1
#http://www.pordede.com/links/viewepisode/id/475011?popup=1
epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
url = "http://www.pordede.com/links/viewepisode/id/"+epid
itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=item.show))
if (DEBUG): logger.info("Abrimos title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist2 = []
for capitulo in itemlist:
itemlist2 = findvideos(capitulo)
return itemlist2
def peliculas(item):
logger.info("pelisalacarta.channels.pordede peliculas")
# Download the page
headers = DEFAULT_HEADERS[:]
#headers.append(["Referer",item.extra])
headers.append(["X-Requested-With","XMLHttpRequest"])
data = scrapertools.cache_page(item.url,headers=headers)
if (DEBUG): logger.info("data="+data)
# Extract the entries (folders)
json_object = jsontools.load_json(data)
if (DEBUG): logger.info("html="+json_object["html"])
data = json_object["html"]
return parse_mixed_results(item,data)
def episodios(item):
logger.info("pelisalacarta.channels.pordede episodios")
itemlist = []
headers = DEFAULT_HEADERS[:]
# Download the page
data = scrapertools.cache_page(item.url, headers=headers)
if (DEBUG): logger.info("data="+data)
patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
matchestemporadas = re.compile(patrontemporada,re.DOTALL).findall(data)
for nombre_temporada,bloque_episodios in matchestemporadas:
if (DEBUG): logger.info("nombre_temporada="+nombre_temporada)
if (DEBUG): logger.info("bloque_episodios="+bloque_episodios)
# Extract the episodes
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
matches = re.compile(patron,re.DOTALL).findall(bloque_episodios)
for scrapedurl,numero,scrapedtitle,info,visto in matches:
visto_string = "[visto] " if visto.strip()=="active" else ""
title = visto_string+nombre_temporada.replace("Temporada ", "").replace("Extras", "Extras 0")+"x"+numero+" "+scrapertools.htmlclean(scrapedtitle)
thumbnail = ""
plot = ""
#http://www.pordede.com/peli/the-lego-movie
#http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1
#http://www.pordede.com/links/viewepisode/id/475011?popup=1
epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
url = "http://www.pordede.com/links/viewepisode/id/"+epid
itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=item.show))
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"):
# with year and rating the show cannot be updated correctly; if the rating also changes, another folder will be created
# Without year and without rating:
show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
# Without year:
#show = re.sub(r"\s\(\d+\)", "", item.show)
# Without rating:
#show = re.sub(r"\s\(\d+\.\d+\)", "", item.show)
itemlist.append( Item(channel='pordede', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios###", show=show) )
itemlist.append( Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=show))
return itemlist
def parse_listas(item, patron):
logger.info("pelisalacarta.channels.pordede parse_listas")
# Download the page
headers = DEFAULT_HEADERS[:]
#headers.append(["Referer",item.extra])
headers.append(["X-Requested-With","XMLHttpRequest"])
data = scrapertools.cache_page(item.url,headers=headers)
if (DEBUG): logger.info("data="+data)
# Extract the entries (folders)
json_object = jsontools.load_json(data)
if (DEBUG): logger.info("html="+json_object["html"])
data = json_object["html"]
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for scrapedurl,scrapedtitle,scrapeduser,scrapedfichas in matches:
title = scrapertools.htmlclean(scrapedtitle + ' (' + scrapedfichas + ' fichas, por ' + scrapeduser + ')')
url = urlparse.urljoin(item.url,scrapedurl) + "/offset/0/loadmedia"
thumbnail = ""
itemlist.append( Item(channel=__channel__, action="lista" , title=title , url=url))
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
nextpage = scrapertools.find_single_match(data,'data-url="(/lists/loadlists/offset/[^"]+)"')
if nextpage != '':
url = urlparse.urljoin(item.url,nextpage)
itemlist.append( Item(channel=__channel__, action="listas_sigues" , title=">> Página siguiente" , extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def listas_sigues(item):
logger.info("pelisalacarta.channels.pordede listas_sigues")
patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+'
patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'
return parse_listas(item, patron)
def tus_listas(item):
logger.info("pelisalacarta.channels.pordede tus_listas")
patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+'
patron += '<div class="right"[^<]+'
patron += '<button[^<]+</button[^<]+'
patron += '<button[^<]+</button[^<]+'
patron += '</div[^<]+'
patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'
return parse_listas(item, patron)
def lista(item):
logger.info("pelisalacarta.channels.pordede lista")
# Download the page
headers = DEFAULT_HEADERS[:]
#headers.append(["Referer",item.extra])
headers.append(["X-Requested-With","XMLHttpRequest"])
data = scrapertools.cache_page(item.url,headers=headers)
if (DEBUG): logger.info("data="+data)
# Extract the entries (folders)
json_object = jsontools.load_json(data)
if (DEBUG): logger.info("html="+json_object["html"])
data = json_object["html"]
return parse_mixed_results(item,data)
def findvideos(item, verTodos=False):
logger.info("pelisalacarta.channels.pordede findvideos")
# Download the page
headers = DEFAULT_HEADERS[:]
#headers.append(["Referer",item.extra])
#headers.append(["X-Requested-With","XMLHttpRequest"])
data = scrapertools.cache_page(item.url,headers=headers)
if (DEBUG): logger.info("data="+data)
# Extract the entries (folders)
#json_object = jsontools.load_json(data)
#if (DEBUG): logger.info("html="+json_object["html"])
#data = json_object["html"]
sesion = scrapertools.find_single_match(data,'SESS = "([^"]+)";')
if (DEBUG): logger.info("sesion="+sesion)
patron = '<a target="_blank" class="a aporteLink(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
if config.get_platform().startswith("xbmc") and "/what/peli" in item.url:
itemlist.append( Item(channel=__channel__, action="infosinopsis" , title="INFO / SINOPSIS" , url=item.url, thumbnail=item.thumbnail, folder=False ))
itemsort = []
sortlinks = config.get_setting("pordedesortlinks") # 0:none, 1:rating, 2:language, 3:quality, 4:language+quality, 5:language+rating, 6:language+quality+rating
sortlinks = int(sortlinks) if sortlinks != '' else 0
showlinks = config.get_setting("pordedeshowlinks") # 0:all, 1:watch online, 2:download
showlinks = int(showlinks) if showlinks != '' else 0
for match in matches:
if (DEBUG): logger.info("match="+match)
jdown = scrapertools.find_single_match(match,'<div class="jdownloader">[^<]+</div>')
if (showlinks == 1 and jdown != '') or (showlinks == 2 and jdown == ''): # discard watch-online/download links
continue
idiomas = re.compile('<div class="flag([^"]+)">([^<]+)</div>',re.DOTALL).findall(match)
idioma_0 = (idiomas[0][0].replace(" ","").strip() + " " + idiomas[0][1].replace(" ","").strip()).strip()
if len(idiomas) > 1:
idioma_1 = (idiomas[1][0].replace(" ","").strip() + " " + idiomas[1][1].replace(" ","").strip()).strip()
idioma = idioma_0 + ", " + idioma_1
else:
idioma_1 = ''
idioma = idioma_0
calidad_video = scrapertools.find_single_match(match,'<div class="linkInfo quality"><i class="icon-facetime-video"></i>([^<]+)</div>')
if (DEBUG): logger.info("calidad_video="+calidad_video)
calidad_audio = scrapertools.find_single_match(match,'<div class="linkInfo qualityaudio"><i class="icon-headphones"></i>([^<]+)</div>')
if (DEBUG): logger.info("calidad_audio="+calidad_audio)
thumb_servidor = scrapertools.find_single_match(match,'<div class="hostimage"[^<]+<img\s*src="([^"]+)">')
if (DEBUG): logger.info("thumb_servidor="+thumb_servidor)
nombre_servidor = scrapertools.find_single_match(thumb_servidor,"popup_([^\.]+)\.png")
if (DEBUG): logger.info("nombre_servidor="+nombre_servidor)
title = ("Download " if jdown != '' else "Ver en ")+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")"
cuenta = []
valoracion = 0
for idx, val in enumerate(['1', '2', 'report']):
nn = scrapertools.find_single_match(match,'<span\s+data-num="([^"]+)"\s+class="defaultPopup"\s+href="/likes/popup/value/'+val+'/')
if nn != '0' and nn != '':
cuenta.append(nn + ' ' + ['ok', 'ko', 'rep'][idx])
valoracion += int(nn) if val == '1' else -int(nn)
if len(cuenta) > 0:
title += ' (' + ', '.join(cuenta) + ')'
url = urlparse.urljoin( item.url , scrapertools.find_single_match(match,'href="([^"]+)"') )
thumbnail = thumb_servidor
plot = ""
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
if sortlinks > 0:
# orden1 keeps the "download" links behind the "watch" links when sorting
# orden2 according to the configured criterion
if sortlinks == 1:
orden = valoracion
elif sortlinks == 2:
orden = valora_idioma(idioma_0, idioma_1)
elif sortlinks == 3:
orden = valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 4:
orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 5:
orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
elif sortlinks == 6:
orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion
itemsort.append({'action': "play", 'title': title, 'url':url, 'thumbnail':thumbnail, 'plot':plot, 'extra':sesion+"|"+item.url, 'fulltitle':title, 'orden1': (jdown == ''), 'orden2':orden})
else:
itemlist.append( Item(channel=__channel__, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, extra=sesion+"|"+item.url, fulltitle=title))
if sortlinks > 0:
numberlinks = config.get_setting("pordedenumberlinks") # 0:all, > 0:n*5 (5,10,15,20,...)
numberlinks = int(numberlinks) * 5 if numberlinks != '' else 0
if numberlinks == 0:
verTodos = True
itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
for i, subitem in enumerate(itemsort):
if verTodos == False and i >= numberlinks:
itemlist.append(Item(channel=__channel__, action='findallvideos' , title='Ver todos los enlaces', url=item.url, extra=item.extra ))
break
itemlist.append( Item(channel=__channel__, action=subitem['action'] , title=subitem['title'] , url=subitem['url'] , thumbnail=subitem['thumbnail'] , plot=subitem['plot'] , extra=subitem['extra'] , fulltitle=subitem['fulltitle'] ))
return itemlist
def findallvideos(item):
return findvideos(item, True)
def play(item):
logger.info("pelisalacarta.channels.pordede play url="+item.url)
# Mark as seen
checkseen(item.extra.split("|")[1])
# Make the request
headers = DEFAULT_HEADERS[:]
headers.append( ["Referer" , item.extra.split("|")[1] ])
data = scrapertools.cache_page(item.url,post="_s="+item.extra.split("|")[0],headers=headers)
if (DEBUG): logger.info("data="+data)
#url = scrapertools.find_single_match(data,'<a href="([^"]+)" target="_blank"><button>Visitar enlace</button>')
url = scrapertools.find_single_match(data,'<p class="links">\s+<a href="([^"]+)" target="_blank"')
url = urlparse.urljoin(item.url,url)
headers = DEFAULT_HEADERS[:]
headers.append( ["Referer" , item.url ])
#data2 = scrapertools.cache_page(url,headers=headers)
#logger.info("pelisalacarta.channels.pordede play (interstitial) url="+url)
#logger.info("data2="+data2)
#url2 = scrapertools.find_single_match(data2,'<a href="([^"]+)"><button disabled>Ir al vídeo</button>')
#url2 = urlparse.urljoin(item.url,url2)
#headers = DEFAULT_HEADERS[:]
#headers.append( ["Referer" , url2 ])
media_url = scrapertools.downloadpage(url,headers=headers,header_to_get="location",follow_redirects=False)
logger.info("media_url="+media_url)
itemlist = servertools.find_video_items(data=media_url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
return itemlist
def checkseen(item):
logger.info("pelisalacarta.channels.pordede checkseen "+item)
if "/viewepisode/" in item:
headers = DEFAULT_HEADERS[:]
episode = item.split("/")[-1]
scrapertools.downloadpage("http://www.pordede.com/ajax/action", post="model=episode&id="+episode+"&action=seen&value=1")
if "/what/peli" in item:
headers = DEFAULT_HEADERS[:]
data = scrapertools.cache_page(item, headers=headers)
# GET MOVIE ID
movieid = scrapertools.find_single_match(data,'href="/links/create/ref_id/([0-9]+)/ref_model/')
scrapertools.downloadpage("http://www.pordede.com/ajax/mediaaction", post="model=peli&id="+movieid+"&action=status&value=3")
return True
def infosinopsis(item):
logger.info("pelisalacarta.channels.pordede infosinopsis")
url_aux = item.url.replace("/links/view/slug/", "/peli/").replace("/what/peli", "")
# Download the page
headers = DEFAULT_HEADERS[:]
#headers.append(["Referer",item.extra])
#headers.append(["X-Requested-With","XMLHttpRequest"])
data = scrapertools.cache_page(url_aux,headers=headers)
if (DEBUG): logger.info("data="+data)
scrapedtitle = scrapertools.find_single_match(data,'<h1>([^<]+)</h1>')
scrapedvalue = scrapertools.find_single_match(data,'<span class="puntuationValue" data-value="([^"]+)"')
scrapedyear = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>')
scrapedduration = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>', 1)
scrapedplot = scrapertools.find_single_match(data,'<div class="info text"[^>]+>([^<]+)</div>')
#scrapedthumbnail = scrapertools.find_single_match(data,'<meta property="og:image" content="([^"]+)"')
#thumbnail = scrapedthumbnail.replace("http://www.pordede.comhttp://", "http://").replace("mediacover", "mediathumb")
scrapedgenres = re.compile('href="/pelis/index/genre/[^"]+">([^<]+)</a>',re.DOTALL).findall(data)
scrapedcasting = re.compile('href="/star/[^"]+">([^<]+)</a><br/><span>([^<]+)</span>',re.DOTALL).findall(data)
title = scrapertools.htmlclean(scrapedtitle)
plot = "Año: [B]"+scrapedyear+"[/B]"
plot += " , Duración: [B]"+scrapedduration+"[/B]"
plot += " , Puntuación usuarios: [B]"+scrapedvalue+"[/B]"
plot += "\nGéneros: "+", ".join(scrapedgenres)
plot += "\n\nSinopsis:\n"+scrapertools.htmlclean(scrapedplot)
plot += "\n\nCasting:\n"
for actor,papel in scrapedcasting:
plot += actor+" ("+papel+"). "
tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
tbd.ask(title, plot)
del tbd
return
try:
import xbmcgui
class TextBox( xbmcgui.WindowXML ):
""" Create a skinned textbox window """
def __init__( self, *args, **kwargs):
pass
def onInit( self ):
try:
self.getControl( 5 ).setText( self.text )
self.getControl( 1 ).setLabel( self.title )
except: pass
def onClick( self, controlId ):
pass
def onFocus( self, controlId ):
pass
def onAction( self, action ):
self.close()
def ask(self, title, text ):
self.title = title
self.text = text
self.doModal()
except:
pass
# Link ratings; higher-scored links are shown first:
def valora_calidad(video, audio):
prefs_video = [ 'hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener' ]
prefs_audio = [ 'dts', '5.1', 'rip', 'line', 'screener' ]
video = ''.join(video.split()).lower()
pts = (9 - prefs_video.index(video) if video in prefs_video else 1) * 10
audio = ''.join(audio.split()).lower()
pts += 9 - prefs_audio.index(audio) if audio in prefs_audio else 1
return pts
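# Hedged worked example: valora_calidad('HD 1080', 'DTS') normalizes the
# inputs to 'hd1080'/'dts' and scores (9-1)*10 + (9-0) = 89; values missing
# from the preference lists fall back to 1 in their component.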
def valora_idioma(idioma_0, idioma_1):
prefs = [ 'spanish', 'spanish LAT', 'catalan', 'english', 'french' ]
pts = (9 - prefs.index(idioma_0) if idioma_0 in prefs else 1) * 10
if idioma_1 != '': # if there are subtitles
idioma_1 = idioma_1.replace(' SUB', '')
pts += 8 - prefs.index(idioma_1) if idioma_1 in prefs else 1
else:
pts += 9 # links without subtitles go first
return pts
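# Hedged worked example: valora_idioma('spanish', '') scores (9-0)*10 + 9 = 99
# (links without subtitles rank first), while valora_idioma('spanish',
# 'english SUB') scores 90 + (8-3) = 95.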
|
sdgdsffdsfff/Cmdb_Puppet
|
refs/heads/master
|
cmdb/manage.py
|
2
|
#!/usr/local/python/bin/python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simplecmdb.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
dkarakats/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_error_module.py
|
22
|
"""
Tests for ErrorModule and NonStaffErrorModule
"""
import unittest
from xmodule.tests import get_test_system
from xmodule.error_module import ErrorDescriptor, ErrorModule, NonStaffErrorDescriptor
from xmodule.modulestore.xml import CourseLocationManager
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.x_module import XModuleDescriptor, XModule, STUDENT_VIEW
from mock import MagicMock, Mock, patch
from xblock.runtime import Runtime, IdReader
from xblock.field_data import FieldData
from xblock.fields import ScopeIds
from xblock.test.tools import unabc
class SetupTestErrorModules(unittest.TestCase):
"""Common setUp for use in ErrorModule tests."""
def setUp(self):
super(SetupTestErrorModules, self).setUp()
self.system = get_test_system()
self.course_id = SlashSeparatedCourseKey('org', 'course', 'run')
self.location = self.course_id.make_usage_key('foo', 'bar')
self.valid_xml = u"<problem>ABC \N{SNOWMAN}</problem>"
self.error_msg = "Error"
class TestErrorModule(SetupTestErrorModules):
"""
Tests for ErrorModule and ErrorDescriptor
"""
def test_error_module_xml_rendering(self):
descriptor = ErrorDescriptor.from_xml(
self.valid_xml,
self.system,
CourseLocationManager(self.course_id),
self.error_msg
)
self.assertIsInstance(descriptor, ErrorDescriptor)
descriptor.xmodule_runtime = self.system
context_repr = self.system.render(descriptor, STUDENT_VIEW).content
self.assertIn(self.error_msg, context_repr)
self.assertIn(repr(self.valid_xml), context_repr)
def test_error_module_from_descriptor(self):
descriptor = MagicMock([XModuleDescriptor],
runtime=self.system,
location=self.location,
_field_data=self.valid_xml)
error_descriptor = ErrorDescriptor.from_descriptor(
descriptor, self.error_msg)
self.assertIsInstance(error_descriptor, ErrorDescriptor)
error_descriptor.xmodule_runtime = self.system
context_repr = self.system.render(error_descriptor, STUDENT_VIEW).content
self.assertIn(self.error_msg, context_repr)
self.assertIn(repr(descriptor), context_repr)
class TestNonStaffErrorModule(SetupTestErrorModules):
"""
Tests for NonStaffErrorModule and NonStaffErrorDescriptor
"""
def test_non_staff_error_module_create(self):
descriptor = NonStaffErrorDescriptor.from_xml(
self.valid_xml,
self.system,
CourseLocationManager(self.course_id)
)
self.assertIsInstance(descriptor, NonStaffErrorDescriptor)
def test_from_xml_render(self):
descriptor = NonStaffErrorDescriptor.from_xml(
self.valid_xml,
self.system,
CourseLocationManager(self.course_id)
)
descriptor.xmodule_runtime = self.system
context_repr = self.system.render(descriptor, STUDENT_VIEW).content
self.assertNotIn(self.error_msg, context_repr)
self.assertNotIn(repr(self.valid_xml), context_repr)
def test_error_module_from_descriptor(self):
descriptor = MagicMock([XModuleDescriptor],
runtime=self.system,
location=self.location,
_field_data=self.valid_xml)
error_descriptor = NonStaffErrorDescriptor.from_descriptor(
descriptor, self.error_msg)
self.assertIsInstance(error_descriptor, ErrorDescriptor)
error_descriptor.xmodule_runtime = self.system
context_repr = self.system.render(error_descriptor, STUDENT_VIEW).content
self.assertNotIn(self.error_msg, context_repr)
self.assertNotIn(str(descriptor), context_repr)
class BrokenModule(XModule):
def __init__(self, *args, **kwargs):
super(BrokenModule, self).__init__(*args, **kwargs)
raise Exception("This is a broken xmodule")
class BrokenDescriptor(XModuleDescriptor):
module_class = BrokenModule
class TestException(Exception):
"""An exception type to use to verify raises in tests"""
pass
@unabc("Tests should not call {}")
class TestRuntime(Runtime):
pass
class TestErrorModuleConstruction(unittest.TestCase):
"""
Test that error module construction happens correctly
"""
def setUp(self):
# pylint: disable=abstract-class-instantiated
super(TestErrorModuleConstruction, self).setUp()
field_data = Mock(spec=FieldData)
self.descriptor = BrokenDescriptor(
TestRuntime(Mock(spec=IdReader), field_data),
field_data,
ScopeIds(None, None, None, Location('org', 'course', 'run', 'broken', 'name', None))
)
self.descriptor.xmodule_runtime = TestRuntime(Mock(spec=IdReader), field_data)
self.descriptor.xmodule_runtime.error_descriptor_class = ErrorDescriptor
self.descriptor.xmodule_runtime.xmodule_instance = None
def test_broken_module(self):
"""
Test that when an XModule throws an error during __init__, we
get an ErrorModule back from XModuleDescriptor._xmodule
"""
module = self.descriptor._xmodule
self.assertIsInstance(module, ErrorModule)
@patch.object(ErrorDescriptor, '__init__', Mock(side_effect=TestException))
def test_broken_error_descriptor(self):
"""
Test that a broken error descriptor doesn't cause an infinite loop
"""
with self.assertRaises(TestException):
module = self.descriptor._xmodule
@patch.object(ErrorModule, '__init__', Mock(side_effect=TestException))
def test_broken_error_module(self):
"""
Test that a broken error module doesn't cause an infinite loop
"""
with self.assertRaises(TestException):
module = self.descriptor._xmodule
|
Xero-Hige/Magus
|
refs/heads/magus-master
|
src/libs/string_generalizer.py
|
1
|
# Based on: https://gist.github.com/topicus/4611549
# Updated on 13/11/2017 with ñ skip
import sys
import unicodedata
def strip_accents(s):
# NFD splits each accented character into its base character plus combining
# marks; dropping the 'Mn' marks leaves one base character per original
# character, so the indices of stripped and s stay aligned and any 'ñ'/'Ñ'
# can be restored in place below.
stripped = [c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn']
try:
for i in range(len(stripped)):
if s[i] == 'Ñ' or s[i] == 'ñ':
stripped[i] = s[i]
except IndexError as e:
print("Wrong strip: {} caused by {}".format(e, s), file=sys.stderr)
return "".join(stripped)
|
bd339/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/old-tests/webdriver/user_input/click_test.py
|
141
|
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions, wait
class ClickTest(base_test.WebDriverBaseTest):
def setUp(self):
self.wait = wait.WebDriverWait(self.driver, 5, ignored_exceptions = [exceptions.NoSuchAlertException])
self.driver.get(self.webserver.where_is('modal/res/alerts.html'))
def tearDown(self):
try:
self.driver.switch_to_alert().dismiss()
except exceptions.NoSuchAlertException:
pass
def test_click_div(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("div")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "div")
def test_click_p(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("p")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "p")
def test_click_h1(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("h1")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "h1")
def test_click_pre(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("pre")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "pre")
def test_click_ol(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("ol")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "ol")
def test_click_ul(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("ul")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "ul")
def test_click_a(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("a")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "a")
def test_click_img(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("img")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "img")
def test_click_video(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("video")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "video")
def test_click_canvas(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("canvas")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "canvas")
def test_click_progress(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("progress")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "progress")
def test_click_textarea(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("textarea")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "textarea")
def test_click_button(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("button")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "button")
def test_click_svg(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("svg")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "svg")
def test_click_input_range(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_range")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_range")
def test_click_input_button(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_button")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_button")
def test_click_input_submit(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_submit")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_submit")
def test_click_input_reset(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_reset")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_reset")
def test_click_input_checkbox(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_checkbox")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_checkbox")
def test_click_input_radio(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_radio")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_radio")
def test_click_input_text(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_text")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_text")
def test_click_input_number(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_number")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_number")
def test_click_input_tel(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_tel")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_tel")
def test_click_input_url(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_url")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_url")
def test_click_input_email(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_email")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_email")
def test_click_input_search(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_search")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_search")
def test_click_input_image(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_image")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_image")
if __name__ == "__main__":
unittest.main()
|
adw0rd/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/modeltests/custom_columns/__init__.py
|
12133432
| |
throwable-one/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/modeltests/reverse_lookup/__init__.py
|
12133432
| |
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyAddImportQuickFixTest/existingImportsAlwaysSuggestedFirstEvenIfLonger/long/pkg/__init__.py
|
12133432
| |
Coelhon/MasterRepo.repository
|
refs/heads/master
|
plugin.video.ZemTV-shani/websocket/_socket.py
|
52
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import socket
import six
from ._exceptions import *
from ._utils import *
from ._ssl_compat import *
DEFAULT_SOCKET_OPTION = [(socket.SOL_TCP, socket.TCP_NODELAY, 1)]
if hasattr(socket, "SO_KEEPALIVE"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
if hasattr(socket, "TCP_KEEPIDLE"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPIDLE, 30))
if hasattr(socket, "TCP_KEEPINTVL"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPINTVL, 10))
if hasattr(socket, "TCP_KEEPCNT"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPCNT, 3))
_default_timeout = None
__all__ = ["DEFAULT_SOCKET_OPTION", "sock_opt", "setdefaulttimeout", "getdefaulttimeout",
"recv", "recv_line", "send"]
class sock_opt(object):
def __init__(self, sockopt, sslopt):
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
self.sockopt = sockopt
self.sslopt = sslopt
self.timeout = None
def setdefaulttimeout(timeout):
"""
    Set the default timeout used when connecting.
    timeout: default socket timeout; the value is in seconds.
"""
global _default_timeout
_default_timeout = timeout
def getdefaulttimeout():
"""
    Return the default connect timeout, in seconds.
"""
return _default_timeout
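# Hedged usage sketch (illustrative only; not part of the original module):
#   from websocket import _socket
#   _socket.setdefaulttimeout(10)            # connects now time out after 10 s
#   assert _socket.getdefaulttimeout() == 10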
def recv(sock, bufsize):
if not sock:
raise WebSocketConnectionClosedException("socket is already closed.")
    try:
        data = sock.recv(bufsize)
    except socket.timeout as e:
        message = extract_err_message(e)
        raise WebSocketTimeoutException(message)
    except SSLError as e:
        message = extract_err_message(e)
        if message == "The read operation timed out":
            raise WebSocketTimeoutException(message)
        else:
            raise
    if not data:
        raise WebSocketConnectionClosedException("Connection is already closed.")
    return data
def recv_line(sock):
line = []
while True:
c = recv(sock, 1)
line.append(c)
if c == six.b("\n"):
break
return six.b("").join(line)
def send(sock, data):
if isinstance(data, six.text_type):
data = data.encode('utf-8')
if not sock:
raise WebSocketConnectionClosedException("socket is already closed.")
try:
return sock.send(data)
except socket.timeout as e:
message = extract_err_message(e)
raise WebSocketTimeoutException(message)
except Exception as e:
message = extract_err_message(e)
if isinstance(message, str) and "timed out" in message:
raise WebSocketTimeoutException(message)
else:
raise
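# Hedged usage sketch (assumes `sock` is an already-connected TCP socket;
# illustrative only):
#   send(sock, "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
#   status_line = recv_line(sock)   # reads up to and including the first "\n"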
|
camisatx/pySecMaster
|
refs/heads/master
|
pySecMaster/query_database.py
|
1
|
import pandas as pd
import psycopg2
import time
__author__ = 'Josh Schertz'
__copyright__ = 'Copyright (C) 2018 Josh Schertz'
__description__ = 'An automated system to store and maintain financial data.'
__email__ = 'josh[AT]joshschertz[DOT]com'
__license__ = 'GNU AGPLv3'
__maintainer__ = 'Josh Schertz'
__status__ = 'Development'
__url__ = 'https://joshschertz.com/'
__version__ = '1.5.0'
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
def query_entire_table(database, user, password, host, port, table):
""" Query all of the active tsid values from the specified database.
:param database: String of the database name
:param user: String of the username used to login to the database
:param password: String of the password used to login to the database
:param host: String of the database address (localhost, url, ip, etc.)
:param port: Integer of the database port number (5432)
:param table: String of the table whose values should be returned
:return: DataFrame of the returned values
"""
conn = psycopg2.connect(database=database, user=user, password=password,
host=host, port=port)
try:
with conn:
cur = conn.cursor()
query = ("""SELECT sym.source_id AS tsid
FROM symbology AS sym,
LATERAL (
SELECT source_id
FROM %s
WHERE source_id = sym.source_id
ORDER BY source_id ASC NULLS LAST
LIMIT 1) AS prices""" %
(table,))
cur.execute(query)
rows = cur.fetchall()
if rows:
df = pd.DataFrame(rows)
else:
raise SystemExit('No data returned from query_entire_table')
return df
    except psycopg2.OperationalError:
        raise SystemError('Unable to connect to the %s database in '
                          'query_entire_table. Make sure the database '
                          'address/name are correct.' % database)
    except psycopg2.Error as e:
        # OperationalError is a subclass of psycopg2.Error, so it must be
        # handled first or the connection-failure branch above would be dead
        # code (the original also used conn.OperationalError, which is only
        # defined once the connection object exists).
        print(
            'Error when trying to retrieve data from the %s database in '
            'query_entire_table' % database)
        print(e)
    except Exception as e:
        print(e)
        raise SystemError('Error: Unknown issue occurred in query_entire_table')
if __name__ == '__main__':
from utilities.user_dir import user_dir
userdir = user_dir()
test_database = userdir['postgresql']['pysecmaster_db']
test_user = userdir['postgresql']['pysecmaster_user']
test_password = userdir['postgresql']['pysecmaster_password']
test_host = userdir['postgresql']['pysecmaster_host']
test_port = userdir['postgresql']['pysecmaster_port']
test_table = 'daily_prices' # daily_prices, minute_prices, quandl_codes
start_time = time.time()
table_df = query_entire_table(test_database, test_user, test_password,
test_host, test_port, test_table)
print('Query took %0.2f seconds' % (time.time() - start_time))
# table_df.to_csv('%s.csv' % test_table)
print(table_df)
|
pudquick/lightblue-0.4
|
refs/heads/master
|
build/lib/lightblue/_macutil.py
|
68
|
# Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Mac-specific utility functions and constants.
from Foundation import NSObject, NSDate, NSPoint, NSDefaultRunLoopMode, NSTimer
from AppKit import NSApplication, NSEvent, NSApplicationDefined, NSAnyEventMask
import objc
import time
import _IOBluetooth
import _lightbluecommon
# for mac os 10.5
try:
    from Foundation import NSUIntegerMax
    NSAnyEventMask = NSUIntegerMax
except ImportError:
    pass
# values of constants used in _IOBluetooth.framework
kIOReturnSuccess = 0 # defined in <IOKit/IOReturn.h>
kIOBluetoothUserNotificationChannelDirectionIncoming = 1
# defined in <IOBluetooth/IOBluetoothUserLib.h>
kBluetoothHCIErrorPageTimeout = 0x04 # <IOBluetooth/Bluetooth.h>
# defined in <IOBluetooth/IOBluetoothUserLib.h>
kIOBluetoothServiceBrowserControllerOptionsNone = 0L
LIGHTBLUE_NOTIFY_ID = 5444 # any old number
WAIT_MAX_TIMEOUT = 3
# IOBluetoothSDPUUID objects for RFCOMM and OBEX protocol UUIDs
PROTO_UUIDS = {
_lightbluecommon.RFCOMM: _IOBluetooth.IOBluetoothSDPUUID.uuid16_(0x0003),
_lightbluecommon.OBEX: _IOBluetooth.IOBluetoothSDPUUID.uuid16_(0x0008)
}
def formatdevaddr(addr):
"""
Returns address of a device in usual form e.g. "00:00:00:00:00:00"
- addr: address as returned by device.getAddressString() on an
IOBluetoothDevice
"""
# make uppercase cos PyS60 & Linux seem to always return uppercase
# addresses
# can safely encode to ascii cos BT addresses are only in hex (pyobjc
# returns all strings in unicode)
return addr.replace("-", ":").encode('ascii').upper()
def createbtdevaddr(addr):
# in mac 10.5, can use BluetoothDeviceAddress directly
chars = btaddrtochars(addr)
    try:
        btdevaddr = _IOBluetooth.BluetoothDeviceAddress(chars)
        return btdevaddr
    except Exception:
        # BluetoothDeviceAddress is only usable on Mac OS X 10.5+; fall back
        # to the raw char tuple on older systems.
        return chars
def btaddrtochars(addr):
"""
Takes a bluetooth address and returns a tuple with the corresponding
char values. This can then be used to construct a
IOBluetoothDevice object, providing the signature of the withAddress:
selector has been set (as in _setpyobjcsignatures() in this module).
For example:
>>> chars = btaddrtochars("00:0e:0a:00:a2:00")
>>> chars
(0, 14, 10, 0, 162, 0)
>>> device = _IOBluetooth.IOBluetoothDevice.withAddress_(chars)
>>> type(device)
<objective-c class IOBluetoothDevice at 0xa4024988>
>>> device.getAddressString()
u'00-0e-0a-00-a2-00'
"""
if not _lightbluecommon._isbtaddr(addr):
raise TypeError("address %s not valid bluetooth address" % str(addr))
if addr.find(":") == -1:
addr = addr.replace("-", ":") # consider alternative addr separator
# unhexlify gives binary value like '\x0e', then ord to get the char value.
# unhexlify throws TypeError if value is not a hex pair.
import binascii
chars = [ord(binascii.unhexlify(part)) for part in addr.split(":")]
return tuple(chars)
def looponce():
app = NSApplication.sharedApplication()
# to push the run loops I seem to have to do this twice
# use NSEventTrackingRunLoopMode or NSDefaultRunLoopMode?
for i in range(2):
event = app.nextEventMatchingMask_untilDate_inMode_dequeue_(
NSAnyEventMask, NSDate.dateWithTimeIntervalSinceNow_(0.02),
NSDefaultRunLoopMode, False)
def waituntil(conditionfunc, timeout=None):
"""
Waits until conditionfunc() returns true, or <timeout> seconds have passed.
(If timeout=None, this waits indefinitely until conditionfunc() returns
true.) Returns false if the process timed out, otherwise returns true.
Note!! You must call interruptwait() when you know that conditionfunc()
should be checked (e.g. if you are waiting for data and you know some data
has arrived) so that this can check conditionfunc(); otherwise it will just
continue to wait. (This allows the function to wait for an event that is
sent by interruptwait() instead of polling conditionfunc().)
This allows the caller to wait while the main event loop processes its
events. This must be done for certain situations, e.g. to receive socket
data or to accept client connections on a server socket, since IOBluetooth
requires the presence of an event loop to run these operations.
This function doesn't need to be called if there is something else that is
already processing the main event loop, e.g. if called from within a Cocoa
application.
"""
app = NSApplication.sharedApplication()
starttime = time.time()
if timeout is None:
timeout = NSDate.distantFuture().timeIntervalSinceNow()
if not isinstance(timeout, (int, float)):
raise TypeError("timeout must be int or float, was %s" % \
type(timeout))
endtime = starttime + timeout
while True:
currtime = time.time()
if currtime >= endtime:
return False
# use WAIT_MAX_TIMEOUT, don't wait forever in case of KeyboardInterrupt
e = app.nextEventMatchingMask_untilDate_inMode_dequeue_(NSAnyEventMask, NSDate.dateWithTimeIntervalSinceNow_(min(endtime - currtime, WAIT_MAX_TIMEOUT)), NSDefaultRunLoopMode, True)
if e is not None:
if (e.type() == NSApplicationDefined and e.subtype() == LIGHTBLUE_NOTIFY_ID):
if conditionfunc():
return True
else:
app.postEvent_atStart_(e, True)
def interruptwait():
"""
If waituntil() has been called, this will interrupt the waiting process so
it can check whether it should stop waiting.
"""
evt = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(NSApplicationDefined, NSPoint(), NSApplicationDefined, 0, 1, None, LIGHTBLUE_NOTIFY_ID, 0, 0)
NSApplication.sharedApplication().postEvent_atStart_(evt, True)
class BBCocoaSleeper(NSObject):
def init(self):
self = super(BBCocoaSleeper, self).init()
self.timedout = False
return self
def sleep(self, timeout):
NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
timeout, self, "timedOut:", None, False)
self.timedout = False
waituntil(lambda: self.timedout)
def timedOut_(self, timer):
self.timedout = True
interruptwait()
timedOut_ = objc.selector(timedOut_, signature="v@:@")
def waitfor(timeout):
sleeper = BBCocoaSleeper.alloc().init()
sleeper.sleep(timeout)
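# Hedged usage sketch of the waituntil()/interruptwait() pattern (the names
# below are illustrative, not part of this module):
#   result = {"done": False}
#   def _on_data(data):           # e.g. called from an IOBluetooth delegate
#       result["done"] = True
#       interruptwait()           # wakes waituntil() so it re-checks the condition
#   if not waituntil(lambda: result["done"], timeout=10):
#       print "timed out waiting for data"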
class BBFileLikeObjectReader(NSObject):
"""
Provides a suitable delegate class for the BBDelegatingInputStream class in
LightAquaBlue.framework.
This basically provides a wrapper for a python file-like object so that it
can be read through a NSInputStream.
"""
def initWithFileLikeObject_(self, fileobj):
self = super(BBFileLikeObjectReader, self).init()
self.__fileobj = fileobj
return self
initWithFileLikeObject_ = objc.selector(initWithFileLikeObject_,
signature="@@:@")
def readDataWithMaxLength_(self, maxlength):
try:
data = self.__fileobj.read(maxlength)
except Exception:
return None
return buffer(data)
readDataWithMaxLength_ = objc.selector(readDataWithMaxLength_,
signature="@@:I") #"@12@0:4I8" #"@:I"
class BBFileLikeObjectWriter(NSObject):
"""
Provides a suitable delegate class for the BBDelegatingOutputStream class in
LightAquaBlue.framework.
This basically provides a wrapper for a python file-like object so that it
can be written to through a NSOutputStream.
"""
def initWithFileLikeObject_(self, fileobj):
self = super(BBFileLikeObjectWriter, self).init()
self.__fileobj = fileobj
return self
initWithFileLikeObject_ = objc.selector(initWithFileLikeObject_,
signature="@@:@")
def write_(self, data):
try:
self.__fileobj.write(data)
except Exception:
return -1
return data.length()
write_ = objc.selector(write_, signature="i12@0:4@8") #i12@0:4@8 #i@:@
|
haxwithaxe/supybot
|
refs/heads/master
|
plugins/Format/config.py
|
15
|
###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Format', True)
Format = conf.registerPlugin('Format')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Format, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79
|
nvoron23/paragraph2vec
|
refs/heads/master
|
gensim/parsing/porter.py
|
86
|
#!/usr/bin/env python
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be regarded
as canonical, in that it follows the algorithm presented in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
only differing from it at the points marked --DEPARTURE-- below.
See also http://www.tartarus.org/~martin/PorterStemmer
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
because (a) the points of DEPARTURE are definitely improvements, and
(b) no encoding of the Porter stemmer I have seen is anything like
as exact as this version, even with the points of DEPARTURE!
Vivake Gupta (v@nano.com)
Release 1: January 2001
Further adjustments by Santiago Bruno (bananabruno@gmail.com)
to allow word input not restricted to one word per line, leading
to:
Release 2: July 2008
Optimizations and cleanup of the code by Lars Buitinck, July 2012.
"""
from six.moves import xrange
class PorterStemmer(object):
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[0],
b[1] ... ending at b[k]. k is readjusted downwards as the stemming
progresses.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.j = 0 # j is a general offset into the string
def _cons(self, i):
"""True <=> b[i] is a consonant."""
ch = self.b[i]
if ch in "aeiou":
return False
if ch == 'y':
return i == 0 or not self._cons(i - 1)
return True
def _m(self):
"""Returns the number of consonant sequences between 0 and j.
If c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
i = 0
while True:
if i > self.j:
return 0
if not self._cons(i):
break
i += 1
i += 1
n = 0
while True:
while True:
if i > self.j:
return n
if self._cons(i):
break
i += 1
i += 1
n += 1
            while True:
if i > self.j:
return n
if not self._cons(i):
break
i += 1
i += 1
def _vowelinstem(self):
"""True <=> 0,...j contains a vowel"""
return not all(self._cons(i) for i in xrange(self.j + 1))
def _doublec(self, j):
"""True <=> j,(j-1) contain a double consonant."""
return j > 0 and self.b[j] == self.b[j-1] and self._cons(j)
def _cvc(self, i):
"""True <=> i-2,i-1,i has the form consonant - vowel - consonant
and also if the second c is not w,x or y. This is used when trying to
restore an e at the end of a short word, e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i < 2 or not self._cons(i) or self._cons(i-1) or not self._cons(i-2):
return False
return self.b[i] not in "wxy"
def _ends(self, s):
"""True <=> 0,...k ends with the string s."""
if s[-1] != self.b[self.k]: # tiny speed-up
return 0
length = len(s)
if length > (self.k + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1
def _setto(self, s):
"""Set (j+1),...k to the characters in the string s, adjusting k."""
self.b = self.b[:self.j+1] + s
self.k = len(self.b) - 1
def _r(self, s):
if self._m() > 0:
self._setto(s)
def _step1ab(self):
"""Get rid of plurals and -ed or -ing. E.g.,
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self._ends("sses"):
self.k -= 2
elif self._ends("ies"):
self._setto("i")
elif self.b[self.k - 1] != 's':
self.k -= 1
if self._ends("eed"):
if self._m() > 0:
self.k -= 1
elif (self._ends("ed") or self._ends("ing")) and self._vowelinstem():
self.k = self.j
if self._ends("at"): self._setto("ate")
elif self._ends("bl"): self._setto("ble")
elif self._ends("iz"): self._setto("ize")
elif self._doublec(self.k):
if self.b[self.k - 1] not in "lsz":
self.k -= 1
elif self._m() == 1 and self._cvc(self.k):
self._setto("e")
def _step1c(self):
"""Turn terminal y to i when there is another vowel in the stem."""
if self._ends("y") and self._vowelinstem():
self.b = self.b[:self.k] + 'i'
def _step2(self):
"""Map double suffices to single ones.
So, -ization ( = -ize plus -ation) maps to -ize etc. Note that the
string before the suffix must give _m() > 0.
"""
ch = self.b[self.k - 1]
if ch == 'a':
if self._ends("ational"): self._r("ate")
elif self._ends("tional"): self._r("tion")
elif ch == 'c':
if self._ends("enci"): self._r("ence")
elif self._ends("anci"): self._r("ance")
elif ch == 'e':
if self._ends("izer"): self._r("ize")
elif ch == 'l':
if self._ends("bli"): self._r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self._ends("abli"): self._r("able")
elif self._ends("alli"): self._r("al")
elif self._ends("entli"): self._r("ent")
elif self._ends("eli"): self._r("e")
elif self._ends("ousli"): self._r("ous")
elif ch == 'o':
if self._ends("ization"): self._r("ize")
elif self._ends("ation"): self._r("ate")
elif self._ends("ator"): self._r("ate")
elif ch == 's':
if self._ends("alism"): self._r("al")
elif self._ends("iveness"): self._r("ive")
elif self._ends("fulness"): self._r("ful")
elif self._ends("ousness"): self._r("ous")
elif ch == 't':
if self._ends("aliti"): self._r("al")
elif self._ends("iviti"): self._r("ive")
elif self._ends("biliti"): self._r("ble")
elif ch == 'g': # --DEPARTURE--
if self._ends("logi"): self._r("log")
# To match the published algorithm, delete this phrase
def _step3(self):
"""Deal with -ic-, -full, -ness etc. Similar strategy to _step2."""
ch = self.b[self.k]
if ch == 'e':
if self._ends("icate"): self._r("ic")
elif self._ends("ative"): self._r("")
elif self._ends("alize"): self._r("al")
elif ch == 'i':
if self._ends("iciti"): self._r("ic")
elif ch == 'l':
if self._ends("ical"): self._r("ic")
elif self._ends("ful"): self._r("")
elif ch == 's':
if self._ends("ness"): self._r("")
def _step4(self):
"""_step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
ch = self.b[self.k - 1]
if ch == 'a':
if not self._ends("al"): return
elif ch == 'c':
if not self._ends("ance") and not self._ends("ence"): return
elif ch == 'e':
if not self._ends("er"): return
elif ch == 'i':
if not self._ends("ic"): return
elif ch == 'l':
if not self._ends("able") and not self._ends("ible"): return
elif ch == 'n':
if self._ends("ant"): pass
elif self._ends("ement"): pass
elif self._ends("ment"): pass
elif self._ends("ent"): pass
else: return
elif ch == 'o':
if self._ends("ion") and self.b[self.j] in "st": pass
elif self._ends("ou"): pass
# takes care of -ous
else: return
elif ch == 's':
if not self._ends("ism"): return
elif ch == 't':
if not self._ends("ate") and not self._ends("iti"): return
elif ch == 'u':
if not self._ends("ous"): return
elif ch == 'v':
if not self._ends("ive"): return
elif ch == 'z':
if not self._ends("ize"): return
else:
return
if self._m() > 1:
self.k = self.j
def _step5(self):
"""Remove a final -e if _m() > 1, and change -ll to -l if m() > 1.
"""
k = self.j = self.k
if self.b[k] == 'e':
a = self._m()
if a > 1 or (a == 1 and not self._cvc(k - 1)):
self.k -= 1
if self.b[self.k] == 'l' and self._doublec(self.k) and self._m() > 1:
self.k -= 1
def stem(self, w):
"""Stem the word w, return the stemmed form."""
w = w.lower()
k = len(w) - 1
if k <= 1:
return w # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.b = w
self.k = k
self._step1ab()
self._step1c()
self._step2()
self._step3()
self._step4()
self._step5()
return self.b[:self.k+1]
def stem_sentence(self, txt):
return " ".join(map(self.stem, txt.split()))
def stem_documents(self, docs):
return map(self.stem_sentence, docs)
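# Hedged sanity checks, taken from the _step1ab docstring (illustrative only):
#   PorterStemmer().stem("caresses")  -> "caress"
#   PorterStemmer().stem("ponies")    -> "poni"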
if __name__ == '__main__':
import sys
p = PorterStemmer()
for f in sys.argv[1:]:
with open(f) as infile:
for line in infile:
print(p.stem_sentence(line))
|
voidcc/PCTRL
|
refs/heads/master
|
pox/proto/__init__.py
|
44
|
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Components and utilities for working with various protocols.
This package contains stuff for working with protocols. In general, this
does not include stuff for actually working with the wire formats of protocols
(this is handled by pox.lib.packet). However, there are helpers for working
with protocols here, as well as POX implementations of some protocols (e.g.,
DHCP).
"""
|
soulmachine/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/kde.py
|
4
|
"""
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..utils import check_array, check_random_state
from ..utils.extmath import row_norms
from .ball_tree import BallTree, DTYPE
from .kd_tree import KDTree
VALID_KERNELS = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear',
'cosine']
TREE_DICT = {'ball_tree': BallTree, 'kd_tree': KDTree}
# TODO: implement a brute force version for testing purposes
# TODO: bandwidth estimation
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation
Parameters
----------
bandwidth : float
The bandwidth of the kernel.
algorithm : string
The tree algorithm to use. Valid options are
['kd_tree'|'ball_tree'|'auto']. Default is 'auto'.
kernel : string
The kernel to use. Valid kernels are
['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']
Default is 'gaussian'.
metric : string
The distance metric to use. Note that not all metrics are
valid with all algorithms. Refer to the documentation of
:class:`BallTree` and :class:`KDTree` for a description of
available algorithms. Note that the normalization of the density
output is correct only for the Euclidean distance metric. Default
is 'euclidean'.
atol : float
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 0.
    rtol : float
        The desired relative tolerance of the result. A larger tolerance will
        generally lead to faster execution. Default is 0.
breadth_first : boolean
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details. Default is 40.
metric_params : dict
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
"""
def __init__(self, bandwidth=1.0, algorithm='auto',
kernel='gaussian', metric="euclidean", atol=0, rtol=0,
breadth_first=True, leaf_size=40, metric_params=None):
self.algorithm = algorithm
self.bandwidth = bandwidth
self.kernel = kernel
self.metric = metric
self.atol = atol
self.rtol = rtol
self.breadth_first = breadth_first
self.leaf_size = leaf_size
self.metric_params = metric_params
# run the choose algorithm code so that exceptions will happen here
# we're using clone() in the GenerativeBayes classifier,
# so we can't do this kind of logic in __init__
self._choose_algorithm(self.algorithm, self.metric)
if bandwidth <= 0:
raise ValueError("bandwidth must be positive")
if kernel not in VALID_KERNELS:
raise ValueError("invalid kernel: '{0}'".format(kernel))
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == 'auto':
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return 'kd_tree'
elif metric in BallTree.valid_metrics:
return 'ball_tree'
else:
raise ValueError("invalid metric: '{0}'".format(metric))
elif algorithm in TREE_DICT:
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError("invalid metric for {0}: "
"'{1}'".format(TREE_DICT[algorithm],
metric))
return algorithm
else:
raise ValueError("invalid algorithm: '{0}'".format(algorithm))
def fit(self, X):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
algorithm = self._choose_algorithm(self.algorithm, self.metric)
X = check_array(X, order='C', dtype=DTYPE)
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
leaf_size=self.leaf_size,
**kwargs)
return self
def score_samples(self, X):
"""Evaluate the density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray
The array of log(density) evaluations. This has shape X.shape[:-1]
"""
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = check_array(X, order='C', dtype=DTYPE)
N = self.tree_.data.shape[0]
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
log_density -= np.log(N)
return log_density
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
"""
return np.sum(self.score_samples(X))
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Currently, this is implemented only for gaussian and tophat kernels.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
random_state : RandomState or an int seed (0 by default)
A random number generator instance.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples.
"""
# TODO: implement sampling for other valid kernel shapes
if self.kernel not in ['gaussian', 'tophat']:
raise NotImplementedError()
data = np.asarray(self.tree_.data)
rng = check_random_state(random_state)
i = rng.randint(data.shape[0], size=n_samples)
if self.kernel == 'gaussian':
return np.atleast_2d(rng.normal(data[i], self.bandwidth))
elif self.kernel == 'tophat':
# we first draw points from a d-dimensional normal distribution,
# then use an incomplete gamma function to map them to a uniform
# d-dimensional tophat distribution.
dim = data.shape[1]
X = rng.normal(size=(n_samples, dim))
s_sq = row_norms(X, squared=True)
correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1. / dim)
* self.bandwidth / np.sqrt(s_sq))
return data[i] + X * correction[:, np.newaxis]
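# Hedged usage sketch (illustrative only):
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 2)
#   kde = KernelDensity(bandwidth=0.5, kernel='gaussian').fit(X)
#   log_dens = kde.score_samples(X[:5])    # log-density at the first 5 points
#   draws = kde.sample(3, random_state=0)  # 3 new points from the fitted model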
|
CognitionGuidedSurgery/msml
|
refs/heads/release_cand_1.0
|
src/msml/model/alphabet/operator/python.py
|
1
|
from .base import *
from collections import OrderedDict  # used in __call__; may also arrive via the star import
__author__ = 'Alexander Weigl'
from .... import log
from ....log import error
from ...sequence import executeOperatorSequence
class PythonOperator(Operator):
"""Operator for Python functions.
"""
def __init__(self, name, input=None, output=None, parameters=None, runtime=None, meta=None, settings=None):
"""
:param runtime: should include the key: "function" and "module"
.. seealso: :py:meth:`Operator.__init__`
"""
Operator.__init__(self, name, input, output, parameters, runtime, meta, settings)
self.function_name = runtime['function']
"""name of the pyhton function"""
self.modul_name = runtime['module']
"""the name of the python module"""
self._function = None
"""the found and bind python function"""
def _check_function(self):
pass
def __str__(self):
return "<PythonOperator: %s.%s>" % (self.modul_name, self.function_name)
def __call__(self, **kwargs):
if not self._function:
self.bind_function()
        # Calling self._function(**kwargs) directly would be bad for C++
        # modules, because their signatures are lost; positional arguments
        # are assembled below instead.
        # Replace empty values with defaults from the operator's XML
        # description (take all defaults, then overwrite them with the
        # user-supplied values).
        kwargsUpdated = self.get_default_args()
        kwargsUpdated.update(kwargs)
args = [kwargsUpdated.get(x, None) for x in self.acceptable_names()]
orderedKwArgs = OrderedDict(zip(self.acceptable_names(), args))
log.debug("Parameter: %s" % self.acceptable_names() )
log.debug("Args: %s" % args)
count = sum('*' in str(arg) for arg in kwargsUpdated.values())
if count == 2:
r = executeOperatorSequence(self, orderedKwArgs, self.settings.get('seq_parallel', True))
else:
r = self._function(*args)
if len(self.output) == 0:
results = None
elif len(self.output) == 1:
results = {self.output_names()[0]: r}
else:
results = dict(zip(self.output_names(), r))
return results
def bind_function(self):
"""Search and bind the python function. Have to be called before `__call__`"""
import importlib
try:
mod = importlib.import_module(self.modul_name)
self._function = getattr(mod, self.function_name)
return self._function
except ImportError as e:
#TODO print stack traces
error("%s.%s is not available (module not found), got exception message '%s'" % (self.modul_name, self.function_name, e.message))
except AttributeError as e:
error("%s.%s is not available (function/attribute not found), got exception message '%s" % (self.modul_name, self.function_name, e.message))
def validate(self):
return self.bind_function() is not None
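# Hedged usage sketch (illustrative; assumes Operator.__init__ accepts the
# defaults used here):
#   op = PythonOperator("sqrt", runtime={"module": "math", "function": "sqrt"})
#   op.bind_function()    # resolves math.sqrt
#   assert op.validate()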
|
diegosarmentero/mydownloads
|
refs/heads/master
|
mydownloads_web/mydownloads_web/settings.py
|
1
|
# Django settings for mydownloads_web project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'p%mcqnb+$pm$og69+-e)^ua595n$*6h13wo@(+@yu!clw_12-s'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mydownloads_web.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mydownloads_web.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'mydownloads_web',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
try:
from local_settings import *
except ImportError:
pass
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/fromNamespacePackageImportModule/p1/m1.py
|
819
|
def foo():
pass
|