repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
minlex/django-socialregistration | refs/heads/master | socialregistration/contrib/linkedin/templatetags/__init__.py | 12133432 | |
dynaryu/rmtk | refs/heads/master | rmtk/plotting/collapse_maps/__init__.py | 67 | # -*- coding: utf-8 -*-
#
# LICENSE
#
# Copyright © 2014-2015, GEM Foundation, Chiara Casotto, Anirudh Rao,
# Vitor Silva.
#
# The Risk Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Risk Modeller's Toolkit (rmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the risk scientific staff of the GEM Model Facility
# (risk@globalquakemodel.org).
#
# The Risk Modeller's Toolkit (rmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
|
def safe_agg(func, sequence, default=None):
    """Reduce *sequence* pairwise with *func*, ignoring None entries.

    Returns *default* when the sequence contains no usable (non-None)
    values at all.
    """
    acc = None
    for value in sequence:
        if value is None:
            continue
        acc = value if acc is None else func(acc, value)
    return default if acc is None else acc
|
StackStorm/mistral | refs/heads/master | mistral/api/app.py | 1 | # Copyright 2013 - Mirantis, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import osprofiler.web
import pecan
from mistral.api import access_control
from mistral import config as m_config
from mistral import context as ctx
from mistral.db.v2 import api as db_api_v2
from mistral.rpc import base as rpc
from mistral.service import coordination
from mistral.services import periodic
def get_pecan_config():
    """Build a pecan Config object from the [pecan] section of oslo.config."""
    pecan_opts = cfg.CONF.pecan
    return pecan.configuration.conf_from_dict({
        "app": {
            "root": pecan_opts.root,
            "modules": pecan_opts.modules,
            "debug": pecan_opts.debug,
            "auth_enable": pecan_opts.auth_enable
        }
    })
def setup_app(config=None):
    """Create the Mistral API WSGI application, fully wrapped in middleware.

    Order matters: pecan app -> access control -> profiler -> CORS
    (outermost).
    """
    if not config:
        config = get_pecan_config()

    m_config.set_config_defaults()

    pecan_kwargs = dict(config.app)

    db_api_v2.setup_db()

    # TODO(rakhmerov): Why do we run cron triggers in the API layer?
    # Should we move it to engine?
    if cfg.CONF.cron_trigger.enabled:
        periodic.setup()

    coordination.Service('api_group').register_membership()

    app = pecan.make_app(
        pecan_kwargs.pop('root'),
        hooks=lambda: [ctx.AuthHook(), ctx.ContextHook()],
        logging=getattr(config, 'logging', {}),
        **pecan_kwargs
    )

    # Wrap with authentication / access-control middleware.
    app = access_control.setup(app)

    # TODO(rakhmerov): need to get rid of this call.
    # Set up RPC related flags in config.
    rpc.get_transport()

    # Optionally wrap with the osprofiler tracing middleware.
    if cfg.CONF.profiler.enabled:
        app = osprofiler.web.WsgiMiddleware(
            app,
            hmac_keys=cfg.CONF.profiler.hmac_keys,
            enabled=cfg.CONF.profiler.enabled
        )

    # Create a CORS wrapper, and attach mistral-specific defaults that must be
    # included in all CORS responses.
    return cors_middleware.CORS(app, cfg.CONF)
def init_wsgi():
    """WSGI entry point (e.g. for gunicorn).

    oslo.config parses the CLI args if none are supplied, which makes
    argparse complain that gunicorn's own options were already parsed —
    so an explicitly empty argument list is passed instead.
    """
    m_config.parse_args(args=[])
    return setup_app()
|
saurabh6790/omnisys-lib | refs/heads/master | webnotes/widgets/page_body.py | 65 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# Minimal HTML page that immediately redirects the browser via a
# meta-refresh tag; format args are (page title, destination URL).
redirect_template = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>%s</title>
<meta http-equiv="REFRESH" content="0; url=%s"></HEAD>
<BODY style="font-family: Arial; padding: 8px; font-size: 14px; margin: 0px;">
Redirecting...
</BODY>
</HTML>'''
janusnic/django-allauth | refs/heads/master | allauth/socialaccount/providers/xing/provider.py | 68 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
class XingAccount(ProviderAccount):
    """Wraps the extra_data blob returned by the XING API."""

    def get_profile_url(self):
        # XING exposes the public profile URL under 'permalink'.
        return self.account.extra_data.get('permalink')

    def get_avatar_url(self):
        photos = self.account.extra_data.get('photo_urls', {})
        return photos.get('large')

    def to_str(self):
        fallback = super(XingAccount, self).to_str()
        parts = [self.account.extra_data.get('first_name', ''),
                 self.account.extra_data.get('last_name', '')]
        full_name = ' '.join(parts).strip()
        return full_name or fallback
class XingProvider(OAuthProvider):
    """allauth provider definition for XING (OAuth 1.0a)."""

    id = 'xing'
    name = 'Xing'
    package = 'allauth.socialaccount.providers.xing'
    account_class = XingAccount

    def extract_uid(self, data):
        # XING user id; a missing id is a hard error, hence no .get().
        return data['id']

    def extract_common_fields(self, data):
        return dict(
            email=data.get('active_email'),
            username=data.get('page_name'),
            first_name=data.get('first_name'),
            last_name=data.get('last_name'),
        )


providers.registry.register(XingProvider)
|
# Pre-Django-3.2 style hook: points Django at this app's AppConfig class.
Jannes123/django-oscar | refs/heads/master | src/oscar/apps/basket/__init__.py | 59 | default_app_config = 'oscar.apps.basket.config.BasketConfig'
|
def urlopen(x):
    """Open the file at path *x* and return the file object.

    Uses the builtin ``open``: the Python 2-only ``file`` builtin the
    original called was removed in Python 3, so ``file(x)`` raises
    NameError there. ``open`` behaves identically on both versions.
    """
    return open(x)
divya-csekar/flask-microblog-server | refs/heads/master | flask/Lib/site-packages/werkzeug/debug/__init__.py | 280 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
    """Adapter so the standalone console can reuse the per-frame console
    machinery (it mimics the interface of a traceback frame)."""

    def __init__(self, namespace):
        self.console = Console(namespace)
        # Frame id 0 is reserved for the standalone console.
        self.id = 0
class DebuggedApplication(object):
    """Enables debugging support for a given application::

        from werkzeug.debug import DebuggedApplication
        from myapp import app
        app = DebuggedApplication(app, evalex=True)

    The `evalex` keyword argument allows evaluating expressions in a
    traceback's frame context.

    .. versionadded:: 0.9
       The `lodgeit_url` parameter was deprecated.

    :param app: the WSGI application to run debugged.
    :param evalex: enable exception evaluation feature (interactive
                   debugging).  This requires a non-forking server.
    :param request_key: The key that points to the request object in the
                        environment.  This parameter is ignored in current
                        versions.
    :param console_path: the URL for a general purpose console.
    :param console_init_func: the function that is executed before starting
                              the general purpose console.  The return value
                              is used as initial namespace.
    :param show_hidden_frames: by default hidden traceback frames are skipped.
                               You can show them by setting this parameter
                               to `True`.
    """

    # this class is public
    __module__ = 'werkzeug'

    def __init__(self, app, evalex=False, request_key='werkzeug.request',
                 console_path='/console', console_init_func=None,
                 show_hidden_frames=False, lodgeit_url=None):
        if lodgeit_url is not None:
            from warnings import warn
            warn(DeprecationWarning('Werkzeug now pastes into gists.'))
        if not console_init_func:
            console_init_func = dict
        self.app = app
        self.evalex = evalex
        # Frames and tracebacks are kept alive by id so the browser can
        # refer back to them in follow-up debugger requests.
        self.frames = {}
        self.tracebacks = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Per-process shared secret guarding the debugger endpoints.
        self.secret = gen_salt(20)

    def debug_application(self, environ, start_response):
        """Run the application and conserve the traceback frames."""
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            for item in app_iter:
                yield item
            if hasattr(app_iter, 'close'):
                app_iter.close()
        except Exception:
            if hasattr(app_iter, 'close'):
                app_iter.close()
            traceback = get_current_traceback(skip=1, show_hidden_frames=
                                              self.show_hidden_frames,
                                              ignore_system_exceptions=True)
            # Remember every frame so the browser-side debugger can address
            # them by id in later requests.
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback
            try:
                start_response('500 INTERNAL SERVER ERROR', [
                    ('Content-Type', 'text/html; charset=utf-8'),
                    # Disable Chrome's XSS protection, the debug
                    # output can cause false-positives.
                    ('X-XSS-Protection', '0'),
                ])
            except Exception:
                # if we end up here there has been output but an error
                # occurred.  in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ['wsgi.errors'].write(
                    'Debugging middleware caught exception in streamed '
                    'response at a point where response headers were already '
                    'sent.\n')
            else:
                yield traceback.render_full(evalex=self.evalex,
                                            secret=self.secret) \
                    .encode('utf-8', 'replace')
            traceback.log(environ['wsgi.errors'])

    def execute_command(self, request, command, frame):
        """Execute a command in a console."""
        return Response(frame.console.eval(command), mimetype='text/html')

    def display_console(self, request):
        """Display a standalone shell."""
        if 0 not in self.frames:
            self.frames[0] = _ConsoleFrame(self.console_init_func())
        return Response(render_console_html(secret=self.secret),
                        mimetype='text/html')

    def paste_traceback(self, request, traceback):
        """Paste the traceback and return a JSON response."""
        rv = traceback.paste()
        return Response(json.dumps(rv), mimetype='application/json')

    def get_source(self, request, frame):
        """Render the source viewer."""
        return Response(frame.render_source(), mimetype='text/html')

    def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        # basename() strips any directory components, preventing path
        # traversal via the requested filename.
        filename = join(dirname(__file__), 'shared', basename(filename))
        if isfile(filename):
            mimetype = mimetypes.guess_type(filename)[0] \
                or 'application/octet-stream'
            f = open(filename, 'rb')
            try:
                return Response(f.read(), mimetype=mimetype)
            finally:
                f.close()
        return Response('Not Found', status=404)

    def __call__(self, environ, start_response):
        """Dispatch the requests."""
        # important: don't ever access a function here that reads the incoming
        # form data!  Otherwise the application won't have access to that data
        # any more!
        request = Request(environ)
        response = self.debug_application
        if request.args.get('__debugger__') == 'yes':
            cmd = request.args.get('cmd')
            arg = request.args.get('f')
            secret = request.args.get('s')
            traceback = self.tracebacks.get(request.args.get('tb', type=int))
            frame = self.frames.get(request.args.get('frm', type=int))
            # All debugger commands except 'resource' require the shared
            # secret so that other sites cannot drive the debugger.
            if cmd == 'resource' and arg:
                response = self.get_resource(request, arg)
            elif cmd == 'paste' and traceback is not None and \
                    secret == self.secret:
                response = self.paste_traceback(request, traceback)
            elif cmd == 'source' and frame and self.secret == secret:
                response = self.get_source(request, frame)
            elif self.evalex and cmd is not None and frame is not None and \
                    self.secret == secret:
                response = self.execute_command(request, cmd, frame)
        elif self.evalex and self.console_path is not None and \
                request.path == self.console_path:
            response = self.display_console(request)
        return response(environ, start_response)
|
DazWorrall/ansible-modules-extras | refs/heads/devel | monitoring/logentries.py | 153 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Ivan Vanderbyl <ivan@app.io>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: logentries
author: "Ivan Vanderbyl (@ivanvanderbyl)"
short_description: Module for tracking logs via logentries.com
description:
- Sends logs to LogEntries in realtime
version_added: "1.6"
options:
path:
description:
- path to a log file
required: true
state:
description:
- following state of the log
choices: [ 'present', 'absent' ]
required: false
default: present
name:
description:
- name of the log
required: false
logtype:
description:
- type of the log
required: false
notes:
- Requires the LogEntries agent which can be installed following the instructions at logentries.com
'''
EXAMPLES = '''
- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log
- logentries: path=/var/log/nginx/error.log state=absent
'''
def query_log_status(module, le_path, path, state="present"):
    """ Returns whether a log is followed or not. """
    if state == "present":
        rc, _out, _err = module.run_command("%s followed %s" % (le_path, path))
        # The agent exits 0 when the log is already followed.
        return rc == 0
def follow_log(module, le_path, logs, name=None, logtype=None):
    """Start following each log in *logs* that is not already followed."""
    newly_followed = 0
    for log_path in logs:
        if query_log_status(module, le_path, log_path):
            continue

        if module.check_mode:
            module.exit_json(changed=True)

        cmd = [le_path, 'follow', log_path]
        if name:
            cmd.extend(['--name', name])
        if logtype:
            cmd.extend(['--type', logtype])
        rc, out, err = module.run_command(' '.join(cmd))

        # Re-query to verify the agent actually picked the log up.
        if not query_log_status(module, le_path, log_path):
            module.fail_json(msg="failed to follow '%s': %s" % (log_path, err.strip()))

        newly_followed += 1

    if newly_followed > 0:
        module.exit_json(changed=True, msg="followed %d log(s)" % (newly_followed,))
    module.exit_json(changed=False, msg="logs(s) already followed")
def unfollow_log(module, le_path, logs):
    """Stop following each log in *logs* that is currently followed."""
    removed = 0
    # Loop log-by-log so a failure can report the exact path that broke.
    for log_path in logs:
        # Query first: nothing to do for logs that are not followed.
        if not query_log_status(module, le_path, log_path):
            continue

        if module.check_mode:
            module.exit_json(changed=True)

        rc, out, err = module.run_command([le_path, 'rm', log_path])

        if query_log_status(module, le_path, log_path):
            module.fail_json(msg="failed to remove '%s': %s" % (log_path, err.strip()))

        removed += 1

    if removed > 0:
        module.exit_json(changed=True, msg="removed %d package(s)" % removed)
    module.exit_json(changed=False, msg="logs(s) already unfollowed")
def main():
    # Declare the module interface; 'followed'/'unfollowed' are accepted as
    # synonyms of 'present'/'absent'.
    module = AnsibleModule(
        argument_spec = dict(
            path = dict(required=True),
            state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
            name = dict(required=False, default=None, type='str'),
            logtype = dict(required=False, default=None, type='str', aliases=['type'])
        ),
        supports_check_mode=True
    )

    # Fail hard (second arg True) if the LogEntries CLI 'le' is not found.
    le_path = module.get_bin_path('le', True, ['/usr/local/bin'])

    p = module.params

    # Handle multiple log files: comma-separated, empty entries dropped.
    logs = p["path"].split(",")
    logs = filter(None, logs)

    if p["state"] in ["present", "followed"]:
        follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])

    elif p["state"] in ["absent", "unfollowed"]:
        unfollow_log(module, le_path, logs)

# import module snippets
from ansible.module_utils.basic import *

main()
|
kho0810/flaskr | refs/heads/master | migrations/versions/16776b812c23_.py | 1 | """empty message
Revision ID: 16776b812c23
Revises: 3f055fdcbbd0
Create Date: 2014-08-08 02:16:23.559345
"""
# revision identifiers, used by Alembic.
revision = '16776b812c23'
down_revision = '3f055fdcbbd0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Creates the 'article' table and the dependent 'comment' table
    # (comment.article_id -> article.id).
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('article',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=True),
    sa.Column('content', sa.Text(), nullable=True),
    sa.Column('author', sa.String(length=255), nullable=True),
    sa.Column('category', sa.String(length=255), nullable=True),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('comment',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('article_id', sa.Integer(), nullable=True),
    sa.Column('author', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('password', sa.String(length=255), nullable=True),
    sa.Column('content', sa.Text(), nullable=True),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('like', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['article_id'], ['article.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    # Reverse of upgrade(): 'comment' is dropped first because its foreign
    # key references 'article'.
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('comment')
    op.drop_table('article')
    ### end Alembic commands ###
|
rggibson/pb-tracker | refs/heads/master | pytz/__init__.py | 52 | '''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The Olson database is updated several times a year.
OLSON_VERSION = '2010h'
VERSION = OLSON_VERSION  # the pytz release version tracks the Olson data version
# Version format for a patch release - only one so far.
#VERSION = OLSON_VERSION + '.2'
__version__ = OLSON_VERSION

OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling

# Public API of the package.
__all__ = [
    'timezone', 'utc', 'country_timezones', 'country_names',
    'AmbiguousTimeError', 'InvalidTimeError',
    'NonExistentTimeError', 'UnknownTimeZoneError',
    'all_timezones', 'all_timezones_set',
    'common_timezones', 'common_timezones_set',
    'loader',
    ]
import sys, datetime, os.path, gettext
from UserDict import DictMixin
from UserList import UserList
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
from tzinfo import AmbiguousTimeError, InvalidTimeError, NonExistentTimeError
from tzinfo import unpickler
from tzfile import build_tzinfo
# Use 2.3 sets module implementation if set builtin is not available
try:
set
except NameError:
from sets import Set as set
class TimezoneLoader(object):
    """Loads zoneinfo resources from disk (or pkg_resources as a fallback)
    and caches which ones exist."""

    def __init__(self):
        # name -> bool cache consulted by resource_exists()
        self.available = {}

    def open_resource(self, name):
        """Open a resource from the zoneinfo subdir for reading.

        Uses the pkg_resources module if available and no standard file
        found at the calculated location.
        """
        parts = name.lstrip('/').split('/')
        for segment in parts:
            # Reject '..' and embedded path separators (path traversal).
            if segment == os.path.pardir or os.path.sep in segment:
                raise ValueError('Bad path segment: %r' % segment)
        candidate = os.path.join(os.path.dirname(__file__),
                                 'zoneinfo', *parts)
        if not os.path.exists(candidate) and resource_stream is not None:
            # http://bugs.launchpad.net/bugs/383171 - we avoid using this
            # unless absolutely necessary to help when a broken version of
            # pkg_resources is installed.
            return resource_stream(__name__, 'zoneinfo/' + name)
        return open(candidate, 'rb')

    def resource_exists(self, name):
        """Return true if the given resource exists"""
        if name not in self.available:
            try:
                self.open_resource(name)
            except IOError:
                self.available[name] = False
            else:
                self.available[name] = True
        return self.available[name]
# Module-level singleton loader plus thin functional wrappers around it.
loader = TimezoneLoader()


def open_resource(name):
    """Open the named zoneinfo resource via the module's loader."""
    return loader.open_resource(name)


def resource_exists(name):
    """Return True if the named zoneinfo resource exists."""
    return loader.resource_exists(name)
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
class UnknownTimeZoneError(KeyError):
    '''Exception raised when pytz is passed an unknown timezone.

    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True

    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.

    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    '''
    pass
# Cache of already-built tzinfo instances, keyed by zone name.
_tzinfo_cache = {}

def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(u'US/Eastern') is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> timezone('Asia/Shangri-La')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: 'Asia/Shangri-La'

    >>> timezone(u'\N{TRADE MARK SIGN}')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: u'\u2122'
    '''
    if zone.upper() == 'UTC':
        return utc

    # Python 2 semantics: encoding a unicode zone name down to str.
    try:
        zone = zone.encode('US-ASCII')
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)

    # Undo the '_plus_'/'_minus_' munging of very old pytz releases.
    zone = _unmunge_zone(zone)
    if zone not in _tzinfo_cache:
        if resource_exists(zone):
            _tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))
        else:
            raise UnknownTimeZoneError(zone)

    return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
# Convenience deltas shared by the tzinfo implementations below.
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)


class UTC(datetime.tzinfo):
    """UTC

    Identical to the reference UTC implementation given in Python docs except
    that it unpickles using the single module global instance defined beneath
    this class declaration.

    Also contains extra attributes and methods to match other pytz tzinfo
    instances.
    """
    zone = "UTC"

    def utcoffset(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return ZERO

    def __reduce__(self):
        # Unpickle through the _UTC factory so the singleton is preserved.
        return _UTC, ()

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            # Fix: call-form raise (valid on Python 2 AND 3); the original
            # used the Python 2-only `raise E, msg` statement form.
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime'''
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.replace(tzinfo=self)

    def __repr__(self):
        return "<UTC>"

    def __str__(self):
        return "UTC"

UTC = utc = UTC() # UTC is a singleton
def _UTC():
    """Factory function for utc unpickling.

    Makes sure that unpickling a utc instance always returns the same
    module global.

    These examples belong in the UTC class above, but it is obscured; or in
    the README.txt, but we are not depending on Python 2.4 so integrating
    the README.txt examples with the unit tests is not trivial.

    >>> import datetime, pickle
    >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
    >>> naive = dt.replace(tzinfo=None)
    >>> p = pickle.dumps(dt, 1)
    >>> naive_p = pickle.dumps(naive, 1)
    >>> len(p), len(naive_p), len(p) - len(naive_p)
    (60, 43, 17)
    >>> new = pickle.loads(p)
    >>> new == dt
    True
    >>> new is dt
    False
    >>> new.tzinfo is dt.tzinfo
    True

    >>> utc is UTC is timezone('UTC')
    True
    >>> utc is timezone('GMT')
    False
    """
    return utc
# Marker consulted by old pickle protocols before calling the factory.
_UTC.__safe_for_unpickling__ = True
def _p(*args):
    """Factory function for unpickling pytz tzinfo instances.

    Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
    by shortening the path.
    """
    return unpickler(*args)
# Marker consulted by old pickle protocols before calling the factory.
_p.__safe_for_unpickling__ = True
class _LazyDict(DictMixin):
    """Dictionary populated on first use.

    Subclasses implement _fill(), which must assign a dict to self.data.
    """
    data = None

    def __getitem__(self, key):
        if self.data is None:
            self._fill()
        # Lookups upper-case the key, so _fill() is expected to store
        # upper-cased keys.
        return self.data[key.upper()]

    def keys(self):
        if self.data is None:
            self._fill()
        return self.data.keys()
class _LazyList(UserList):
    """List whose contents are produced lazily by a callable on first use."""

    def __init__(self, func):
        self._data = None
        self._build = func

    @property
    def data(self):
        # UserList keeps its contents in .data; build them on demand.
        if self._data is None:
            self._data = self._build()
        return self._data
class _CountryTimezoneDict(_LazyDict):
    """Map ISO 3166 country code to a list of timezone names commonly used
    in that country.

    iso3166_code is the two letter code used to identify the country.

    >>> country_timezones['ch']
    ['Europe/Zurich']
    >>> country_timezones['CH']
    ['Europe/Zurich']
    >>> country_timezones[u'ch']
    ['Europe/Zurich']
    >>> country_timezones['XXX']
    Traceback (most recent call last):
    ...
    KeyError: 'XXX'

    Previously, this information was exposed as a function rather than a
    dictionary. This is still supported::

    >>> country_timezones('nz')
    ['Pacific/Auckland', 'Pacific/Chatham']
    """
    def __call__(self, iso3166_code):
        """Backwards compatibility."""
        return self[iso3166_code]

    def _fill(self):
        # Parse the bundled zone.tab: whitespace-separated columns of
        # country code, coordinates, zone name (comment lines start with #).
        data = {}
        zone_tab = open_resource('zone.tab')
        for line in zone_tab:
            if line.startswith('#'):
                continue
            code, coordinates, zone = line.split(None, 4)[:3]
            # Skip zones whose data files are not actually shipped.
            if not resource_exists(zone):
                continue
            try:
                data[code].append(zone)
            except KeyError:
                data[code] = [zone]
        self.data = data

country_timezones = _CountryTimezoneDict()
class _CountryNameDict(_LazyDict):
    '''Dictionary providing ISO3166 code -> English name.

    >>> country_names['au']
    'Australia'
    '''
    def _fill(self):
        # iso3166.tab maps a country code to its English name, one per line;
        # comment lines start with '#'.
        data = {}
        zone_tab = open_resource('iso3166.tab')
        for line in zone_tab.readlines():
            if line.startswith('#'):
                continue
            code, name = line.split(None, 1)
            data[code] = name.strip()
        self.data = data

country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return None
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError, 'Not naive datetime (tzinfo is already set)'
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is None:
raise ValueError, 'Naive time - no tzinfo set'
return dt.replace(tzinfo=self)
def FixedOffset(offset, _tzinfos = {}):
    """return a fixed-offset timezone based off a number of minutes.

    >>> one = FixedOffset(-330)
    >>> one
    pytz.FixedOffset(-330)
    >>> one.utcoffset(datetime.datetime.now())
    datetime.timedelta(-1, 66600)

    >>> two = FixedOffset(1380)
    >>> two
    pytz.FixedOffset(1380)
    >>> two.utcoffset(datetime.datetime.now())
    datetime.timedelta(0, 82800)

    The datetime.timedelta must be between the range of -1 and 1 day,
    non-inclusive.

    >>> FixedOffset(1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', 1440)

    >>> FixedOffset(-1440)
    Traceback (most recent call last):
    ...
    ValueError: ('absolute offset is too large', -1440)

    An offset of 0 is special-cased to return UTC.

    >>> FixedOffset(0) is UTC
    True

    There should always be only one instance of a FixedOffset per timedelta.
    This should be true for multiple creation calls.

    >>> FixedOffset(-330) is one
    True
    >>> FixedOffset(1380) is two
    True

    It should also be true for pickling.

    >>> import pickle
    >>> pickle.loads(pickle.dumps(one)) is one
    True
    >>> pickle.loads(pickle.dumps(two)) is two
    True
    """
    # NOTE: the mutable default `_tzinfos` is intentional — it is the
    # shared memo table that deduplicates instances across calls.
    if offset == 0:
        return UTC

    info = _tzinfos.get(offset)
    if info is None:
        # We haven't seen this one before. we need to save it.
        # Use setdefault to avoid a race condition and make sure we have
        # only one
        info = _tzinfos.setdefault(offset, _FixedOffset(offset))
    return info
# Marker consulted by old pickle protocols before calling the factory.
FixedOffset.__safe_for_unpickling__ = True
def _test():
    """Run the pytz doctest suite.

    The parent directory is pushed onto sys.path first so that the
    freshly checked-out package is the one imported and tested.
    """
    import doctest
    import os
    import sys
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)
# Run the doctest suite when this module is executed as a script.
if __name__ == '__main__':
    _test()
all_timezones_unfiltered = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
# Lazily narrow the raw name list down to the zones whose data files are
# actually present in this installation; the resource_exists() scan is
# deferred until the list is first touched (via _LazyList).
all_timezones = _LazyList(
    lambda: filter(resource_exists, all_timezones_unfiltered)
    )
# NOTE(review): the set is built from the *unfiltered* list, so it can
# contain names that are absent from all_timezones -- confirm intended.
all_timezones_set = set(all_timezones_unfiltered) # XXX
common_timezones_unfiltered = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Martinique',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Vostok',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Helsinki',
'Europe/Istanbul',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Lisbon',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/Simferopol',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Chatham',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
# Same lazy-filtering scheme as all_timezones, applied to the curated
# "common" subset of zone names.
common_timezones = _LazyList(
    lambda: filter(resource_exists, common_timezones_unfiltered)
    )
# NOTE(review): built from the unfiltered list; may include names not in
# common_timezones -- confirm intended.
common_timezones_set = set(common_timezones_unfiltered) # XXX
|
deejross/python3-pywbem | refs/heads/master | twisted_client.py | 1 | #
# (C) Copyright 2005,2007 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Author: Tim Potter <tpot@hp.com>
"""pywbem.twisted - WBEM client bindings for Twisted Python.
This module contains factory classes that produce WBEMClient instances
that perform WBEM requests over HTTP using the
twisted.protocols.http.HTTPClient base class.
"""
import base64
import six
from datetime import datetime, timedelta
import urllib
try:
from elementtree.ElementTree import fromstring, tostring
except ImportError as arg:
from xml.etree.ElementTree import fromstring, tostring
from twisted.internet import reactor, protocol, defer
from twisted.web import http #, client, error
# TODO: Eww - we should get rid of the tupletree, tupleparse modules
# and replace with elementtree based code.
from . import cim_types, cim_xml, cim_obj, tupleparse, tupletree
from .cim_obj import CIMClass, CIMClassName, CIMInstance, CIMInstanceName, tocimxml
from .cim_operations import CIMError, DEFAULT_NAMESPACE
from .cim_types import CIMDateTime
from .cim_xml import IPARAMVALUE
class WBEMClient(http.HTTPClient):
    """A HTTPClient subclass that handles WBEM requests."""

    # HTTP status code of the response, set by handleStatus().
    status = None

    def connectionMade(self):
        """Send a HTTP POST command with the appropriate CIM over HTTP
        headers and payload."""
        self.factory.request_xml = str(self.factory.payload)

        self.sendCommand('POST', '/cimom')

        self.sendHeader('Host', '%s:%d' %
                        (self.transport.addr[0], self.transport.addr[1]))
        self.sendHeader('User-Agent', 'pywbem/twisted')
        self.sendHeader('Content-length', len(self.factory.payload))
        self.sendHeader('Content-type', 'application/xml')

        if self.factory.creds:
            # HTTP Basic auth built from the (username, password) tuple.
            auth = base64.b64encode('%s:%s' % (self.factory.creds[0],
                                               self.factory.creds[1]))
            self.sendHeader('Authorization', 'Basic %s' % auth)

        self.sendHeader('CIMOperation', str(self.factory.operation))
        self.sendHeader('CIMMethod', str(self.factory.method))
        self.sendHeader('CIMObject', str(self.factory.object))

        self.endHeaders()

        # TODO: Figure out why twisted doesn't support unicode. An
        # exception should be thrown by the str() call if the payload
        # can't be converted to the current codepage.
        self.transport.write(str(self.factory.payload))

    def handleResponse(self, data):
        """Called when all response data has been received."""
        self.factory.response_xml = data
        if self.status == '200':
            self.factory.parseErrorAndResponse(data)
        self.factory.deferred = None
        self.transport.loseConnection()

    def handleStatus(self, version, status, message):
        """Save the status code for processing when we get to the end
        of the headers."""
        self.status = status
        self.message = message

    def handleHeader(self, key, value):
        """Save CIM error headers for use at end-of-headers time."""
        # BUG FIX: urllib.unquote moved to urllib.parse in Python 3;
        # import the right one locally so both interpreters work.
        try:
            from urllib.parse import unquote
        except ImportError:
            from urllib import unquote
        if key == 'CIMError':
            self.CIMError = unquote(value)
        if key == 'PGErrorDetail':
            self.PGErrorDetail = unquote(value)

    def handleEndHeaders(self):
        """Check whether the status was OK and raise an error if not
        using previously saved header information."""
        if self.status != '200':
            # BUG FIX: the original tested hasattr(self, 'cimerror') and
            # hasattr(self, 'errordetail') -- attributes that are never
            # set (handleHeader stores CIMError / PGErrorDetail) -- and
            # then referenced the bare names cimerror/errordetail, which
            # raised NameError instead of reporting the CIM error.
            if not hasattr(self, 'CIMError') or \
               not hasattr(self, 'PGErrorDetail'):
                self.factory.deferred.errback(
                    CIMError(0, 'HTTP error %s: %s' %
                             (self.status, self.message)))
            else:
                self.factory.deferred.errback(
                    CIMError(0, '%s: %s' % (self.CIMError,
                                            self.PGErrorDetail)))
class WBEMClientFactory(protocol.ClientFactory):
    """Create instances of the WBEMClient class."""

    # Raw XML captured for debugging: the request as sent and the
    # response as received.
    request_xml = None
    response_xml = None
    # Prepended to every generated CIM-XML payload.
    xml_header = '<?xml version="1.0" encoding="utf-8" ?>'

    def __init__(self, creds, operation, method, object, payload):
        # creds: credentials tuple used for HTTP Basic auth (or falsy).
        # operation/method/object: values for the CIMOperation, CIMMethod
        # and CIMObject HTTP headers.  payload: CIM-XML request body.
        self.creds = creds
        self.operation = operation
        self.method = method
        self.object = object
        self.payload = payload
        self.protocol = lambda: WBEMClient()
        # Fired with the parsed result, or errback'd with a CIMError.
        self.deferred = defer.Deferred()

    def clientConnectionFailed(self, connector, reason):
        # Propagate connection failures through the deferred; callLater
        # defers the errback until the reactor is back in control.
        if self.deferred is not None:
            reactor.callLater(0, self.deferred.errback, reason)

    def clientConnectionLost(self, connector, reason):
        # A lost connection is also surfaced as an error on the deferred.
        if self.deferred is not None:
            reactor.callLater(0, self.deferred.errback, reason)

    def imethodcallPayload(self, methodname, localnsp, **kwargs):
        """Generate the XML payload for an intrinsic methodcall."""
        # Each keyword argument becomes one IPARAMVALUE element.
        param_list = [IPARAMVALUE(x[0], tocimxml(x[1]))
                      for x in kwargs.items()]

        payload = cim_xml.CIM(
            cim_xml.MESSAGE(
                cim_xml.SIMPLEREQ(
                    cim_xml.IMETHODCALL(
                        methodname,
                        cim_xml.LOCALNAMESPACEPATH(
                            [cim_xml.NAMESPACE(ns)
                             for ns in localnsp.split('/')]),
                        param_list)),
                '1001', '1.0'),
            '2.0', '2.0')

        return self.xml_header + payload.toxml()

    def methodcallPayload(self, methodname, obj, namespace, **kwargs):
        """Generate the XML payload for an extrinsic methodcall."""
        # Target an instance (LOCALINSTANCEPATH, host/namespace stripped
        # from a copy of the path) or a class (LOCALCLASSPATH).
        if isinstance(obj, CIMInstanceName):

            path = obj.copy()
            path.host = None
            path.namespace = None
            localpath = cim_xml.LOCALINSTANCEPATH(
                cim_xml.LOCALNAMESPACEPATH(
                    [cim_xml.NAMESPACE(ns)
                     for ns in namespace.split('/')]),
                path.tocimxml())
        else:
            localpath = cim_xml.LOCALCLASSPATH(
                cim_xml.LOCALNAMESPACEPATH(
                    [cim_xml.NAMESPACE(ns)
                     for ns in namespace.split('/')]),
                obj)

        def paramtype(obj):
            """Return a string to be used as the CIMTYPE for a parameter."""
            if isinstance(obj, cim_types.CIMType):
                return obj.cimtype
            elif type(obj) == bool:
                return 'boolean'
            elif isinstance(obj, six.string_types):
                return 'string'
            elif isinstance(obj, (datetime, timedelta)):
                return 'datetime'
            elif isinstance(obj, (CIMClassName, CIMInstanceName)):
                return 'reference'
            elif isinstance(obj, (CIMClass, CIMInstance)):
                return 'string'
            elif isinstance(obj, list):
                # Arrays report the type of their first element.
                return paramtype(obj[0])
            raise TypeError('Unsupported parameter type "%s"' % type(obj))

        def paramvalue(obj):
            """Return a cim_xml node to be used as the value for a
            parameter."""
            if isinstance(obj, (datetime, timedelta)):
                # Normalise stdlib date/time objects to CIMDateTime first.
                obj = CIMDateTime(obj)
            if isinstance(obj, (cim_types.CIMType, bool) + six.string_types):
                return cim_xml.VALUE(cim_types.atomic_to_cim_xml(obj))
            if isinstance(obj, (CIMClassName, CIMInstanceName)):
                return cim_xml.VALUE_REFERENCE(obj.tocimxml())
            if isinstance(obj, (CIMClass, CIMInstance)):
                # Embedded objects travel as their XML in a string VALUE.
                return cim_xml.VALUE(obj.tocimxml().toxml())
            if isinstance(obj, list):
                if isinstance(obj[0], (CIMClassName, CIMInstanceName)):
                    return cim_xml.VALUE_REFARRAY([paramvalue(x) for x in obj])
                return cim_xml.VALUE_ARRAY([paramvalue(x) for x in obj])
            raise TypeError('Unsupported parameter type "%s"' % type(obj))

        param_list = [cim_xml.PARAMVALUE(x[0],
                                         paramvalue(x[1]),
                                         paramtype(x[1]))
                      for x in kwargs.items()]

        payload = cim_xml.CIM(
            cim_xml.MESSAGE(
                cim_xml.SIMPLEREQ(
                    cim_xml.METHODCALL(methodname,
                                       localpath,
                                       param_list)),
                '1001', '1.0'),
            '2.0', '2.0')

        return self.xml_header + payload.toxml()

    def parseErrorAndResponse(self, data):
        """Parse returned XML for errors, then convert into
        appropriate Python objects."""
        xml = fromstring(data)
        error = xml.find('.//ERROR')

        if error is None:
            # No ERROR element: hand the parsed document to the
            # subclass-specific parseResponse().
            self.deferred.callback(self.parseResponse(xml))
            return

        try:
            code = int(error.attrib['CODE'])
        except ValueError:
            # Non-numeric CODE attribute: report as unknown error code.
            # NOTE(review): a missing CODE attribute would raise KeyError
            # here, which is not caught -- confirm CODE is mandatory.
            code = 0

        self.deferred.errback(CIMError(code, error.attrib['DESCRIPTION']))

    def parseResponse(self, xml):
        """Parse returned XML and convert into appropriate Python
        objects.  Override in subclass"""
        pass
class EnumerateInstances(WBEMClientFactory):
    """Factory to produce EnumerateInstances WBEM clients."""

    def __init__(self, creds, classname, namespace='root/cimv2', **kwargs):
        # classname: CIM class whose instances are enumerated; extra
        # keyword arguments become IPARAMVALUEs on the request.
        self.classname = classname
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'EnumerateInstances',
            namespace,
            ClassName=CIMClassName(classname),
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='EnumerateInstances',
            object=namespace,
            payload=payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.classname, id(self))

    def parseResponse(self, xml):
        # Each VALUE.NAMEDINSTANCE element becomes one CIMInstance.
        tt = [tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//VALUE.NAMEDINSTANCE')]

        return [tupleparse.parse_value_namedinstance(x) for x in tt]
class EnumerateInstanceNames(WBEMClientFactory):
    """Factory to produce EnumerateInstanceNames WBEM clients."""

    def __init__(self, creds, classname, namespace='root/cimv2', **kwargs):
        # classname: CIM class whose instance paths are enumerated;
        # extra keyword arguments become IPARAMVALUEs on the request.
        self.classname = classname
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'EnumerateInstanceNames',
            namespace,
            ClassName=CIMClassName(classname),
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='EnumerateInstanceNames',
            object=namespace,
            payload=payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.classname, id(self))

    def parseResponse(self, xml):
        """Return the INSTANCENAME elements as CIMInstanceName objects,
        each tagged with the namespace this request targeted."""
        tt = [tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//INSTANCENAME')]

        names = [tupleparse.parse_instancename(x) for x in tt]

        # Plain loop for the side effect; the original abused a list
        # comprehension, building a throwaway list of Nones.
        for name in names:
            name.namespace = self.namespace

        return names
class GetInstance(WBEMClientFactory):
    """Factory to produce GetInstance WBEM clients."""

    def __init__(self, creds, instancename, namespace='root/cimv2', **kwargs):
        # instancename: CIMInstanceName path of the instance to fetch.
        self.instancename = instancename
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'GetInstance',
            namespace,
            InstanceName=instancename,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='GetInstance',
            object=namespace,
            payload=payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.instancename, id(self))

    def parseResponse(self, xml):
        # Exactly one INSTANCE element is expected in the reply.
        tt = tupletree.xml_to_tupletree(
            tostring(xml.find('.//INSTANCE')))

        return tupleparse.parse_instance(tt)
class DeleteInstance(WBEMClientFactory):
    """Factory to produce DeleteInstance WBEM clients."""

    def __init__(self, creds, instancename, namespace='root/cimv2', **kwargs):
        # instancename: CIMInstanceName path of the instance to delete.
        # No parseResponse override: DeleteInstance returns no data, so
        # the base class default (None) is the callback value.
        self.instancename = instancename
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'DeleteInstance',
            namespace,
            InstanceName=instancename,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='DeleteInstance',
            object=namespace,
            payload=payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.instancename, id(self))
class CreateInstance(WBEMClientFactory):
    """Factory to produce CreateInstance WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, instance, namespace='root/cimv2', **kwargs):
        # instance: the CIMInstance to create (sent as NewInstance).
        payload = self.imethodcallPayload(
            'CreateInstance',
            namespace,
            NewInstance=instance,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='CreateInstance',
            object=namespace,
            payload=payload)

    def parseResponse(self, xml):
        # The reply carries the path of the newly created instance.
        tt = tupletree.xml_to_tupletree(
            tostring(xml.find('.//INSTANCENAME')))

        return tupleparse.parse_instancename(tt)
class ModifyInstance(WBEMClientFactory):
    """Factory to produce ModifyInstance WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, instancename, instance, namespace='root/cimv2',
                 **kwargs):
        # NOTE(review): wrapping the path and instance in CIMInstanceName
        # looks suspect -- elsewhere in this file CIMInstanceName is a
        # *path* type.  The ModifiedInstance parameter should be a named
        # instance (path + instance); verify against cim_obj/cim_xml.
        wrapped_instance = CIMInstanceName(instancename, instance)

        payload = self.imethodcallPayload(
            'ModifyInstance',
            namespace,
            ModifiedInstance=wrapped_instance,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='ModifyInstance',
            object=namespace,
            payload=payload)
class EnumerateClassNames(WBEMClientFactory):
    """Factory to produce EnumerateClassNames WBEM clients."""

    def __init__(self, creds, namespace='root/cimv2', **kwargs):
        # BUG FIX: self.namespace was never assigned although __repr__
        # reads it (AttributeError), and the CIMObject header was sent as
        # DEFAULT_NAMESPACE, silently ignoring the namespace argument --
        # every sibling factory sends the requested namespace.
        self.namespace = namespace
        self.localnsp = DEFAULT_NAMESPACE  # kept for backward compatibility

        payload = self.imethodcallPayload(
            'EnumerateClassNames',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='EnumerateClassNames',
            object=namespace,
            payload=payload)

    def __repr__(self):
        return '<%s(/%s) at 0x%x>' % \
            (self.__class__, self.namespace, id(self))

    def parseResponse(self, xml):
        # Each CLASSNAME element becomes one parsed class name.
        tt = [tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//CLASSNAME')]

        return [tupleparse.parse_classname(x) for x in tt]
class EnumerateClasses(WBEMClientFactory):
    """Factory to produce EnumerateClasses WBEM clients."""

    def __init__(self, creds, namespace='root/cimv2', **kwargs):
        # BUG FIX: self.namespace was never assigned although __repr__
        # reads it, so repr() raised AttributeError.
        self.namespace = namespace
        self.localnsp = DEFAULT_NAMESPACE  # kept for backward compatibility

        payload = self.imethodcallPayload(
            'EnumerateClasses',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='EnumerateClasses',
            object=namespace,
            payload=payload)

    def __repr__(self):
        return '<%s(/%s) at 0x%x>' % \
            (self.__class__, self.namespace, id(self))

    def parseResponse(self, xml):
        # Each CLASS element becomes one parsed CIMClass.
        tt = [tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//CLASS')]

        return [tupleparse.parse_class(x) for x in tt]
class GetClass(WBEMClientFactory):
    """Factory to produce GetClass WBEM clients."""

    def __init__(self, creds, classname, namespace='root/cimv2', **kwargs):
        # classname: name of the CIM class definition to retrieve.
        self.classname = classname
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'GetClass',
            namespace,
            ClassName=CIMClassName(classname),
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='GetClass',
            object=namespace,
            payload=payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.classname, id(self))

    def parseResponse(self, xml):
        # Exactly one CLASS element is expected in the reply.
        tt = tupletree.xml_to_tupletree(
            tostring(xml.find('.//CLASS')))

        return tupleparse.parse_class(tt)
class Associators(WBEMClientFactory):
    """Factory to produce Associators WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, obj, namespace='root/cimv2', **kwargs):
        # obj: either a CIMInstanceName (instance-level request) or a
        # class name, which is wrapped in CIMClassName.
        if isinstance(obj, CIMInstanceName):
            kwargs['ObjectName'] = obj
        else:
            kwargs['ObjectName'] = CIMClassName(obj)

        payload = self.imethodcallPayload(
            'Associators',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation='MethodCall',
            method='Associators',
            object=namespace,
            payload=payload)
class AssociatorNames(WBEMClientFactory):
    """Factory to produce AssociatorNames WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, obj, namespace='root/cimv2', **kwargs):
        """obj may be a CIMInstanceName or a class-name string."""
        if isinstance(obj, CIMInstanceName):
            target = obj
        else:
            target = CIMClassName(obj)
        kwargs['ObjectName'] = target
        payload = self.imethodcallPayload(
            'AssociatorNames',
            namespace,
            **kwargs)
        WBEMClientFactory.__init__(
            self, creds,
            operation='MethodCall',
            method='AssociatorNames',
            object=namespace,
            payload=payload)

    def parseResponse(self, xml):
        """Return CIMInstanceNames, or paths from OBJECTPATH elements."""
        instance_nodes = xml.findall('.//INSTANCENAME')
        if instance_nodes:
            return [tupleparse.parse_instancename(
                        tupletree.xml_to_tupletree(tostring(node)))
                    for node in instance_nodes]
        return [tupleparse.parse_objectpath(
                    tupletree.xml_to_tupletree(tostring(node)))[2]
                for node in xml.findall('.//OBJECTPATH')]
class References(WBEMClientFactory):
    """Factory to produce References WBEM clients."""

    def __init__(self, creds, obj, namespace='root/cimv2', **kwargs):
        """obj may be a CIMInstanceName or a class-name string."""
        if isinstance(obj, CIMInstanceName):
            target = obj
        else:
            target = CIMClassName(obj)
        kwargs['ObjectName'] = target
        payload = self.imethodcallPayload(
            'References',
            namespace,
            **kwargs)
        WBEMClientFactory.__init__(
            self, creds,
            operation='MethodCall',
            method='References',
            object=namespace,
            payload=payload)
class ReferenceNames(WBEMClientFactory):
    """Factory to produce ReferenceNames WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, obj, namespace='root/cimv2', **kwargs):
        """obj may be a CIMInstanceName or a class-name string."""
        if isinstance(obj, CIMInstanceName):
            target = obj
        else:
            target = CIMClassName(obj)
        kwargs['ObjectName'] = target
        payload = self.imethodcallPayload(
            'ReferenceNames',
            namespace,
            **kwargs)
        WBEMClientFactory.__init__(
            self, creds,
            operation='MethodCall',
            method='ReferenceNames',
            object=namespace,
            payload=payload)

    def parseResponse(self, xml):
        """Return CIMInstanceNames, or paths from OBJECTPATH elements."""
        instance_nodes = xml.findall('.//INSTANCENAME')
        if instance_nodes:
            return [tupleparse.parse_instancename(
                        tupletree.xml_to_tupletree(tostring(node)))
                    for node in instance_nodes]
        return [tupleparse.parse_objectpath(
                    tupletree.xml_to_tupletree(tostring(node)))[2]
                for node in xml.findall('.//OBJECTPATH')]
class InvokeMethod(WBEMClientFactory):
    """Factory to produce InvokeMethod WBEM clients."""

    def __init__(self, creds, MethodName, ObjectName, namespace='root/cimv2',
                 **kwargs):
        """Build an extrinsic method call on *ObjectName*.

        ObjectName -- a CIMInstanceName, or a class-name string that is
        wrapped into a CIMClassName bound to *namespace*.
        """
        # Normalise the target: strings become class names, and instance
        # names without a namespace inherit this call's namespace.
        obj = ObjectName
        if isinstance(obj, six.string_types):
            obj = CIMClassName(obj, namespace=namespace)
        if isinstance(obj, CIMInstanceName) and obj.namespace is None:
            obj = ObjectName.copy()
            obj.namespace = namespace
        # Build and dispatch the method-call payload.
        payload = self.methodcallPayload(
            MethodName,
            obj,
            namespace,
            **kwargs)
        WBEMClientFactory.__init__(
            self, creds,
            operation='MethodCall',
            method=MethodName,
            object=obj,
            payload=payload)

    def parseResponse(self, xml):
        """Return (return_value, output_parameters) from the reply."""
        # Method return value, converted via its declared PARAMTYPE.
        return_tt = tupleparse.parse_any(
            tupletree.xml_to_tupletree(
                tostring(xml.find('.//RETURNVALUE'))))
        return_value = cim_obj.tocimobj(return_tt[1]['PARAMTYPE'],
                                        return_tt[2])
        # Output parameters: references pass through unconverted.
        out_params = {}
        for node in xml.findall('.//PARAMVALUE'):
            parsed = tupleparse.parse_any(
                tupletree.xml_to_tupletree(tostring(node)))
            if parsed[1] == 'reference':
                out_params[parsed[0]] = parsed[2]
            else:
                out_params[parsed[0]] = cim_obj.tocimobj(parsed[1],
                                                         parsed[2])
        return (return_value, out_params)
|
QGuLL/samba | refs/heads/master | python/samba/netcmd/sites.py | 40 | # sites management
#
# Copyright Matthieu Patou <mat@matws.net> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from samba import sites
from samba.samdb import SamDB
import samba.getopt as options
from samba.auth import system_session
from samba.netcmd import (
Command,
CommandError,
SuperCommand
)
class cmd_sites_create(Command):
    """Create a new site."""

    synopsis = "%prog <site> [options]"

    takes_args = ["sitename"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    def run(self, sitename, sambaopts=None, credopts=None, versionopts=None):
        """Create *sitename* in the configuration partition of sam.ldb.

        Raises CommandError if the database is missing or the site
        already exists.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        url = lp.private_path("sam.ldb")
        if not os.path.exists(url):
            raise CommandError("secret database not found at %s " % url)
        samdb = SamDB(url=url, session_info=system_session(),
                      credentials=creds, lp=lp)
        samdb.transaction_start()
        try:
            # Fix: use the modern `except ... as` syntax (the old
            # `except X, e` form is a syntax error under Python 3 and
            # `as` works from Python 2.6); also drop the unused `ok`
            # binding.
            sites.create_site(samdb, samdb.get_config_basedn(), sitename)
            samdb.transaction_commit()
        except sites.SiteAlreadyExistsException as e:
            samdb.transaction_cancel()
            raise CommandError(
                "Error while creating site %s, error: %s" % (sitename, str(e)))
        self.outf.write("Site %s created !\n" % sitename)
class cmd_sites_delete(Command):
    """Delete an existing site."""

    synopsis = "%prog <site> [options]"

    takes_args = ["sitename"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    def run(self, sitename, sambaopts=None, credopts=None, versionopts=None):
        """Remove *sitename* from the configuration partition of sam.ldb.

        Raises CommandError if the database is missing or the removal
        fails.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        url = lp.private_path("sam.ldb")
        if not os.path.exists(url):
            raise CommandError("secret database not found at %s " % url)
        samdb = SamDB(url=url, session_info=system_session(),
                      credentials=creds, lp=lp)
        samdb.transaction_start()
        try:
            # Fix: use the modern `except ... as` syntax (the old
            # `except X, e` form is a syntax error under Python 3 and
            # `as` works from Python 2.6); also drop the unused `ok`
            # binding.
            sites.delete_site(samdb, samdb.get_config_basedn(), sitename)
            samdb.transaction_commit()
        except sites.SiteException as e:
            samdb.transaction_cancel()
            raise CommandError(
                "Error while removing site %s, error: %s" % (sitename, str(e)))
        self.outf.write("Site %s removed!\n" % sitename)
class cmd_sites(SuperCommand):
    """Sites management."""

    # Map each subcommand name to its handler instance.
    subcommands = {
        "create": cmd_sites_create(),
        "remove": cmd_sites_delete(),
    }
|
zinic/rpyle | refs/heads/master | src/rpyle/main.py | 1 | import os
import sys
import time
import random
import hashlib
import rpyle.webservice
import rpyle.http_server
import rpyle.system_provider
import chuckbox.log as log
import multiprocessing as mp
import chuckbox.project as project
# Module-level logger for this entry-point module.
_LOG = log.get_logger(__name__)
def init():
    """Configure debug logging to the console and log the package version."""
    manager = log.get_log_manager()
    manager.configure({
        'level': 'DEBUG',
        'console_enabled': True})
    info = project.about(__package__)
    _LOG.info('About: {}'.format(info.version))
def start_webapp(randserve):
    """Build the web app around the *randserve* pipe end and serve it."""
    application = rpyle.webservice.new_app(randserve)
    rpyle.http_server.serve(application)
# Wire a pipe to the web front-end and run it in a child process.
rpyle_server_comm, webserver_comm = mp.Pipe()
webapp_proc = mp.Process(
    name='ryple_webserver',
    target=start_webapp,
    args=(webserver_comm,))
webapp_proc.start()
# NOTE(review): `hasher` is created but never used below — confirm
# whether hashing/whitening of the entropy pool was intended.
hasher = hashlib.new('sha1')
# Pool of raw entropy bytes harvested from the sources below.
rdata_pool = list()
# Entropy sources: audio noise captured via arecord, plus memory and
# CPU statistics providers.
rdata_sources = [rpyle.system_provider.ProcessProvider([
    'arecord',
    '-f',
    'dat',
    '-B',
    '1024'
]),
    rpyle.system_provider.MemoryStatisticsProvider(),
    rpyle.system_provider.CPUStatisticsProvider()
]
for source in rdata_sources:
    source.start()
timeout = 0
default_response = tuple()
last_display_time = time.time()
# Main loop: keep the pool topped up to 262144 bytes and serve
# 128-byte chunks to the web process on request.
while True:
    entropy_amount = len(rdata_pool)
    if entropy_amount < 262144:
        had_ready_sources = False
        for source in rdata_sources:
            if source.ready():
                had_ready_sources = True
                rdata = source.read(64)
                rdata_pool.extend(rdata)
        # Poll again immediately while data is flowing; back off briefly
        # when no source was ready, and sleep longer once the pool is full.
        if had_ready_sources:
            timeout = 0
        else:
            timeout = 0.1
    else:
        timeout = 1
    now = time.time()
    # Log the pool size at most once every 7.5 seconds.
    if now - last_display_time > 7.5:
        last_display_time = now
        _LOG.info('Gathered {} bytes of entropy.'.format(entropy_amount))
    # Answer a ['get', ...] request with up to 128 bytes from the pool;
    # any other (or unfulfillable) request gets the empty-tuple default.
    if rpyle_server_comm.poll(timeout):
        request = rpyle_server_comm.recv()
        response = default_response
        if isinstance(request, list) and len(request) > 0:
            command = request[0]
            if command == 'get':
                if len(rdata_pool) > 128:
                    response = rdata_pool[:128]
                    rdata_pool = rdata_pool[128:]
        rpyle_server_comm.send(response)
|
jasonwzhy/django | refs/heads/master | tests/migrations/test_migrations_squashed_complex/2_auto.py | 770 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Squashed-migration test fixture: depends on 1_auto and runs a
    # single no-op RunPython operation.

    dependencies = [
        ("migrations", "1_auto"),
    ]

    operations = [
        migrations.RunPython(migrations.RunPython.noop),
    ]
|
sysalexis/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/ctypes/test/test_incomplete.py | 170 | import unittest
from ctypes import *
################################################################
#
# The incomplete pointer example from the tutorial
#
class MyTestCase(unittest.TestCase):

    def test_incomplete_example(self):
        """Forward-declare a pointer type, then walk a two-node cycle."""
        lpcell = POINTER("cell")

        class cell(Structure):
            _fields_ = [("name", c_char_p),
                        ("next", lpcell)]

        SetPointerType(lpcell, cell)

        # Two cells whose `next` pointers reference each other.
        foo = cell(name=b"foo")
        bar = cell(name=b"bar")
        foo.next = pointer(bar)
        bar.next = pointer(foo)

        node = foo
        seen = []
        for _ in range(8):
            seen.append(node.name)
            node = node.next[0]
        self.assertEqual(seen, [b"foo", b"bar"] * 4)

        # To not leak references, clean the forward-declared type out of
        # the module-level pointer-type cache.
        from ctypes import _pointer_type_cache
        del _pointer_type_cache[cell]
################################################################
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
dahlstrom-g/intellij-community | refs/heads/master | python/testData/refactoring/convertPackageToModule/simple/after/b.py | 12133432 | |
hdinsight/hue | refs/heads/master | apps/useradmin/src/useradmin/migrations/__init__.py | 12133432 | |
valfirst/selenium | refs/heads/master | py/selenium/webdriver/common/by.py | 61 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The By implementation.
"""
class By(object):
    """Set of supported locator strategies.

    Each value is the wire-protocol string sent to the remote end when
    locating elements with that strategy.
    """

    ID = "id"                                # the id attribute
    XPATH = "xpath"                          # an XPath expression
    LINK_TEXT = "link text"                  # exact anchor text
    PARTIAL_LINK_TEXT = "partial link text"  # substring of anchor text
    NAME = "name"                            # the name attribute
    TAG_NAME = "tag name"                    # the element tag
    CLASS_NAME = "class name"                # a single CSS class
    CSS_SELECTOR = "css selector"            # a CSS selector
|
Yarrick13/hwasp | refs/heads/master | tests/wasp1/AllAnswerSets/grounding_backjump_14.test.py | 3 | input = """
"""
output = """
{a(2), a(3), b(1,1), p(1), p(2), q(1), q(2), t(1,1,2), t(1,2,3)}
"""
|
Gaia3D/QGIS | refs/heads/master | python/console/console.py | 5 | # -*- coding:utf-8 -*-
"""
/***************************************************************************
Python Console for QGIS
-------------------
begin : 2012-09-10
copyright : (C) 2012 by Salvatore Larosa
email : lrssvtml (at) gmail (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Some portions of code were taken from https://code.google.com/p/pydee/
"""
from PyQt4.QtCore import Qt, QTimer, QSettings, QCoreApplication, QSize, QByteArray, QFileInfo, SIGNAL
from PyQt4.QtGui import QDockWidget, QToolBar, QToolButton, QWidget, QSplitter, QTreeWidget, QAction, QFileDialog, QCheckBox, QSizePolicy, QMenu, QGridLayout, QApplication
from PyQt4 import pyqtconfig
from qgis.utils import iface
from console_sci import ShellScintilla
from console_output import ShellOutputScintilla
from console_editor import EditorTabWidget
from console_settings import optionsDialog
from qgis.core import QgsApplication, QgsContextHelp
from qgis.gui import QgsFilterLineEdit
import sys
_console = None
def show_console():
    """Called from QGIS to open (or toggle) the Python console dock."""
    global _console
    if _console is None:
        parent = iface.mainWindow() if iface else None
        _console = PythonConsole(parent)
        _console.show()  # force show even if it was restored as hidden
        # Defer the focus event so it also works while the console is
        # not yet visible.
        QTimer.singleShot(0, _console.activate)
    else:
        _console.setVisible(not _console.isVisible())
        # Focus the console so the user can start typing right away.
        if _console.isVisible():
            _console.activate()
    # Show the console help exactly once, on the first launch.
    settings = QSettings()
    if settings.value('pythonConsole/contextHelpOnFirstLaunch', True, type=bool):
        QgsContextHelp.run("PythonConsole")
        settings.setValue('pythonConsole/contextHelpOnFirstLaunch', False)
# Original stdout, saved so console output redirection can be undone.
_old_stdout = sys.stdout
# Most recent value captured by the console display hook.
_console_output = None
# hook for python console so all output will be redirected
# and then shown in console
def console_displayhook(obj):
    """Display hook for the console: stash *obj*, the value of the last
    evaluated expression, in the module global ``_console_output``."""
    global _console_output
    _console_output = obj
class PythonConsole(QDockWidget):
    """Dock widget hosting the Python console inside the QGIS main window."""

    def __init__(self, parent=None):
        QDockWidget.__init__(self, parent)
        self.setObjectName("PythonConsole")
        self.setWindowTitle(QCoreApplication.translate("PythonConsole", "Python Console"))
        #self.setAllowedAreas(Qt.BottomDockWidgetArea)

        # The actual console UI; the dock forwards focus to it.
        self.console = PythonConsoleWidget(self)
        self.setWidget( self.console )
        self.setFocusProxy( self.console )

        # try to restore position from stored main window state,
        # defaulting to the bottom dock area
        if iface and not iface.mainWindow().restoreDockWidget(self):
            iface.mainWindow().addDockWidget(Qt.BottomDockWidgetArea, self)

    def activate(self):
        """Raise the dock and give it keyboard focus."""
        self.activateWindow()
        self.raise_()
        QDockWidget.setFocus(self)

    def closeEvent(self, event):
        """Persist the console settings before the dock closes."""
        self.console.saveSettingsConsole()
        QWidget.closeEvent(self, event)
class PythonConsoleWidget(QWidget):
def __init__(self, parent=None):
    """Build the complete console UI.

    Creates the interactive shell and its output pane, the script editor
    tabs, the object inspector, the find bar, both toolbars with all of
    their actions, the layouts that tie them together, and finally wires
    every action/widget signal to its handler.
    """
    QWidget.__init__(self, parent)
    self.setWindowTitle(QCoreApplication.translate("PythonConsole", "Python Console"))
    self.settings = QSettings()

    # Core widgets: shell input, shell output, editor tab container.
    self.shell = ShellScintilla(self)
    self.setFocusProxy(self.shell)
    self.shellOut = ShellOutputScintilla(self)
    self.tabEditorWidget = EditorTabWidget(self)

    ##------------ UI -------------------------------
    # splitterEditor: console area | editor/inspector area.
    self.splitterEditor = QSplitter(self)
    self.splitterEditor.setOrientation(Qt.Horizontal)
    self.splitterEditor.setHandleWidth(6)
    self.splitterEditor.setChildrenCollapsible(True)
    # splitter: shell output above the shell input.
    self.splitter = QSplitter(self.splitterEditor)
    self.splitter.setOrientation(Qt.Vertical)
    self.splitter.setHandleWidth(3)
    self.splitter.setChildrenCollapsible(False)
    self.splitter.addWidget(self.shellOut)
    self.splitter.addWidget(self.shell)
    #self.splitterEditor.addWidget(self.tabEditorWidget)
    # splitterObj: editor widget | object-inspector tree.
    self.splitterObj = QSplitter(self.splitterEditor)
    self.splitterObj.setHandleWidth(3)
    self.splitterObj.setOrientation(Qt.Horizontal)
    #self.splitterObj.setSizes([0, 0])
    #self.splitterObj.setStretchFactor(0, 1)
    self.widgetEditor = QWidget(self.splitterObj)
    self.widgetFind = QWidget(self)
    # Object inspector: second column holds hidden metadata.
    self.listClassMethod = QTreeWidget(self.splitterObj)
    self.listClassMethod.setColumnCount(2)
    objInspLabel = QCoreApplication.translate("PythonConsole", "Object Inspector")
    self.listClassMethod.setHeaderLabels([objInspLabel, ''])
    self.listClassMethod.setColumnHidden(1, True)
    self.listClassMethod.setAlternatingRowColors(True)

    #self.splitterEditor.addWidget(self.widgetEditor)
    #self.splitterObj.addWidget(self.listClassMethod)
    #self.splitterObj.addWidget(self.widgetEditor)

    # Hide side editor on start up
    self.splitterObj.hide()
    self.listClassMethod.hide()
    # Hide search widget on start up
    self.widgetFind.hide()

    sizes = self.splitter.sizes()
    self.splitter.setSizes(sizes)

    ##----------------Restore Settings------------------------------------
    self.restoreSettingsConsole()

    ##------------------Toolbar Editor-------------------------------------
    ## Action for Open File
    openFileBt = QCoreApplication.translate("PythonConsole", "Open file")
    self.openFileButton = QAction(self)
    self.openFileButton.setCheckable(False)
    self.openFileButton.setEnabled(True)
    self.openFileButton.setIcon(QgsApplication.getThemeIcon("console/iconOpenConsole.png"))
    self.openFileButton.setMenuRole(QAction.PreferencesRole)
    self.openFileButton.setIconVisibleInMenu(True)
    self.openFileButton.setToolTip(openFileBt)
    self.openFileButton.setText(openFileBt)
    ## Action for Save File — disabled until the current tab is dirty.
    saveFileBt = QCoreApplication.translate("PythonConsole", "Save")
    self.saveFileButton = QAction(self)
    self.saveFileButton.setCheckable(False)
    self.saveFileButton.setEnabled(False)
    self.saveFileButton.setIcon(QgsApplication.getThemeIcon("console/iconSaveConsole.png"))
    self.saveFileButton.setMenuRole(QAction.PreferencesRole)
    self.saveFileButton.setIconVisibleInMenu(True)
    self.saveFileButton.setToolTip(saveFileBt)
    self.saveFileButton.setText(saveFileBt)
    ## Action for Save File As
    saveAsFileBt = QCoreApplication.translate("PythonConsole", "Save As...")
    self.saveAsFileButton = QAction(self)
    self.saveAsFileButton.setCheckable(False)
    self.saveAsFileButton.setEnabled(True)
    self.saveAsFileButton.setIcon(QgsApplication.getThemeIcon("console/iconSaveAsConsole.png"))
    self.saveAsFileButton.setMenuRole(QAction.PreferencesRole)
    self.saveAsFileButton.setIconVisibleInMenu(True)
    self.saveAsFileButton.setToolTip(saveAsFileBt)
    self.saveAsFileButton.setText(saveAsFileBt)
    ## Action Cut
    cutEditorBt = QCoreApplication.translate("PythonConsole", "Cut")
    self.cutEditorButton = QAction(self)
    self.cutEditorButton.setCheckable(False)
    self.cutEditorButton.setEnabled(True)
    self.cutEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconCutEditorConsole.png"))
    self.cutEditorButton.setMenuRole(QAction.PreferencesRole)
    self.cutEditorButton.setIconVisibleInMenu(True)
    self.cutEditorButton.setToolTip(cutEditorBt)
    self.cutEditorButton.setText(cutEditorBt)
    ## Action Copy
    copyEditorBt = QCoreApplication.translate("PythonConsole", "Copy")
    self.copyEditorButton = QAction(self)
    self.copyEditorButton.setCheckable(False)
    self.copyEditorButton.setEnabled(True)
    self.copyEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconCopyEditorConsole.png"))
    self.copyEditorButton.setMenuRole(QAction.PreferencesRole)
    self.copyEditorButton.setIconVisibleInMenu(True)
    self.copyEditorButton.setToolTip(copyEditorBt)
    self.copyEditorButton.setText(copyEditorBt)
    ## Action Paste
    pasteEditorBt = QCoreApplication.translate("PythonConsole", "Paste")
    self.pasteEditorButton = QAction(self)
    self.pasteEditorButton.setCheckable(False)
    self.pasteEditorButton.setEnabled(True)
    self.pasteEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconPasteEditorConsole.png"))
    self.pasteEditorButton.setMenuRole(QAction.PreferencesRole)
    self.pasteEditorButton.setIconVisibleInMenu(True)
    self.pasteEditorButton.setToolTip(pasteEditorBt)
    self.pasteEditorButton.setText(pasteEditorBt)
    ## Action Run Script (subprocess)
    runScriptEditorBt = QCoreApplication.translate("PythonConsole", "Run script")
    self.runScriptEditorButton = QAction(self)
    self.runScriptEditorButton.setCheckable(False)
    self.runScriptEditorButton.setEnabled(True)
    self.runScriptEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconRunScriptConsole.png"))
    self.runScriptEditorButton.setMenuRole(QAction.PreferencesRole)
    self.runScriptEditorButton.setIconVisibleInMenu(True)
    self.runScriptEditorButton.setToolTip(runScriptEditorBt)
    self.runScriptEditorButton.setText(runScriptEditorBt)
    ## Action Comment selected lines
    commentEditorBt = QCoreApplication.translate("PythonConsole", "Comment")
    self.commentEditorButton = QAction(self)
    self.commentEditorButton.setCheckable(False)
    self.commentEditorButton.setEnabled(True)
    self.commentEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconCommentEditorConsole.png"))
    self.commentEditorButton.setMenuRole(QAction.PreferencesRole)
    self.commentEditorButton.setIconVisibleInMenu(True)
    self.commentEditorButton.setToolTip(commentEditorBt)
    self.commentEditorButton.setText(commentEditorBt)
    ## Action Uncomment selected lines
    uncommentEditorBt = QCoreApplication.translate("PythonConsole", "Uncomment")
    self.uncommentEditorButton = QAction(self)
    self.uncommentEditorButton.setCheckable(False)
    self.uncommentEditorButton.setEnabled(True)
    self.uncommentEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconUncommentEditorConsole.png"))
    self.uncommentEditorButton.setMenuRole(QAction.PreferencesRole)
    self.uncommentEditorButton.setIconVisibleInMenu(True)
    self.uncommentEditorButton.setToolTip(uncommentEditorBt)
    self.uncommentEditorButton.setText(uncommentEditorBt)
    ## Action for Object browser — enabled state mirrors the setting.
    objList = QCoreApplication.translate("PythonConsole", "Object Inspector")
    self.objectListButton = QAction(self)
    self.objectListButton.setCheckable(True)
    self.objectListButton.setEnabled(self.settings.value("pythonConsole/enableObjectInsp",
                                                         False, type=bool))
    self.objectListButton.setIcon(QgsApplication.getThemeIcon("console/iconClassBrowserConsole.png"))
    self.objectListButton.setMenuRole(QAction.PreferencesRole)
    self.objectListButton.setIconVisibleInMenu(True)
    self.objectListButton.setToolTip(objList)
    self.objectListButton.setText(objList)
    ## Action for Find text
    findText = QCoreApplication.translate("PythonConsole", "Find Text")
    self.findTextButton = QAction(self)
    self.findTextButton.setCheckable(True)
    self.findTextButton.setEnabled(True)
    self.findTextButton.setIcon(QgsApplication.getThemeIcon("console/iconSearchEditorConsole.png"))
    self.findTextButton.setMenuRole(QAction.PreferencesRole)
    self.findTextButton.setIconVisibleInMenu(True)
    self.findTextButton.setToolTip(findText)
    self.findTextButton.setText(findText)

    ##----------------Toolbar Console-------------------------------------
    ## Action Show Editor
    showEditor = QCoreApplication.translate("PythonConsole", "Show editor")
    self.showEditorButton = QAction(self)
    self.showEditorButton.setEnabled(True)
    self.showEditorButton.setCheckable(True)
    self.showEditorButton.setIcon(QgsApplication.getThemeIcon("console/iconShowEditorConsole.png"))
    self.showEditorButton.setMenuRole(QAction.PreferencesRole)
    self.showEditorButton.setIconVisibleInMenu(True)
    self.showEditorButton.setToolTip(showEditor)
    self.showEditorButton.setText(showEditor)
    ## Action for Clear button
    clearBt = QCoreApplication.translate("PythonConsole", "Clear console")
    self.clearButton = QAction(self)
    self.clearButton.setCheckable(False)
    self.clearButton.setEnabled(True)
    self.clearButton.setIcon(QgsApplication.getThemeIcon("console/iconClearConsole.png"))
    self.clearButton.setMenuRole(QAction.PreferencesRole)
    self.clearButton.setIconVisibleInMenu(True)
    self.clearButton.setToolTip(clearBt)
    self.clearButton.setText(clearBt)
    ## Action for settings
    optionsBt = QCoreApplication.translate("PythonConsole", "Settings")
    self.optionsButton = QAction(self)
    self.optionsButton.setCheckable(False)
    self.optionsButton.setEnabled(True)
    self.optionsButton.setIcon(QgsApplication.getThemeIcon("console/iconSettingsConsole.png"))
    self.optionsButton.setMenuRole(QAction.PreferencesRole)
    self.optionsButton.setIconVisibleInMenu(True)
    self.optionsButton.setToolTip(optionsBt)
    self.optionsButton.setText(optionsBt)
    ## Action menu for class — parent action of the import menu below.
    actionClassBt = QCoreApplication.translate("PythonConsole", "Import Class")
    self.actionClass = QAction(self)
    self.actionClass.setCheckable(False)
    self.actionClass.setEnabled(True)
    self.actionClass.setIcon(QgsApplication.getThemeIcon("console/iconClassConsole.png"))
    self.actionClass.setMenuRole(QAction.PreferencesRole)
    self.actionClass.setIconVisibleInMenu(True)
    self.actionClass.setToolTip(actionClassBt)
    self.actionClass.setText(actionClassBt)
    ## Import Processing class
    loadProcessingBt = QCoreApplication.translate("PythonConsole", "Import Processing class")
    self.loadProcessingButton = QAction(self)
    self.loadProcessingButton.setCheckable(False)
    self.loadProcessingButton.setEnabled(True)
    self.loadProcessingButton.setIcon(QgsApplication.getThemeIcon("console/iconProcessingConsole.png"))
    self.loadProcessingButton.setMenuRole(QAction.PreferencesRole)
    self.loadProcessingButton.setIconVisibleInMenu(True)
    self.loadProcessingButton.setToolTip(loadProcessingBt)
    self.loadProcessingButton.setText(loadProcessingBt)
    ## Import QtCore class
    loadQtCoreBt = QCoreApplication.translate("PythonConsole", "Import PyQt.QtCore class")
    self.loadQtCoreButton = QAction(self)
    self.loadQtCoreButton.setCheckable(False)
    self.loadQtCoreButton.setEnabled(True)
    self.loadQtCoreButton.setIcon(QgsApplication.getThemeIcon("console/iconQtCoreConsole.png"))
    self.loadQtCoreButton.setMenuRole(QAction.PreferencesRole)
    self.loadQtCoreButton.setIconVisibleInMenu(True)
    self.loadQtCoreButton.setToolTip(loadQtCoreBt)
    self.loadQtCoreButton.setText(loadQtCoreBt)
    ## Import QtGui class
    loadQtGuiBt = QCoreApplication.translate("PythonConsole", "Import PyQt.QtGui class")
    self.loadQtGuiButton = QAction(self)
    self.loadQtGuiButton.setCheckable(False)
    self.loadQtGuiButton.setEnabled(True)
    self.loadQtGuiButton.setIcon(QgsApplication.getThemeIcon("console/iconQtGuiConsole.png"))
    self.loadQtGuiButton.setMenuRole(QAction.PreferencesRole)
    self.loadQtGuiButton.setIconVisibleInMenu(True)
    self.loadQtGuiButton.setToolTip(loadQtGuiBt)
    self.loadQtGuiButton.setText(loadQtGuiBt)
    ## Action for Run script
    runBt = QCoreApplication.translate("PythonConsole", "Run command")
    self.runButton = QAction(self)
    self.runButton.setCheckable(False)
    self.runButton.setEnabled(True)
    self.runButton.setIcon(QgsApplication.getThemeIcon("console/iconRunConsole.png"))
    self.runButton.setMenuRole(QAction.PreferencesRole)
    self.runButton.setIconVisibleInMenu(True)
    self.runButton.setToolTip(runBt)
    self.runButton.setText(runBt)
    ## Help action
    helpBt = QCoreApplication.translate("PythonConsole", "Help")
    self.helpButton = QAction(self)
    self.helpButton.setCheckable(False)
    self.helpButton.setEnabled(True)
    self.helpButton.setIcon(QgsApplication.getThemeIcon("console/iconHelpConsole.png"))
    self.helpButton.setMenuRole(QAction.PreferencesRole)
    self.helpButton.setIconVisibleInMenu(True)
    self.helpButton.setToolTip(helpBt)
    self.helpButton.setText(helpBt)

    # Vertical toolbar for the console side.
    self.toolBar = QToolBar()
    self.toolBar.setEnabled(True)
    self.toolBar.setFocusPolicy(Qt.NoFocus)
    self.toolBar.setContextMenuPolicy(Qt.DefaultContextMenu)
    self.toolBar.setLayoutDirection(Qt.LeftToRight)
    self.toolBar.setIconSize(QSize(24, 24))
    self.toolBar.setOrientation(Qt.Vertical)
    self.toolBar.setMovable(True)
    self.toolBar.setFloatable(True)
    self.toolBar.addAction(self.clearButton)
    self.toolBar.addAction(self.actionClass)
    self.toolBar.addAction(self.runButton)
    self.toolBar.addSeparator()
    self.toolBar.addAction(self.showEditorButton)
    self.toolBar.addSeparator()
    self.toolBar.addAction(self.optionsButton)
    self.toolBar.addAction(self.helpButton)

    # Vertical toolbar for the editor side; disabled until the editor
    # is shown.
    self.toolBarEditor = QToolBar()
    # self.toolBarEditor.setStyleSheet('QToolBar{background-color: rgb(%s, %s, %s' % tuple(bkgrcolor) + ');\
    # border-right: 1px solid rgb(%s, %s, %s' % tuple(bordercl) + ');}')
    self.toolBarEditor.setEnabled(False)
    self.toolBarEditor.setFocusPolicy(Qt.NoFocus)
    self.toolBarEditor.setContextMenuPolicy(Qt.DefaultContextMenu)
    self.toolBarEditor.setLayoutDirection(Qt.LeftToRight)
    self.toolBarEditor.setIconSize(QSize(18, 18))
    self.toolBarEditor.setOrientation(Qt.Vertical)
    self.toolBarEditor.setMovable(True)
    self.toolBarEditor.setFloatable(True)
    self.toolBarEditor.addAction(self.openFileButton)
    self.toolBarEditor.addSeparator()
    self.toolBarEditor.addAction(self.saveFileButton)
    self.toolBarEditor.addAction(self.saveAsFileButton)
    self.toolBarEditor.addSeparator()
    self.toolBarEditor.addAction(self.findTextButton)
    self.toolBarEditor.addSeparator()
    self.toolBarEditor.addAction(self.cutEditorButton)
    self.toolBarEditor.addAction(self.copyEditorButton)
    self.toolBarEditor.addAction(self.pasteEditorButton)
    self.toolBarEditor.addSeparator()
    self.toolBarEditor.addAction(self.commentEditorButton)
    self.toolBarEditor.addAction(self.uncommentEditorButton)
    self.toolBarEditor.addSeparator()
    self.toolBarEditor.addAction(self.objectListButton)
    self.toolBarEditor.addSeparator()
    self.toolBarEditor.addAction(self.runScriptEditorButton)

    ## Menu Import Class — opens on a single click of actionClass.
    self.classMenu = QMenu()
    self.classMenu.addAction(self.loadProcessingButton)
    self.classMenu.addAction(self.loadQtCoreButton)
    self.classMenu.addAction(self.loadQtGuiButton)
    cM = self.toolBar.widgetForAction(self.actionClass)
    cM.setMenu(self.classMenu)
    cM.setPopupMode(QToolButton.InstantPopup)

    # Fixed-width containers that host the two toolbars.
    self.widgetButton = QWidget()
    sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.widgetButton.sizePolicy().hasHeightForWidth())
    self.widgetButton.setSizePolicy(sizePolicy)

    self.widgetButtonEditor = QWidget(self.widgetEditor)
    sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.widgetButtonEditor.sizePolicy().hasHeightForWidth())
    self.widgetButtonEditor.setSizePolicy(sizePolicy)

    sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.shellOut.sizePolicy().hasHeightForWidth())
    self.shellOut.setSizePolicy(sizePolicy)

    self.shellOut.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
    self.shell.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)

    ##------------ Layout -------------------------------
    self.mainLayout = QGridLayout(self)
    self.mainLayout.setMargin(0)
    self.mainLayout.setSpacing(0)
    self.mainLayout.addWidget(self.widgetButton, 0, 0, 1, 1)
    self.mainLayout.addWidget(self.splitterEditor, 0, 1, 1, 1)

    self.layoutEditor = QGridLayout(self.widgetEditor)
    self.layoutEditor.setMargin(0)
    self.layoutEditor.setSpacing(0)
    self.layoutEditor.addWidget(self.widgetButtonEditor, 0, 0, 2, 1)
    self.layoutEditor.addWidget(self.tabEditorWidget, 0, 1, 1, 1)
    self.layoutEditor.addWidget(self.widgetFind, 1, 1, 1, 1)

    self.toolBarLayout = QGridLayout(self.widgetButton)
    self.toolBarLayout.setMargin(0)
    self.toolBarLayout.setSpacing(0)
    self.toolBarLayout.addWidget(self.toolBar)
    self.toolBarEditorLayout = QGridLayout(self.widgetButtonEditor)
    self.toolBarEditorLayout.setMargin(0)
    self.toolBarEditorLayout.setSpacing(0)
    self.toolBarEditorLayout.addWidget(self.toolBarEditor)

    ## Layout for the find widget
    self.layoutFind = QGridLayout(self.widgetFind)
    self.layoutFind.setContentsMargins(0, 0, 0, 0)
    self.lineEditFind = QgsFilterLineEdit()
    placeHolderTxt = QCoreApplication.translate("PythonConsole", "Enter text to find...")
    # setPlaceholderText only exists from Qt 4.7 on.
    if pyqtconfig.Configuration().qt_version >= 0x40700:
        self.lineEditFind.setPlaceholderText(placeHolderTxt)
    else:
        self.lineEditFind.setToolTip(placeHolderTxt)
    # Next/Previous buttons stay disabled until there is search text.
    self.findNextButton = QToolButton()
    self.findNextButton.setEnabled(False)
    toolTipfindNext = QCoreApplication.translate("PythonConsole", "Find Next")
    self.findNextButton.setToolTip(toolTipfindNext)
    self.findNextButton.setIcon(QgsApplication.getThemeIcon("console/iconSearchNextEditorConsole.png"))
    self.findNextButton.setIconSize(QSize(24, 24))
    self.findNextButton.setAutoRaise(True)
    self.findPrevButton = QToolButton()
    self.findPrevButton.setEnabled(False)
    toolTipfindPrev = QCoreApplication.translate("PythonConsole", "Find Previous")
    self.findPrevButton.setToolTip(toolTipfindPrev)
    self.findPrevButton.setIcon(QgsApplication.getThemeIcon("console/iconSearchPrevEditorConsole.png"))
    self.findPrevButton.setIconSize(QSize(24, 24))
    self.findPrevButton.setAutoRaise(True)
    self.caseSensitive = QCheckBox()
    caseSensTr = QCoreApplication.translate("PythonConsole", "Case Sensitive")
    self.caseSensitive.setText(caseSensTr)
    self.wholeWord = QCheckBox()
    wholeWordTr = QCoreApplication.translate("PythonConsole", "Whole Word")
    self.wholeWord.setText(wholeWordTr)
    self.wrapAround = QCheckBox()
    self.wrapAround.setChecked(True)
    wrapAroundTr = QCoreApplication.translate("PythonConsole", "Wrap Around")
    self.wrapAround.setText(wrapAroundTr)
    self.layoutFind.addWidget(self.lineEditFind, 0, 1, 1, 1)
    self.layoutFind.addWidget(self.findPrevButton, 0, 2, 1, 1)
    self.layoutFind.addWidget(self.findNextButton, 0, 3, 1, 1)
    self.layoutFind.addWidget(self.caseSensitive, 0, 4, 1, 1)
    self.layoutFind.addWidget(self.wholeWord, 0, 5, 1, 1)
    self.layoutFind.addWidget(self.wrapAround, 0, 6, 1, 1)

    ##------------ Add first Tab in Editor -------------------------------
    #self.tabEditorWidget.newTabEditor(tabName='first', filename=None)

    ##------------ Signal -------------------------------
    self.findTextButton.toggled.connect(self.findTextEditor)
    self.objectListButton.toggled.connect(self.toggleObjectListWidget)
    self.commentEditorButton.triggered.connect(self.commentCode)
    self.uncommentEditorButton.triggered.connect(self.uncommentCode)
    self.runScriptEditorButton.triggered.connect(self.runScriptEditor)
    self.cutEditorButton.triggered.connect(self.cutEditor)
    self.copyEditorButton.triggered.connect(self.copyEditor)
    self.pasteEditorButton.triggered.connect(self.pasteEditor)
    self.showEditorButton.toggled.connect(self.toggleEditor)
    self.clearButton.triggered.connect(self.shellOut.clearConsole)
    self.optionsButton.triggered.connect(self.openSettings)
    self.loadProcessingButton.triggered.connect(self.processing)
    self.loadQtCoreButton.triggered.connect(self.qtCore)
    self.loadQtGuiButton.triggered.connect(self.qtGui)
    self.runButton.triggered.connect(self.shell.entered)
    self.openFileButton.triggered.connect(self.openScriptFile)
    self.saveFileButton.triggered.connect(self.saveScriptFile)
    self.saveAsFileButton.triggered.connect(self.saveAsScriptFile)
    self.helpButton.triggered.connect(self.openHelp)
    # Old-style connect: QTreeWidget itemClicked carries (item, column).
    self.connect(self.listClassMethod, SIGNAL('itemClicked(QTreeWidgetItem*, int)'),
                 self.onClickGoToLine)
    self.lineEditFind.returnPressed.connect(self._findText)
    self.findNextButton.clicked.connect(self._findNext)
    self.findPrevButton.clicked.connect(self._findPrev)
    self.lineEditFind.textChanged.connect(self._textFindChanged)
def _findText(self):
self.tabEditorWidget.currentWidget().newEditor.findText(True)
def _findNext(self):
self.tabEditorWidget.currentWidget().newEditor.findText(True)
def _findPrev(self):
self.tabEditorWidget.currentWidget().newEditor.findText(False)
def _textFindChanged(self):
if self.lineEditFind.text():
self.findNextButton.setEnabled(True)
self.findPrevButton.setEnabled(True)
else:
self.lineEditFind.setStyleSheet('')
self.findNextButton.setEnabled(False)
self.findPrevButton.setEnabled(False)
    def onClickGoToLine(self, item, column):
        """Jump to the source line encoded in a class/method browser *item*.

        Column 1 of the tree item holds either the literal string
        'syntaxError' or a line number; column 0 holds the object name,
        optionally followed by extra text after a space.
        """
        tabEditor = self.tabEditorWidget.currentWidget().newEditor
        if item.text(1) == 'syntaxError':
            # Re-run the syntax check; if it now passes, persist the fix.
            check = tabEditor.syntaxCheck(fromContextMenu=False)
            if check and not tabEditor.isReadOnly():
                self.tabEditorWidget.currentWidget().save()
            return
        linenr = int(item.text(1))
        itemName = str(item.text(0))
        charPos = itemName.find(' ')
        if charPos != -1:
            # Strip any trailing annotation after the object name.
            objName = itemName[0:charPos]
        else:
            objName = itemName
        tabEditor.goToLine(objName, linenr)
def processing(self):
self.shell.commandConsole('processing')
def qtCore(self):
self.shell.commandConsole('qtCore')
def qtGui(self):
self.shell.commandConsole('qtGui')
    def toggleEditor(self, checked):
        """Show or hide the editor pane when its toolbar button is toggled."""
        self.splitterObj.show() if checked else self.splitterObj.hide()
        # NOTE(review): this guard looks inverted -- it only runs when
        # ``self.tabEditorWidget`` is falsy, yet then calls methods on that
        # same object.  Confirm the intended PyQt truthiness before changing.
        if not self.tabEditorWidget:
            self.tabEditorWidget.enableToolBarEditor(checked)
            self.tabEditorWidget.restoreTabsOrAddNew()
def toggleObjectListWidget(self, checked):
self.listClassMethod.show() if checked else self.listClassMethod.hide()
def findTextEditor(self, checked):
self.widgetFind.show() if checked else self.widgetFind.hide()
def pasteEditor(self):
self.tabEditorWidget.currentWidget().newEditor.paste()
def cutEditor(self):
self.tabEditorWidget.currentWidget().newEditor.cut()
def copyEditor(self):
self.tabEditorWidget.currentWidget().newEditor.copy()
def runScriptEditor(self):
self.tabEditorWidget.currentWidget().newEditor.runScriptCode()
def commentCode(self):
self.tabEditorWidget.currentWidget().newEditor.commentEditorCode(True)
def uncommentCode(self):
self.tabEditorWidget.currentWidget().newEditor.commentEditorCode(False)
def openScriptFile(self):
lastDirPath = self.settings.value("pythonConsole/lastDirPath", "")
openFileTr = QCoreApplication.translate("PythonConsole", "Open File")
fileList = QFileDialog.getOpenFileNames(
self, openFileTr, lastDirPath, "Script file (*.py)")
if fileList:
for pyFile in fileList:
for i in range(self.tabEditorWidget.count()):
tabWidget = self.tabEditorWidget.widget(i)
if tabWidget.path == pyFile:
self.tabEditorWidget.setCurrentWidget(tabWidget)
break
else:
tabName = QFileInfo(pyFile).fileName()
self.tabEditorWidget.newTabEditor(tabName, pyFile)
lastDirPath = QFileInfo(pyFile).path()
self.settings.setValue("pythonConsole/lastDirPath", pyFile)
self.updateTabListScript(pyFile, action='append')
def saveScriptFile(self):
tabWidget = self.tabEditorWidget.currentWidget()
try:
tabWidget.save()
except (IOError, OSError), error:
msgText = QCoreApplication.translate('PythonConsole',
'The file <b>{0}</b> could not be saved. Error: {1}').format(tabWidget.path,
error.strerror)
self.callWidgetMessageBarEditor(msgText, 2, False)
def saveAsScriptFile(self, index=None):
tabWidget = self.tabEditorWidget.currentWidget()
if not index:
index = self.tabEditorWidget.currentIndex()
if not tabWidget.path:
pathFileName = self.tabEditorWidget.tabText(index) + '.py'
fileNone = True
else:
pathFileName = tabWidget.path
fileNone = False
saveAsFileTr = QCoreApplication.translate("PythonConsole", "Save File As")
filename = QFileDialog.getSaveFileName(self,
saveAsFileTr,
pathFileName, "Script file (*.py)")
if filename:
try:
tabWidget.save(filename)
except (IOError, OSError), error:
msgText = QCoreApplication.translate('PythonConsole',
'The file <b>{0}</b> could not be saved. Error: {1}').format(tabWidget.path,
error.strerror)
self.callWidgetMessageBarEditor(msgText, 2, False)
if fileNone:
tabWidget.path = None
else:
tabWidget.path = pathFileName
return
if not fileNone:
self.updateTabListScript(pathFileName, action='remove')
def openHelp(self):
QgsContextHelp.run( "PythonConsole" )
def openSettings(self):
if optionsDialog(self).exec_():
self.shell.refreshSettingsShell()
self.shellOut.refreshSettingsOutput()
self.tabEditorWidget.refreshSettingsEditor()
def callWidgetMessageBar(self, text):
self.shellOut.widgetMessageBar(iface, text)
def callWidgetMessageBarEditor(self, text, level, timed):
self.tabEditorWidget.widgetMessageBar(iface, text, level, timed)
    def updateTabListScript(self, script, action=None):
        """Maintain the persisted list of scripts open in editor tabs.

        action='append' adds *script* (deduplicated), action='remove'
        drops it, and any other value resets the list.  The result is
        written back to QSettings.
        """
        if action == 'remove':
            self.tabListScript.remove(script)
        elif action == 'append':
            # QSettings may have yielded None/empty; start a fresh list.
            if not self.tabListScript:
                self.tabListScript = []
            if script not in self.tabListScript:
                self.tabListScript.append(script)
        else:
            self.tabListScript = []
        self.settings.setValue("pythonConsole/tabScripts",
                               self.tabListScript)
def saveSettingsConsole(self):
self.settings.setValue("pythonConsole/splitterConsole", self.splitter.saveState())
self.settings.setValue("pythonConsole/splitterObj", self.splitterObj.saveState())
self.settings.setValue("pythonConsole/splitterEditor", self.splitterEditor.saveState())
self.shell.writeHistoryFile(True)
def restoreSettingsConsole(self):
storedTabScripts = self.settings.value("pythonConsole/tabScripts", [])
self.tabListScript = storedTabScripts
self.splitter.restoreState(self.settings.value("pythonConsole/splitterConsole", QByteArray()))
self.splitterEditor.restoreState(self.settings.value("pythonConsole/splitterEditor", QByteArray()))
self.splitterObj.restoreState(self.settings.value("pythonConsole/splitterObj", QByteArray()))
if __name__ == '__main__':
    # Allow running the console as a standalone application for testing.
    app = QApplication(sys.argv)
    console = PythonConsoleWidget()
    console.show()
    app.exec_()
|
MJuddBooth/pandas | refs/heads/master | pandas/tests/util/test_hashing.py | 1 | import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.core.util.hashing import _hash_scalar, hash_tuple, hash_tuples
from pandas.util import hash_array, hash_pandas_object
import pandas.util.testing as tm
@pytest.fixture(params=[
    Series([1, 2, 3] * 3, dtype="int32"),
    Series([None, 2.5, 3.5] * 3, dtype="float32"),
    Series(["a", "b", "c"] * 3, dtype="category"),
    Series(["d", "e", "f"] * 3),
    Series([True, False, True] * 3),
    Series(pd.date_range("20130101", periods=9)),
    Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
    Series(pd.timedelta_range("2000", periods=9))])
def series(request):
    """Fixture cycling through Series of int, float, category, object,
    bool, datetime, tz-aware datetime and timedelta dtype."""
    return request.param
@pytest.fixture(params=[True, False])
def index(request):
    """Boolean fixture: whether to include the index when hashing."""
    return request.param
def _check_equal(obj, **kwargs):
    """
    Check that hashing an objects produces the same value each time.

    Parameters
    ----------
    obj : object
        The object to hash.
    kwargs : kwargs
        Keyword arguments to pass to the hashing function.
    """
    first, second = (hash_pandas_object(obj, **kwargs) for _ in range(2))
    tm.assert_series_equal(first, second)
def _check_not_equal_with_index(obj):
"""
Check the hash of an object with and without its index is not the same.
Parameters
----------
obj : object
The object to hash.
"""
if not isinstance(obj, Index):
a = hash_pandas_object(obj, index=True)
b = hash_pandas_object(obj, index=False)
if len(obj):
assert not (a == b).all()
def test_consistency():
    """Regression-pin the exact hash values: ground truth for the algorithm."""
    # Check that our hash doesn't change because of a mistake
    # in the actual code; this is the ground truth.
    result = hash_pandas_object(Index(["foo", "bar", "baz"]))
    expected = Series(np.array([3600424527151052760, 1374399572096150070,
                                477881037637427054], dtype="uint64"),
                      index=["foo", "bar", "baz"])
    tm.assert_series_equal(result, expected)
def test_hash_array(series):
    """Hashing the same ndarray twice is deterministic."""
    values = series.values
    first = hash_array(values)
    second = hash_array(values)
    tm.assert_numpy_array_equal(first, second)
@pytest.mark.parametrize("arr2", [
    np.array([3, 4, "All"]),
    np.array([3, 4, "All"], dtype=object),
])
def test_hash_array_mixed(arr2):
    """Mixed int/str arrays hash the same as their all-string equivalent."""
    result1 = hash_array(np.array(["3", "4", "All"]))
    result2 = hash_array(arr2)
    tm.assert_numpy_array_equal(result1, result2)
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_array_errors(val):
    """Scalar (non-ndarray) input must be rejected with TypeError."""
    expected_msg = "must pass a ndarray-like"
    with pytest.raises(TypeError, match=expected_msg):
        hash_array(val)
def test_hash_tuples():
    """hash_tuples matches hashing the equivalent MultiIndex."""
    tuples = [(1, "one"), (1, "two"), (2, "one")]
    result = hash_tuples(tuples)
    expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values
    tm.assert_numpy_array_equal(result, expected)

    # A single tuple is also accepted and hashes like the first row.
    result = hash_tuples(tuples[0])
    assert result == expected[0]
@pytest.mark.parametrize("tup", [
    (1, "one"), (1, np.nan), (1.0, pd.NaT, "A"),
    ("A", pd.Timestamp("2012-01-01"))])
def test_hash_tuple(tup):
    """hash_tuple of one tuple equals hash_tuples of a one-element list."""
    # Test equivalence between
    # hash_tuples and hash_tuple.
    result = hash_tuple(tup)
    expected = hash_tuples([tup])[0]
    assert result == expected
@pytest.mark.parametrize("val", [
    1, 1.4, "A", b"A", u"A", pd.Timestamp("2012-01-01"),
    pd.Timestamp("2012-01-01", tz="Europe/Brussels"),
    datetime.datetime(2012, 1, 1),
    pd.Timestamp("2012-01-01", tz="EST").to_pydatetime(),
    pd.Timedelta("1 days"), datetime.timedelta(1),
    pd.Period("2012-01-01", freq="D"), pd.Interval(0, 1),
    np.nan, pd.NaT, None])
def test_hash_scalar(val):
    """_hash_scalar agrees with hash_array on a 1-element object array."""
    result = _hash_scalar(val)
    expected = hash_array(np.array([val], dtype=object), categorize=True)

    assert result[0] == expected[0]
@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])
def test_hash_tuples_err(val):
    """Input that is not convertible to a list of tuples raises TypeError."""
    msg = "must be convertible to a list-of-tuples"
    with pytest.raises(TypeError, match=msg):
        hash_tuples(val)
def test_multiindex_unique():
    """A unique MultiIndex hashes to unique values."""
    pairs = [(118, 472), (236, 118), (51, 204), (102, 51)]
    mi = MultiIndex.from_tuples(pairs)
    assert mi.is_unique is True

    hashed = hash_pandas_object(mi)
    assert hashed.is_unique is True
def test_multiindex_objects():
    """Hashing is stable under MultiIndex level sort-normalisation."""
    mi = MultiIndex(levels=[["b", "d", "a"], [1, 2, 3]],
                    codes=[[0, 1, 0, 2], [2, 0, 0, 1]],
                    names=["col1", "col2"])
    recons = mi._sort_levels_monotonic()

    # These are equal.
    assert mi.equals(recons)
    assert Index(mi.values).equals(Index(recons.values))

    # _hashed_values and hash_pandas_object(..., index=False) equivalency.
    expected = hash_pandas_object(mi, index=False).values
    result = mi._hashed_values

    tm.assert_numpy_array_equal(result, expected)

    expected = hash_pandas_object(recons, index=False).values
    result = recons._hashed_values

    tm.assert_numpy_array_equal(result, expected)

    expected = mi._hashed_values
    result = recons._hashed_values

    # Values should match, but in different order.
    tm.assert_numpy_array_equal(np.sort(result), np.sort(expected))
@pytest.mark.parametrize("obj", [
    Series([1, 2, 3]),
    Series([1.0, 1.5, 3.2]),
    Series([1.0, 1.5, np.nan]),
    Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
    Series(["a", "b", "c"]),
    Series(["a", np.nan, "c"]),
    Series(["a", None, "c"]),
    Series([True, False, True]),
    Series(),
    Index([1, 2, 3]),
    Index([True, False, True]),
    DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
    DataFrame(),
    tm.makeMissingDataframe(),
    tm.makeMixedDataFrame(),
    tm.makeTimeDataFrame(),
    tm.makeTimeSeries(),
    tm.makeTimedeltaIndex(),
    tm.makePeriodIndex(),
    Series(tm.makePeriodIndex()),
    Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
    MultiIndex.from_product([range(5), ["foo", "bar", "baz"],
                             pd.date_range("20130101", periods=2)]),
    MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)])
])
def test_hash_pandas_object(obj, index):
    """Hashing is deterministic, and the index changes the hash (gh-wide)."""
    _check_equal(obj, index=index)
    _check_not_equal_with_index(obj)
def test_hash_pandas_object2(series, index):
    """Same contract as test_hash_pandas_object, driven by the series fixture."""
    _check_equal(series, index=index)
    _check_not_equal_with_index(series)
@pytest.mark.parametrize("obj", [
    Series([], dtype="float64"), Series([], dtype="object"), Index([])])
def test_hash_pandas_empty_object(obj, index):
    """Empty objects hash identically with or without the index."""
    # These are by-definition the same with
    # or without the index as the data is empty.
    _check_equal(obj, index=index)
@pytest.mark.parametrize("s1", [
    Series(["a", "b", "c", "d"]),
    Series([1000, 2000, 3000, 4000]),
    Series(pd.date_range(0, periods=4))])
@pytest.mark.parametrize("categorize", [True, False])
def test_categorical_consistency(s1, categorize):
    """Categoricals hash by value, not by code (gh-15143)."""
    # see gh-15143
    #
    # Check that categoricals hash consistent with their values,
    # not codes. This should work for categoricals of any dtype.
    s2 = s1.astype("category").cat.set_categories(s1)
    s3 = s2.cat.set_categories(list(reversed(s1)))

    # These should all hash identically.
    h1 = hash_pandas_object(s1, categorize=categorize)
    h2 = hash_pandas_object(s2, categorize=categorize)
    h3 = hash_pandas_object(s3, categorize=categorize)

    tm.assert_series_equal(h1, h2)
    tm.assert_series_equal(h1, h3)
def test_categorical_with_nan_consistency():
    """NaN (-1 code) and shared categories hash consistently across casts."""
    c = pd.Categorical.from_codes(
        [-1, 0, 1, 2, 3, 4],
        categories=pd.date_range("2012-01-01", periods=5, name="B"))
    expected = hash_array(c, categorize=False)

    c = pd.Categorical.from_codes(
        [-1, 0],
        categories=[pd.Timestamp("2012-01-01")])
    result = hash_array(c, categorize=False)

    # Hashes of the shared values must appear in the larger hash set.
    assert result[0] in expected
    assert result[1] in expected
@pytest.mark.parametrize("obj", [pd.Timestamp("20130101")])
def test_pandas_errors(obj):
    """Scalars are not hashable by hash_pandas_object."""
    msg = "Unexpected type for hashing"
    with pytest.raises(TypeError, match=msg):
        hash_pandas_object(obj)
def test_hash_keys():
    """Different hash keys must yield different hashes for object data."""
    # This only matters for object dtypes.
    obj = Series(list("abc"))
    first = hash_pandas_object(obj, hash_key="9876543210123456")
    second = hash_pandas_object(obj, hash_key="9876543210123465")
    assert (first != second).all()
def test_invalid_key():
    """A hash key of the wrong length raises a helpful ValueError."""
    # This only matters for object dtypes.
    expected_msg = "key should be a 16-byte string encoded"
    with pytest.raises(ValueError, match=expected_msg):
        hash_pandas_object(Series(list("abc")), hash_key="foo")
def test_already_encoded(index):
    """Byte strings (already encoded) hash without error."""
    encoded = Series(list("abc")).str.encode("utf8")
    _check_equal(encoded, index=index)
def test_alternate_encoding(index):
    """Hashing honours a non-default string encoding."""
    sample = Series(list("abc"))
    _check_equal(sample, index=index, encoding="ascii")
@pytest.mark.parametrize("l_exp", range(8))
@pytest.mark.parametrize("l_add", [0, 1])
def test_same_len_hash_collisions(l_exp, l_add):
    """Two random strings of the same (large) length must not collide."""
    length = 2**(l_exp + 8) + l_add
    s = tm.rands_array(length, 2)

    result = hash_array(s, "utf8")
    assert not result[0] == result[1]
def test_hash_collisions():
    """Pin hashes of a known historical collision pair (gh-14711)."""
    # Hash collisions are bad.
    #
    # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
    hashes = ["Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9",  # noqa
              "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe"]  # noqa

    # These should be different.
    result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8")
    expected1 = np.array([14963968704024874985], dtype=np.uint64)
    tm.assert_numpy_array_equal(result1, expected1)

    result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8")
    expected2 = np.array([16428432627716348016], dtype=np.uint64)
    tm.assert_numpy_array_equal(result2, expected2)

    result = hash_array(np.asarray(hashes, dtype=object), "utf8")
    tm.assert_numpy_array_equal(result, np.concatenate([expected1,
                                                        expected2], axis=0))
|
OriHoch/Open-Knesset | refs/heads/master | links/migrations/0005_auto__add_field_link_active.py | 4 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``Link.active`` and backfill its value."""

    def forwards(self, orm):
        """Add the column and initialise existing rows."""
        # Adding field 'Link.active'
        db.add_column('links_link', 'active', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
        if not db.dry_run:
            # set 'active' field of all links to 'true'
            for l in orm.Link.objects.all():
                l.active = True
                l.save()
            # set links to 'parlament.co.il' inactive
            for l in orm.Link.objects.filter(url__contains='parlament.co.il'):
                l.active = False
                l.save()

    def backwards(self, orm):
        """Remove the column again."""
        # Deleting field 'Link.active'
        db.delete_column('links_link', 'active')

    # Frozen ORM snapshot used by South; do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'links.link': {
            'Meta': {'object_name': 'Link'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_link'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.LinkType']", 'null': 'True', 'blank': 'True'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
        },
        'links.linkedfile': {
            'Meta': {'object_name': 'LinkedFile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.Link']"}),
            'link_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'sha1': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
        },
        'links.linktype': {
            'Meta': {'object_name': 'LinkType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['links']
|
openhealthcare/gloss | refs/heads/v0.1 | alembic/versions/440bea99abd6_.py | 1 | """empty message
Revision ID: 440bea99abd6
Revises: 716909215049
Create Date: 2016-04-06 13:33:56.292972
"""
# revision identifiers, used by Alembic.
revision = '440bea99abd6'       # this migration's id
down_revision = '716909215049'  # parent revision in the chain
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable ``count`` column to ``outgoingmessage``."""
    count_column = sa.Column('count', sa.BigInteger(), nullable=True)
    op.add_column('outgoingmessage', count_column)
def downgrade():
    """Drop the ``count`` column added by this revision."""
    op.drop_column('outgoingmessage', 'count')
|
Hoikas/korman | refs/heads/master | korman/properties/modifiers/region.py | 1 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.props import *
from PyHSPlasma import *
from .base import PlasmaModifierProperties, PlasmaModifierLogicWiz
from .physics import bounds_types
# Map of footstep surface key -> Plasma engine surface index.
footstep_surface_ids = {
    "dirt": 0,
    # 1 = NULL
    "puddle": 2,
    # 3 = tile (NULL in MOUL)
    "metal": 4,
    "woodbridge": 5,
    "rope": 6,
    "grass": 7,
    # 8 = NULL
    "woodfloor": 9,
    "rug": 10,
    "stone": 11,
    # 12 = NULL
    # 13 = metal ladder (dupe of metal)
    "woodladder": 14,
    "water": 15,
    # 16 = maintainer's glass (NULL in PotS)
    # 17 = maintainer's metal grating (NULL in PotS)
    # 18 = swimming (why would you want this?)
}

# (identifier, UI label, description) triples for the Blender EnumProperty.
footstep_surfaces = [("dirt", "Dirt", "Dirt"),
                     ("grass", "Grass", "Grass"),
                     ("metal", "Metal", "Metal Catwalk"),
                     ("puddle", "Puddle", "Shallow Water"),
                     ("rope", "Rope", "Rope Ladder"),
                     ("rug", "Rug", "Carpet Rug"),
                     ("stone", "Stone", "Stone Tile"),
                     ("water", "Water", "Deep Water"),
                     ("woodbridge", "Wood Bridge", "Wood Bridge"),
                     ("woodfloor", "Wood Floor", "Wood Floor"),
                     ("woodladder", "Wood Ladder", "Wood Ladder")]
class PlasmaFootstepRegion(PlasmaModifierProperties, PlasmaModifierLogicWiz):
    """Region modifier that switches the avatar's footstep sound on entry/exit."""

    pl_id = "footstep"

    bl_category = "Region"
    bl_label = "Footstep"
    bl_description = "Footstep Region"

    # Footstep sound to play while the avatar is inside the region.
    surface = EnumProperty(name="Surface",
                           description="What kind of surface are we walking on?",
                           items=footstep_surfaces,
                           default="stone")
    # Shape of the physical region detector.
    bounds = EnumProperty(name="Region Bounds",
                          description="Physical object's bounds",
                          items=bounds_types,
                          default="hull")

    def created(self, obj):
        """Derive the modifier display name from the owning object."""
        self.display_name = "{}_FootRgn".format(obj.name)

    def export(self, exporter, bo, so):
        """Build the logic node tree, then export it with the object."""
        # Generate the logic nodes now
        self.logicwiz(bo)

        # Now, export the node tree
        self.node_tree.export(exporter, bo, so)

    def logicwiz(self, bo):
        """Wire up sensor -> responder -> footstep-sound message nodes."""
        tree = self.node_tree
        nodes = tree.nodes
        nodes.clear()

        # Region Sensor
        volsens = nodes.new("PlasmaVolumeSensorNode")
        volsens.name = "RegionSensor"
        volsens.region = bo.name
        volsens.bounds = self.bounds
        volsens.find_input_socket("enter").allow = True
        volsens.find_input_socket("exit").allow = True

        # Responder
        respmod = nodes.new("PlasmaResponderNode")
        respmod.name = "Resp"
        respmod.link_input(tree, volsens, "satisfies", "condition")
        respstate = nodes.new("PlasmaResponderStateNode")
        respstate.link_input(tree, respmod, "states", "condition")
        respstate.default_state = True
        respcmd = nodes.new("PlasmaResponderCommandNode")
        respcmd.link_input(tree, respstate, "cmds", "whodoneit")

        # ArmatureEffectStateMsg
        msg = nodes.new("PlasmaFootstepSoundMsgNode")
        msg.link_input(tree, respcmd, "msg", "sender")
        msg.surface = self.surface
class PlasmaPanicLinkRegion(PlasmaModifierProperties):
    """Region that panic-links the avatar (e.g. a fall zone) on contact."""

    pl_id = "paniclink"

    bl_category = "Region"
    bl_label = "Panic Link"
    bl_description = "Panic Link Region"

    # Whether the avatar plays the link-out animation when yanked away.
    play_anim = BoolProperty(name="Play Animation",
                             description="Play the link-out animation when panic linking",
                             default=True)

    def created(self, obj):
        """Derive the modifier display name from the owning object."""
        self.display_name = "{}_PanicLinkRgn".format(obj.name)

    def export(self, exporter, bo, so):
        """Export physics as an avatar detector plus a plPanicLinkRegion."""
        phys_mod = bo.plasma_modifiers.collision
        simIface, physical = exporter.physics.generate_physical(bo, so, phys_mod.bounds, self.display_name)

        # Now setup the region detector properties
        physical.memberGroup = plSimDefs.kGroupDetector
        physical.reportGroup = 1 << plSimDefs.kGroupAvatar

        # Finally, the panic link region proper
        reg = exporter.mgr.add_object(plPanicLinkRegion, name=self.display_name, so=so)
        reg.playLinkOutAnim = self.play_anim

    @property
    def requires_actor(self):
        # Panic link regions must be exported as actors (coordinate interface).
        return True
|
antoviaque/edx-platform | refs/heads/master | common/djangoapps/student/tests/test_admin_views.py | 34 | """
Tests student admin.py
"""
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory
class AdminCourseRolesPageTest(SharedModuleStoreTestCase):
    """Test the django admin course roles form saving data in db.
    """
    @classmethod
    def setUpClass(cls):
        super(AdminCourseRolesPageTest, cls).setUpClass()
        cls.course = CourseFactory.create(org='edx')

    def setUp(self):
        super(AdminCourseRolesPageTest, self).setUp()
        # Staff + superuser so the django admin pages are reachable.
        self.user = UserFactory.create(is_staff=True, is_superuser=True)
        self.user.save()

    def test_save_valid_data(self):
        """A fully-specified role saves once and rejects a duplicate."""
        data = {
            'course_id': unicode(self.course.id),
            'role': 'finance_admin',
            'org': 'edx',
            'email': self.user.email
        }

        self.client.login(username=self.user.username, password='test')

        # # adding new role from django admin page
        response = self.client.post(reverse('admin:student_courseaccessrole_add'), data=data)
        self.assertRedirects(response, reverse('admin:student_courseaccessrole_changelist'))

        response = self.client.get(reverse('admin:student_courseaccessrole_changelist'))
        self.assertContains(response, 'Select course access role to change')
        self.assertContains(response, 'Add course access role')
        self.assertContains(response, 'finance_admin')
        self.assertContains(response, unicode(self.course.id))
        self.assertContains(response, '1 course access role')

        #try adding with same information raise error.
        response = self.client.post(reverse('admin:student_courseaccessrole_add'), data=data)
        self.assertContains(response, 'Duplicate')

    def test_save_without_org_and_course_data(self):
        """Role + email + course_id saves without an explicit org."""
        data = {
            'role': 'staff',
            'email': self.user.email,
            'course_id': unicode(self.course.id)
        }

        self.client.login(username=self.user.username, password='test')

        # # adding new role from django admin page
        response = self.client.post(reverse('admin:student_courseaccessrole_add'), data=data)
        self.assertRedirects(response, reverse('admin:student_courseaccessrole_changelist'))

        response = self.client.get(reverse('admin:student_courseaccessrole_changelist'))
        self.assertContains(response, 'staff')
        self.assertContains(response, '1 course access role')

    def test_save_with_course_only(self):
        """A role can be saved with neither course_id nor org."""
        data = {
            'role': 'beta_testers',
            'email': self.user.email,

        }

        self.client.login(username=self.user.username, password='test')

        # # adding new role from django admin page
        response = self.client.post(reverse('admin:student_courseaccessrole_add'), data=data)
        self.assertRedirects(response, reverse('admin:student_courseaccessrole_changelist'))

        response = self.client.get(reverse('admin:student_courseaccessrole_changelist'))
        self.assertContains(response, 'beta_testers')
        self.assertContains(response, '1 course access role')

    def test_save_with_org_only(self):
        """A role scoped only to an org saves successfully."""
        data = {
            'role': 'beta_testers',
            'email': self.user.email,
            'org': 'myorg'

        }

        self.client.login(username=self.user.username, password='test')

        # # adding new role from django admin page
        response = self.client.post(reverse('admin:student_courseaccessrole_add'), data=data)
        self.assertRedirects(response, reverse('admin:student_courseaccessrole_changelist'))

        response = self.client.get(reverse('admin:student_courseaccessrole_changelist'))
        self.assertContains(response, 'myorg')
        self.assertContains(response, '1 course access role')

    def test_save_with_invalid_course(self):
        """Unknown course id and unknown email both produce form errors."""
        course = unicode('no/edx/course')
        email = "invalid@email.com"
        data = {
            'course_id': course,
            'role': 'finance_admin',
            'org': 'edx',
            'email': email
        }

        self.client.login(username=self.user.username, password='test')

        # Adding new role with invalid data
        response = self.client.post(reverse('admin:student_courseaccessrole_add'), data=data)
        self.assertContains(
            response,
            'Cannot find course with id {} in the modulestore'.format(
                course
            )
        )

        self.assertContains(
            response,
            "Email does not exist. Could not find {}. Please re-enter email address".format(
                email
            )
        )

    def test_save_valid_course_invalid_org(self):
        """An org that does not match the course's org is rejected."""
        data = {
            'course_id': unicode(self.course.id),
            'role': 'finance_admin',
            'org': 'edxxx',
            'email': self.user.email
        }

        self.client.login(username=self.user.username, password='test')

        # # adding new role from django admin page
        response = self.client.post(reverse('admin:student_courseaccessrole_add'), data=data)
        self.assertContains(
            response,
            'Org name {} is not valid. Valid name is {}.'.format(
                'edxxx', 'edx'
            )
        )
run2/citytour | refs/heads/master | 4symantec/Lib/site-packages/numpy-1.9.2-py2.7-win-amd64.egg/numpy/distutils/line_endings.py | 256 | """ Functions for converting from DOS to UNIX line endings
"""
from __future__ import division, absolute_import, print_function
import sys, re, os
def dos2unix(file):
    "Replace CRLF with LF in argument files. Print names of changed files."
    if os.path.isdir(file):
        print(file, "Directory!")
        return
    # Read as bytes so the NUL check and the CRLF substitution work on both
    # Python 2 and 3, and close the handle deterministically (the original
    # leaked both the read and write handles).
    with open(file, "rb") as f:
        data = f.read()
    if b'\0' in data:
        print(file, "Binary!")
        return
    newdata = re.sub(b"\r\n", b"\n", data)
    if newdata != data:
        print('dos2unix:', file)
        with open(file, "wb") as f:
            f.write(newdata)
        # Return the path so callers can collect modified files.
        return file
    else:
        print(file, 'ok')
def dos2unix_one_dir(modified_files, dir_name, file_names):
    """Convert every file in one directory, recording the modified paths."""
    for name in file_names:
        converted = dos2unix(os.path.join(dir_name, name))
        if converted is not None:
            modified_files.append(converted)
def dos2unix_dir(dir_name):
    """Recursively convert CRLF->LF under *dir_name*; return modified paths."""
    modified_files = []
    # os.path.walk was removed in Python 3; os.walk is the portable
    # equivalent and visits the same (dirpath, filenames) pairs.
    for dir_root, _dirs, file_names in os.walk(dir_name):
        dos2unix_one_dir(modified_files, dir_root, file_names)
    return modified_files
#----------------------------------
def unix2dos(file):
    "Replace LF with CRLF in argument files. Print names of changed files."
    if os.path.isdir(file):
        print(file, "Directory!")
        return
    # Read as bytes so the NUL check and substitutions work on both
    # Python 2 and 3, and close the handle deterministically (the original
    # leaked both the read and write handles).
    with open(file, "rb") as f:
        data = f.read()
    if b'\0' in data:
        print(file, "Binary!")
        return
    # Normalise to LF first so existing CRLF pairs are not doubled.
    newdata = re.sub(b"\r\n", b"\n", data)
    newdata = re.sub(b"\n", b"\r\n", newdata)
    if newdata != data:
        print('unix2dos:', file)
        with open(file, "wb") as f:
            f.write(newdata)
        # Return the path so callers can collect modified files.
        return file
    else:
        print(file, 'ok')
def unix2dos_one_dir(modified_files, dir_name, file_names):
    """Convert every file in one directory, recording the modified paths."""
    for file in file_names:
        full_path = os.path.join(dir_name, file)
        # Bug fix: the conversion result was discarded and the loop variable
        # (always non-None) tested instead, so every file was reported as
        # modified.  Mirror dos2unix_one_dir and test the actual result.
        file = unix2dos(full_path)
        if file is not None:
            modified_files.append(file)
def unix2dos_dir(dir_name):
    """Recursively convert LF->CRLF under *dir_name*; return modified paths."""
    modified_files = []
    # os.path.walk was removed in Python 3; os.walk is the portable
    # equivalent and visits the same (dirpath, filenames) pairs.
    for dir_root, _dirs, file_names in os.walk(dir_name):
        unix2dos_one_dir(modified_files, dir_root, file_names)
    return modified_files
if __name__ == "__main__":
    # Convert the directory named on the command line, in place.
    dos2unix_dir(sys.argv[1])
|
vinutah/apps | refs/heads/master | tools/llvm/llvm_39/opt/utils/release/findRegressions-simple.py | 123 | #!/usr/bin/env python
import re, string, sys, os, time, math
# Module-wide debug flag; set to 1 for verbose parse tracing.
DEBUG = 0

# Names of the two per-test metrics tracked in the parsed results.
(tp, exp) = ('compile', 'exec')
def parse(file):
  """Parse an llvm-test log into {test_name: {metric: value}}.

  Recognises TEST-PASS / TEST-FAIL lines (recording '<metric> state' and a
  NaN '<metric> time' placeholder) and TEST-RESULT-compile/exec-success
  lines (recording the measured time in seconds).
  """
  f = open(file, 'r')
  d = f.read()

  # Cleanup weird stuff
  d = re.sub(r',\d+:\d', '', d)

  r = re.findall(r'TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n', d)

  test = {}
  fname = ''
  for t in r:
    if DEBUG:
      print t

    if t[0] == 'PASS' or t[0] == 'FAIL' :
      tmp = t[2].split('llvm-test/')

      if DEBUG:
        print tmp

      if len(tmp) == 2:
        fname = tmp[1].strip('\r\n')
      else:
        fname = tmp[0].strip('\r\n')

      if not test.has_key(fname):
        test[fname] = {}

      test[fname][t[1] + ' state'] = t[0]
      test[fname][t[1] + ' time'] = float('nan')
    else :
      try:
        n = t[0].split('RESULT-')[1]

        if DEBUG:
          print "n == ", n;

        if n == 'compile-success':
          test[fname]['compile time'] = float(t[2].split('program')[1].strip('\r\n'))

        elif n == 'exec-success':
          test[fname]['exec time'] = float(t[2].split('program')[1].strip('\r\n'))
          if DEBUG:
            print test[fname][string.replace(n, '-success', '')]

        else :
          # print "ERROR!"
          sys.exit(1)

      except:
        # Malformed RESULT line: skip it rather than abort the whole parse.
        continue

  return test
# Diff results and look for regressions.
# Diff results and look for regressions.
def diffResults(d_old, d_new):
  # Compare two parse() result dicts and print regressions, new passes and
  # removed tests (Python 2 code: `print` statements and dict.has_key).
  #
  # A regression is PASS->FAIL, a lost result, or a >5% time increase;
  # a new pass is FAIL->PASS or a newly appearing result.
  regressions = {}
  passes = {}
  removed = ''
  for x in ['compile state', 'compile time', 'exec state', 'exec time']:
    regressions[x] = ''
    passes[x] = ''
  for t in sorted(d_old.keys()) :
    if d_new.has_key(t):
      # Check if the test passed or failed.
      for x in ['compile state', 'compile time', 'exec state', 'exec time']:
        if not d_old[t].has_key(x) and not d_new[t].has_key(x):
          continue
        if d_old[t].has_key(x):
          if d_new[t].has_key(x):
            if d_old[t][x] == 'PASS':
              if d_new[t][x] != 'PASS':
                regressions[x] += t + "\n"
            else:
              if d_new[t][x] == 'PASS':
                passes[x] += t + "\n"
          else :
            regressions[x] += t + "\n"
        if x == 'compile state' or x == 'exec state':
          continue
        # For execution time, if there is no result it's a fail.
        if not d_old[t].has_key(x) and not d_new[t].has_key(x):
          continue
        elif not d_new[t].has_key(x):
          regressions[x] += t + "\n"
        elif not d_old[t].has_key(x):
          passes[x] += t + "\n"
        # NOTE(review): the elif chain above falls through to the NaN/time
        # comparisons without a `continue`, so when one side lacks key x the
        # lookups below appear able to raise KeyError -- confirm against real
        # logs before relying on this path.
        if math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
          continue
        elif math.isnan(d_old[t][x]) and not math.isnan(d_new[t][x]):
          passes[x] += t + "\n"
        elif not math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
          regressions[x] += t + ": NaN%\n"
        # Flag slowdowns of more than 5% relative to the old time.
        if d_new[t][x] > d_old[t][x] and d_old[t][x] > 0.0 and \
              (d_new[t][x] - d_old[t][x]) / d_old[t][x] > .05:
          regressions[x] += t + ": " + "{0:.1f}".format(100 * (d_new[t][x] - d_old[t][x]) / d_old[t][x]) + "%\n"
    else :
      # Test present in the old run but missing from the new one.
      removed += t + "\n"
  if len(regressions['compile state']) != 0:
    print 'REGRESSION: Compilation Failed'
    print regressions['compile state']
  if len(regressions['exec state']) != 0:
    print 'REGRESSION: Execution Failed'
    print regressions['exec state']
  if len(regressions['compile time']) != 0:
    print 'REGRESSION: Compilation Time'
    print regressions['compile time']
  if len(regressions['exec time']) != 0:
    print 'REGRESSION: Execution Time'
    print regressions['exec time']
  if len(passes['compile state']) != 0:
    print 'NEW PASSES: Compilation'
    print passes['compile state']
  if len(passes['exec state']) != 0:
    print 'NEW PASSES: Execution'
    print passes['exec state']
  if len(removed) != 0:
    print 'REMOVED TESTS'
    print removed
# Main
# Command-line driver: parse the two logs given on the command line and
# report the differences (Python 2 script).
if len(sys.argv) < 3 :
  print 'Usage:', sys.argv[0], '<old log> <new log>'
  sys.exit(-1)
d_old = parse(sys.argv[1])
d_new = parse(sys.argv[2])
diffResults(d_old, d_new)
|
Yuudachimoe/HikariChun-RedBot | refs/heads/master | lib/pip/_vendor/html5lib/_inputstream.py | 328 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, binary_type
from pip._vendor.six.moves import http_client, urllib
import codecs
import re
from pip._vendor import webencodings
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import ReparseException
from . import _utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
# Character class matching control characters and Unicode noncharacters
# (surrogates are appended separately below where the build supports them).
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
    # Use one extra step of indirection and create surrogates with
    # eval. Not using this indirection would introduce an illegal
    # unicode literal on platforms not supporting such lone
    # surrogates.
    assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
                                    eval('"\\uD800-\\uDFFF"') +  # pylint:disable=eval-used
                                    "]")
else:
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
# Noncharacter code points outside the Basic Multilingual Plane, used by
# characterErrorsUCS2 after reassembling surrogate pairs.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own
    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """
    def __init__(self, stream):
        # Wrapped (unbuffered/unseekable) byte stream.
        self.stream = stream
        # Chunks already read from the stream, in read order.
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset
    def tell(self):
        """Return the current absolute position within the buffered data."""
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos
    def seek(self, pos):
        """Seek to absolute position pos; only data already buffered is seekable."""
        assert pos <= self._bufferedBytes()
        offset = pos
        i = 0
        # Walk chunks until the one containing pos is found.
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]
    def read(self, bytes):
        """Read up to `bytes` bytes, serving from the buffer when positioned
        before its end, otherwise from the underlying stream."""
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)
    def _bufferedBytes(self):
        # Total number of bytes held in the buffer.
        return sum([len(item) for item in self.buffer])
    def _readStream(self, bytes):
        # Read a fresh chunk from the stream, append it to the buffer and
        # leave the position at its end.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data
    def _readFromBuffer(self, bytes):
        # Satisfy a read from buffered chunks, falling through to the
        # underlying stream for any remainder past the buffered data.
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                # Request is fully satisfied inside this chunk.
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                # Consume the rest of this chunk and move to the next one.
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            rv.append(self._readStream(remainingBytes))
        return b"".join(rv)
def HTMLInputStream(source, **kwargs):
    """Factory returning the right input-stream class for *source*.

    A text (unicode) source yields an HTMLUnicodeInputStream; a byte
    source (or anything whose type cannot be probed safely) yields an
    HTMLBinaryInputStream.
    """
    # Work around Python bug #20007: read(0) closes the connection.
    # http://bugs.python.org/issue20007
    # So never probe HTTP responses (plain or wrapped in addinfourl) with
    # read(0); they are always byte streams.
    is_http_response = (isinstance(source, http_client.HTTPResponse) or
                        (isinstance(source, urllib.response.addbase) and
                         isinstance(source.fp, http_client.HTTPResponse)))
    if is_http_response:
        isUnicode = False
    elif hasattr(source, "read"):
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)
    if not isUnicode:
        return HTMLBinaryInputStream(source, **kwargs)
    # Unicode input already has a fixed encoding: reject any *_encoding
    # keyword arguments, which only make sense for byte input.
    encodings = [x for x in kwargs if x.endswith("_encoding")]
    if encodings:
        raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
    return HTMLUnicodeInputStream(source, **kwargs)
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    # Number of characters requested from the underlying stream per
    # readChunk() call.
    _defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        if not _utils.supports_lone_surrogates:
            # Such platforms will have already checked for such
            # surrogate errors, so no need to do this checking.
            self.reportCharacterErrors = None
        elif len("\U0010FFFF") == 1:
            # Wide (UCS-4) build: astral characters have length 1.
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            # Narrow (UCS-2) build: astral characters are surrogate pairs.
            self.reportCharacterErrors = self.characterErrorsUCS2
        # List of where new lines occur
        self.newLines = [0]
        self.charEncoding = (lookupEncoding("utf-8"), "certain")
        self.dataStream = self.openStream(source)
        self.reset()
    def reset(self):
        # Current decoded chunk and the read cursor within it.
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        # Parse errors ("invalid-codepoint") collected while reading.
        self.errors = []
        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0
        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)
        return stream
    def _position(self, offset):
        # Translate an offset into the current chunk to an absolute
        # (line, column) pair using the counts carried over from
        # previous chunks.
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)
    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based, columns 0-based.
        return (line + 1, col)
    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1
        return char
    def readChunk(self, chunkSize=None):
        """Refill self.chunk from the data stream; return False at EOF."""
        if chunkSize is None:
            chunkSize = self._defaultChunkSize
        # Roll the line/column counts of the chunk being discarded into the
        # running totals before replacing it.
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        data = self.dataStream.read(chunkSize)
        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False
        if len(data) > 1:
            # Hold back a trailing CR or lead surrogate so it can be joined
            # with the start of the next chunk.
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]
        if self.reportCharacterErrors:
            self.reportCharacterErrors(data)
        # Replace invalid characters
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        self.chunk = data
        self.chunkSize = len(data)
        return True
    def characterErrorsUCS4(self, data):
        # One parse error per invalid code point found in the chunk.
        for _ in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")
    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        # On narrow builds invalid astral characters show up as surrogate
        # pairs; reassemble each pair before classifying it.
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if _utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone surrogate at the very end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """
        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = "".join(rv)
        return r
    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    def __init__(self, source, override_encoding=None, transport_encoding=None,
                 same_origin_parent_encoding=None, likely_encoding=None,
                 default_encoding="windows-1252", useChardet=True):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 1024
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Things from args
        self.override_encoding = override_encoding
        self.transport_encoding = transport_encoding
        self.same_origin_parent_encoding = same_origin_parent_encoding
        self.likely_encoding = likely_encoding
        self.default_encoding = default_encoding
        # Determine encoding
        self.charEncoding = self.determineEncoding(useChardet)
        assert self.charEncoding[0] is not None
        # Call superclass
        self.reset()
    def reset(self):
        # Rebuild the decoding reader over the raw stream with the currently
        # selected encoding, replacing undecodable bytes.
        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
        HTMLUnicodeInputStream.reset(self)
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        try:
            # The pre-parser needs to rewind; wrap unseekable streams.
            stream.seek(stream.tell())
        except:  # pylint:disable=bare-except
            stream = BufferedStream(stream)
        return stream
    def determineEncoding(self, chardet=True):
        """Pick the character encoding, trying sources in priority order.

        Returns an (encoding, confidence) pair where confidence is
        "certain" or "tentative".
        """
        # BOMs take precedence over everything
        # This will also read past the BOM if present
        charEncoding = self.detectBOM(), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # If we've been overriden, we've been overriden
        charEncoding = lookupEncoding(self.override_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # Now check the transport layer
        charEncoding = lookupEncoding(self.transport_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # Look for meta elements with encoding information
        charEncoding = self.detectEncodingMeta(), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Parent document encoding
        charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
        if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
            return charEncoding
        # "likely" encoding
        charEncoding = lookupEncoding(self.likely_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Guess with chardet, if available
        if chardet:
            try:
                from chardet.universaldetector import UniversalDetector
            except ImportError:
                pass
            else:
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = lookupEncoding(detector.result['encoding'])
                # Rewind so decoding starts from the top of the document.
                self.rawStream.seek(0)
                if encoding is not None:
                    return encoding, "tentative"
        # Try the default encoding
        charEncoding = lookupEncoding(self.default_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Fallback to html5lib's default if even that hasn't worked
        return lookupEncoding("windows-1252"), "tentative"
    def changeEncoding(self, newEncoding):
        """Switch to newEncoding mid-parse; raises ReparseException when the
        document must be re-parsed from the start with the new encoding."""
        assert self.charEncoding[1] != "certain"
        newEncoding = lookupEncoding(newEncoding)
        if newEncoding is None:
            return
        if newEncoding.name in ("utf-16be", "utf-16le"):
            # Per spec, a meta-declared utf-16 is treated as utf-8.
            newEncoding = lookupEncoding("utf-8")
            assert newEncoding is not None
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
            codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])   # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)   # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        if encoding:
            self.rawStream.seek(seek)
            return lookupEncoding(encoding)
        else:
            self.rawStream.seek(0)
            return None
    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        # Pre-scan only the first numBytesMeta bytes, then rewind.
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
            # Per spec, a meta-declared utf-16 is treated as utf-8.
            encoding = lookupEncoding("utf-8")
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # NOTE: first parameter is conventionally `cls`; kept as-is.
        # The stored bytes are lower-cased so matching is case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # pylint:disable=unused-argument
        # Position starts before the first byte; the first next() lands on 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        # Advance one byte; StopIteration past the end also signals the
        # pre-parser to give up (see EncodingParser.getEncoding).
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        # Step back one byte, returning the byte at the *old* position.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        # One-byte bytes object at the current position.
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        # Ran off the end; position is left at len(self).
        self._position = p
        return None
    def skipUntil(self, chars):
        # Inverse of skip(): advance until a byte in `chars` is found.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        # Set by handleMeta() when a charset declaration is found.
        self.encoding = None
    def getEncoding(self):
        """Scan the byte prefix for a meta-declared encoding; return the
        looked-up encoding or None."""
        # Handlers return False (or raise StopIteration) to stop the scan.
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for _ in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Ran off the end of the buffer: stop scanning.
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")
    def handleMeta(self):
        """Inspect a <meta> tag's attributes for charset information.

        Returns False once an encoding has been committed, True to keep
        scanning."""
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # A content attribute was seen before the pragma:
                        # commit the encoding it declared.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = lookupEncoding(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = lookupEncoding(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Hold it until the content-type pragma shows up.
                                pendingEncoding = codec
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        # Consume the byte after "</" before examining the tag name.
        next(self.data)
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        """Skip over a (start or end) tag and its attributes."""
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        # Skip to the end of the current markup construct.
        return self.data.jumpTo(b">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # Implements the attribute-parsing steps of the HTML encoding
        # sniffing algorithm; step comments below refer to that algorithm.
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extract the charset value from a meta `content` attribute, e.g.
    b"text/html; charset=utf-8" -> b"utf-8"."""
    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        """Return the declared charset as bytes, or None if absent/malformed."""
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # Ran off the end of the attribute value.
            return None
def lookupEncoding(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    # Labels arrive as bytes from the pre-parser; they must be ASCII to be
    # a valid encoding label at all.
    if isinstance(encoding, binary_type):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None
    if encoding is None:
        return None
    # Resolve the label through the WHATWG encoding registry; an invalid
    # label surfaces as AttributeError inside webencodings.
    try:
        return webencodings.lookup(encoding)
    except AttributeError:
        return None
|
WhileLoop/ansible-modules-extras | refs/heads/devel | cloud/amazon/iam_mfa_device_facts.py | 44 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata blocks: parsed by ansible-doc, so the YAML string
# contents must stay exactly as written.
DOCUMENTATION = '''
---
module: iam_mfa_device_facts
short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
description:
    - List the MFA (Multi-Factor Authentication) devices registered for a user
version_added: "2.2"
author: Victor Costan (@pwnall)
options:
  user_name:
    description:
      - The name of the user whose MFA devices will be listed
    required: false
    default: null
extends_documentation_fragment:
    - aws
    - ec2
requirements:
    - boto3
    - botocore
'''

RETURN = """
mfa_devices:
    description: The MFA devices registered for the given user
    returned: always
    type: list
    sample:
      - enable_date: "2016-03-11T23:25:36+00:00"
        serial_number: arn:aws:iam::085120003701:mfa/pwnall
        user_name: pwnall
      - enable_date: "2016-03-11T23:25:37+00:00"
        serial_number: arn:aws:iam::085120003702:mfa/pwnall
        user_name: pwnall
"""

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# List MFA devices (more details: http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html)
iam_mfa_device_facts:
register: mfa_devices

# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
sts_assume_role:
  mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
  role_arn: "arn:aws:iam::123456789012:role/someRole"
  role_session_name: "someRoleSession"
register: assumed_role
'''

# boto3 is an optional dependency: record availability so main() can fail
# with a helpful message instead of an ImportError traceback.
try:
    import boto3
    from botocore.exceptions import ClientError
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
def list_mfa_devices(connection, module):
    """Query IAM for a user's MFA devices and exit the module with the result.

    Scopes the query to the ``user_name`` module parameter when given,
    otherwise lists devices for the calling identity.  Exits via
    ``module.exit_json`` on success or ``module.fail_json`` on an AWS error.
    """
    request_args = {}
    requested_user = module.params.get('user_name')
    if requested_user is not None:
        request_args['UserName'] = requested_user
    try:
        response = connection.list_mfa_devices(**request_args)
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
    # Facts-only module: nothing is ever modified, so changed is always False.
    module.exit_json(changed=False, **camel_dict_to_snake_dict(response))
def main():
    """Module entry point: validate requirements, build the IAM client and
    delegate to list_mfa_devices()."""
    spec = ec2_argument_spec()
    spec.update(dict(user_name=dict(required=False, default=None)))
    module = AnsibleModule(argument_spec=spec)
    # boto3 availability was recorded at import time.
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if region:
        connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    else:
        module.fail_json(msg="region must be specified")
    list_mfa_devices(connection, module)
# import module snippets
# Star imports are the historical Ansible convention: they provide
# AnsibleModule, ec2_argument_spec, get_aws_connection_info, boto3_conn and
# camel_dict_to_snake_dict used above.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()
|
ChuckM/micropython | refs/heads/master | tests/basics/builtin_hash.py | 28 | # test builtin hash function
# Expected-output test: each print is compared against CPython's output, so
# the executable statements must not change.
print(hash(False))
print(hash(True))
print({():1})  # hash tuple
print({1 << 66:1})  # hash big int
print({-(1 << 66):2})  # hash negative big int
print(hash in {hash:1})  # hash function
# Mutable containers are unhashable.
try:
    hash([])
except TypeError:
    print("TypeError")
class A:
    def __hash__(self):
        return 123
    def __repr__(self):
        return "a instance"
print(hash(A()))
print({A():1})
# all user-classes have default __hash__
class B:
    pass
hash(B())
# if __eq__ is defined then default __hash__ is not used
class C:
    def __eq__(self, another):
        return True
try:
    hash(C())
except TypeError:
    print("TypeError")
# __hash__ must return an int
class D:
    def __hash__(self):
        return None
try:
    hash(D())
except TypeError:
    print("TypeError")
# __hash__ returning a bool should be converted to an int
class E:
    def __hash__(self):
        return True
print(hash(E()))
# __hash__ returning a large number should be truncated
class F:
    def __hash__(self):
        return 1 << 70 | 1
print(hash(F()) != 0)
|
MortimerGoro/servo | refs/heads/master | tests/wpt/css-tests/tools/html5lib/html5lib/serializer/__init__.py | 1731 | from __future__ import absolute_import, division, unicode_literals
from .. import treewalkers
from .htmlserializer import HTMLSerializer
def serialize(input, tree="etree", format="html", encoding=None,
              **serializer_opts):
    """Render *input* (a parsed tree) back to markup.

    tree names the treewalker to use for the given tree type, format must
    be "html", and serializer_opts are forwarded to HTMLSerializer.
    Returns text, or bytes when an encoding is given.
    """
    # XXX: Should we cache this?
    walker = treewalkers.getTreeWalker(tree)
    if format != "html":
        raise ValueError("type must be html")
    serializer = HTMLSerializer(**serializer_opts)
    return serializer.render(walker(input), encoding)
|
kntem/webdeposit | refs/heads/webdeposit-final | modules/websearch/lib/websearch_admin_templates.py | 3 | # -*- coding: utf-8 -*-
##
## handles rendering of webmessage module
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" templates for webmessage module """
__revision__ = "$Id$"
from invenio.webmessage_mailutils import email_quoted_txt2html, email_quote_txt
from invenio.webmessage_config import CFG_WEBMESSAGE_STATUS_CODE, \
CFG_WEBMESSAGE_SEPARATOR, \
CFG_WEBMESSAGE_RESULTS_FIELD
from invenio.config import CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES
from invenio.dateutils import convert_datetext_to_dategui, \
datetext_default, \
create_day_selectbox, \
create_month_selectbox, \
create_year_selectbox
from invenio.urlutils import create_html_link, create_url
from invenio.htmlutils import escape_html
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG
from invenio.messages import gettext_set_language
from invenio.webuser import get_user_info
class Template:
"""Templates for WebMessage module"""
|
raymondnijssen/QGIS | refs/heads/master | python/plugins/processing/algs/gdal/rgb2pct.py | 6 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
rgb2pct.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import isWindows
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class rgb2pct(GdalAlgorithm):
    """Processing algorithm wrapping GDAL's ``rgb2pct`` script, which
    converts a 24-bit RGB raster into an 8-bit paletted raster."""

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    NCOLORS = 'NCOLORS'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the input raster, colour-count and output parameters."""
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
        # Colour count is capped at 255 for the 8-bit paletted output.
        self.addParameter(QgsProcessingParameterNumber(self.NCOLORS,
                                                       self.tr('Number of colors'),
                                                       type=QgsProcessingParameterNumber.Integer,
                                                       minValue=0,
                                                       maxValue=255,
                                                       defaultValue=2))
        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('RGB to PCT')))

    def name(self):
        return 'rgbtopct'

    def displayName(self):
        return self.tr('RGB to PCT')

    def group(self):
        return self.tr('Raster conversion')

    def groupId(self):
        return 'rasterconversion'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', '24-to-8-bits.png'))

    def commandName(self):
        return 'rgb2pct'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Assemble the rgb2pct command line for the given parameters."""
        ncolors = str(self.parameterAsInt(parameters, self.NCOLORS, context))
        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        driver = QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1])

        raster = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if raster is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))

        arguments = ['-n', ncolors, '-of', driver, raster.source(), out]
        joined = GdalUtils.escapeAndJoin(arguments)

        # On Windows the GDAL python scripts are invoked via .bat wrappers.
        if isWindows():
            return ['cmd.exe', '/C ', self.commandName() + '.bat', joined]
        return [self.commandName() + '.py', joined]
|
gzavitz/pgmagick | refs/heads/master | test/test_pgmagick_image.py | 2 | import sys
import unittest
import pgmagick
from pgmagick import Blob, Image, Geometry, Color, LineJoin, StorageType
from pgmagick import ChannelType
from pgmagick import gminfo
if gminfo.library == 'ImageMagick':
from pgmagick import DistortImageMethod, SparseColorMethod
class TestImage(unittest.TestCase):
    """Constructor and basic-API tests that run on both GraphicsMagick
    and ImageMagick builds of pgmagick."""
    def test_noarg_init(self):
        """Image() with no arguments builds an Image instance."""
        im = Image()
        self.assertEqual(type(im), Image)
    def test_fromfile_init_error(self):
        """Constructing from a non-existent file raises RuntimeError."""
        self.assertRaises(RuntimeError, Image, "xXxX.jpg")
    def test_size_and_color_init(self):
        """Image(Geometry, Color) honours the requested dimensions."""
        im = Image(Geometry(300, 200), Color('transparent'))
        size = im.size()
        self.assertEqual(300, size.width())
        self.assertEqual(200, size.height())
    def test_stroke_linejoin(self):
        """strokeLineJoin accepts each of the three LineJoin styles."""
        im = Image(Geometry(300, 200), Color('transparent'))
        im.strokeLineJoin(LineJoin.MiterJoin)
        im.strokeLineJoin(LineJoin.RoundJoin)
        im.strokeLineJoin(LineJoin.BevelJoin)
    #def test_image_getpixels(self):
    #    img = Image(Geometry(300, 200), Color('transparent'))
    #    img.getPixels(10, 10, 10, 10)
    def test_image_init_storagetype(self):
        """Image can be built from a raw character pixel buffer."""
        data = ["0" for i in range(10000)]
        img = Image(100, 100, "RGB", StorageType.CharPixel, "".join(data))
    #def test_haldClut(self):
    #    img = Image()
    #    if hasattr(img, "haldClut"):
    #        clutimg = Image(Geometry(400, 300), Color("transparent"))
    #        clutimg.read("gradient:white-black")
    #        img.haldClut(clutimg)
class TestIMImage(unittest.TestCase):
    """Tests for Image APIs that only exist when pgmagick is built
    against ImageMagick (rather than GraphicsMagick)."""

    def setUp(self):
        self.img = Image()
        # Each test body below is a no-op unless the backing library is
        # ImageMagick, because these APIs are ImageMagick-only.
        self.is_imagemagick = False
        if gminfo.library == 'ImageMagick':
            self.is_imagemagick = True

    def test_adaptiveBlur(self):
        if self.is_imagemagick:
            self.img.adaptiveBlur()

    def test_distort(self):
        if self.is_imagemagick:
            self.img.distort(DistortImageMethod.ScaleRotateTranslateDistortion,
                             1, 1.0, True)

    def test_extent(self):
        if self.is_imagemagick:
            self.img.extent(Geometry(100, 100))

    def test_inverseFourierTransform(self):
        if self.is_imagemagick:
            phase = Image()
            self.img.inverseFourierTransform(phase)

    def test_sigmoidalContrast(self):
        if self.is_imagemagick:
            self.img.sigmoidalContrast(2, 2.)

    def test_splice(self):
        if self.is_imagemagick:
            self.img.splice(Geometry(100, 100))

    def test_sparseColor(self):
        if self.is_imagemagick:
            self.img.sparseColor(ChannelType.RedChannel,
                                 SparseColorMethod.PolynomialColorInterpolate,
                                 1, 1.)

    def test_exifProfile(self):
        # Bug fix: this method was previously also named test_sparseColor,
        # which silently shadowed the real sparseColor test above so that
        # test never ran.  Renamed to describe what it actually exercises.
        if self.is_imagemagick:
            ret = self.img.exifProfile()
            self.assertEqual(type(ret), type(pgmagick._pgmagick.Blob()))

    def test_virtualPixelMethod(self):
        if self.is_imagemagick:
            ret = self.img.virtualPixelMethod()
            self.assertEqual(type(ret),
                             type(pgmagick._pgmagick.VirtualPixelMethod()))
if __name__ == '__main__':
    # Guard so that importing this test module (e.g. by a test collector)
    # does not immediately run the suite and sys.exit().
    unittest.main()
|
MaplePlan/djwp | refs/heads/master | django/middleware/__init__.py | 12133432 | |
CSC301H-Fall2013/JuakStore | refs/heads/master | site-packages/tests/regressiontests/http_utils/models.py | 12133432 | |
qnub/django-cms | refs/heads/develop | cms/tests/test_signals.py | 23 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.utils import override_settings
from cms.api import create_page
from cms.models import UrlconfRevision
from cms.signals import urls_need_reloading
from cms.test_utils.testcases import CMSTestCase
APP_NAME = 'SampleApp'
class SignalTester(object):
    """Callable stand-in for a signal receiver that records invocations.

    ``call_count`` holds how many times it has been called and ``calls``
    accumulates the ``(args, kwargs)`` of every call, in order.
    """

    def __init__(self):
        self.call_count = 0
        self.calls = []

    def __call__(self, *args, **kwargs):
        self.calls.append((args, kwargs))
        self.call_count += 1
@contextmanager
def signal_tester(signal):
    """Attach a fresh SignalTester to ``signal`` for the duration of the
    ``with`` block, always disconnecting it again afterwards."""
    listener = SignalTester()
    signal.connect(listener, weak=True)
    try:
        yield listener
    finally:
        signal.disconnect(listener, weak=True)
class SignalTests(TestCase):
    """Verify that ``urls_need_reloading`` fires when apphooked pages
    change in ways that alter the URL configuration.

    Each test issues ``self.client.get('/')`` after the change because,
    as the assertions show, the signal fires on the next request rather
    than at save time.
    """

    def test_urls_need_reloading_signal_create(self):
        """Creating a published apphooked page triggers the signal."""
        with signal_tester(urls_need_reloading) as env:
            # Baseline request: nothing has changed yet.
            self.client.get('/')
            self.assertEqual(env.call_count, 0)
            create_page(
                "apphooked-page",
                "nav_playground.html",
                "en",
                published=True,
                apphook="SampleApp",
                apphook_namespace="test"
            )
            self.client.get('/')
            self.assertEqual(env.call_count, 1)

    def test_urls_need_reloading_signal_delete(self):
        """Deleting an apphooked page triggers the signal."""
        with signal_tester(urls_need_reloading) as env:
            self.client.get('/')
            self.assertEqual(env.call_count, 0)
            page = create_page(
                "apphooked-page",
                "nav_playground.html",
                "en",
                published=True,
                apphook="SampleApp",
                apphook_namespace="test"
            )
            page.delete()
            self.client.get('/')
            self.assertEqual(env.call_count, 1)

    def test_urls_need_reloading_signal_change_slug(self):
        """Changing a page slug and republishing triggers the signal again."""
        with signal_tester(urls_need_reloading) as env:
            self.assertEqual(env.call_count, 0)
            page = create_page(
                "apphooked-page",
                "nav_playground.html",
                "en",
                published=True,
                apphook="SampleApp",
                apphook_namespace="test"
            )
            self.client.get('/')
            self.assertEqual(env.call_count, 1)
            title = page.title_set.get(language="en")
            title.slug += 'test'
            title.save()
            page.publish('en')
            self.client.get('/')
            self.assertEqual(env.call_count, 2)
@override_settings(
    MIDDLEWARE_CLASSES=[
        'cms.middleware.utils.ApphookReloadMiddleware'
    ] + settings.MIDDLEWARE_CLASSES,
)
class ApphooksReloadTests(CMSTestCase):
    def test_urls_reloaded(self):
        """
        Tests that URLs are automatically reloaded when the ApphookReload
        middleware is installed.
        """
        #
        # Sets up an apphook'ed page, but does not yet publish it.
        #
        superuser = get_user_model().objects.create_superuser(
            'admin', 'admin@admin.com', 'admin')
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser)
        page.publish('en')
        app_page = create_page("app_page", "nav_playground.html", "en",
                               created_by=superuser, parent=page,
                               published=False, apphook="SampleApp")
        self.client.get('/')  # Required to invoke the middleware
        #
        # Gets the current urls revision for testing against later.
        #
        current_revision, _ = UrlconfRevision.get_or_create_revision()
        #
        # Publishes the apphook. This is one of many ways to trigger the
        # firing of the signal. The tests above test some of the other ways
        # already.
        #
        app_page.publish('en')
        self.client.get('/')  # Required to invoke the middleware
        # And, this should result in the updating of the UrlconfRevision.
        new_revision, _ = UrlconfRevision.get_or_create_revision()
        # Fix: assertNotEquals is a deprecated camelCase alias (removed in
        # Python 3.12); use the canonical assertNotEqual.
        self.assertNotEqual(current_revision, new_revision)
|
CoolCloud/taiga-back | refs/heads/master | taiga/projects/history/mixins.py | 8 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import warnings
from .services import take_snapshot
from taiga.projects.notifications import services as notifications_services
class HistoryResourceMixin(object):
    """
    Rest Framework resource mixin for resources
    susceptible to have models with history.

    Hooks post_save/pre_delete to persist a history snapshot of the
    affected object via ``take_snapshot``.
    """

    # This attribute will store the last history entry
    # created for this resource. It is mainly used for
    # notifications mixin.
    # (Double-underscore names are name-mangled, so subclasses cannot
    # accidentally clobber them.)
    __last_history = None
    __object_saved = False

    def get_last_history(self):
        # Warn when called before any snapshot was persisted: that means
        # this mixin ran after (instead of before) the mixin that needs
        # the history entry.
        if not self.__object_saved:
            message = ("get_last_history() function called before any object are saved. "
                       "Seems you have a wrong mixing order on your resource.")
            warnings.warn(message, RuntimeWarning)
        return self.__last_history

    def get_object_for_snapshot(self, obj):
        """
        Method that returns a model instance ready to snapshot.

        It is by default noop, but should be overwrited when
        snapshot ready instance is found in one of foreign key
        fields.
        """
        return obj

    def persist_history_snapshot(self, obj=None, delete:bool=False):
        """
        Shortcut for resources with special save/persist
        logic.

        Takes a snapshot of the (possibly redirected) snapshot object,
        notifying watchers first, and records the resulting history entry
        for get_last_history().
        """
        user = self.request.user
        comment = self.request.DATA.get("comment", "")
        if obj is None:
            obj = self.get_object()
        sobj = self.get_object_for_snapshot(obj)
        if sobj != obj and delete:
            # The snapshot target is a related object that keeps existing
            # even though `obj` is being deleted, so the snapshot itself
            # must not be marked as a deletion.
            delete = False
        notifications_services.analize_object_for_watchers(obj, comment, user)
        self.__last_history = take_snapshot(sobj, comment=comment, user=user, delete=delete)
        self.__object_saved = True

    def post_save(self, obj, created=False):
        # Snapshot after every save, then continue the normal chain.
        self.persist_history_snapshot(obj=obj)
        super().post_save(obj, created=created)

    def pre_delete(self, obj):
        # Snapshot (as a deletion) before the object disappears.
        self.persist_history_snapshot(obj, delete=True)
        super().pre_delete(obj)
|
flaing/gemrb | refs/heads/master | gemrb/GUIScripts/iwd2/Class.py | 4 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, class (GUICG2)
import GemRB
from GUIDefines import *
import GUICommon
import CommonTables
ClassWindow = 0
TextAreaControl = 0
DoneButton = 0
BackButton = 0
ClassCount = 0
HasSubClass = 0
ClassID = 0
def AdjustTextArea():
    """Show the selected class description and toggle the Done button.

    Updates the module globals ClassID and HasSubClass for the current
    "Class" selection; Done stays disabled while a kit (subclass) still
    has to be chosen.
    """
    global HasSubClass, ClassID

    row = GUICommon.GetClassRowName (GemRB.GetVar ("Class")-1, "index")
    TextAreaControl.SetText (CommonTables.Classes.GetValue (row, "DESC_REF"))
    ClassID = CommonTables.Classes.GetValue(row, "ID")

    # A row whose CLASS column equals our ID is a kit (subclass) of ours.
    HasSubClass = 0
    for idx in range(1, ClassCount):
        kit = CommonTables.Classes.GetRowName(idx-1)
        if CommonTables.Classes.GetValue(kit, "CLASS") == ClassID:
            HasSubClass = 1
            break

    if HasSubClass:
        DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
    else:
        DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
    return
def OnLoad():
    """Build the class-selection window and wire up its controls.

    Buttons are configured in two passes: radio-button flags must be set
    on every button before any of them gets text/state (see the comment
    below), hence the duplicated loops.
    """
    global ClassWindow, TextAreaControl, DoneButton, BackButton
    global ClassCount
    GemRB.LoadWindowPack("GUICG", 800, 600)
    #this replaces help02.2da for class restrictions
    ClassCount = CommonTables.Classes.GetRowCount()+1
    ClassWindow = GemRB.LoadWindow(2)
    # Resolve the row of the character's base race; the class table has a
    # per-race column (looked up by RaceName below) with the restrictions.
    rid = CommonTables.Races.FindValue(3, GemRB.GetVar('BaseRace'))
    RaceName = CommonTables.Races.GetRowName(rid)
    #radiobutton groups must be set up before doing anything else to them
    j = 0
    for i in range(1,ClassCount):
        ClassName = CommonTables.Classes.GetRowName(i-1)
        # Rows with CLASS > 0 are kits (subclasses); only base classes
        # get a button here.
        Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
        if Allowed > 0:
            continue
        Button = ClassWindow.GetControl(j+2)
        j = j+1
        Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_SET)
        Button.SetState(IE_GUI_BUTTON_DISABLED)
    # Second pass: label each base-class button and enable it only when
    # the current race may pick that class.
    j = 0
    for i in range(1,ClassCount):
        ClassName = CommonTables.Classes.GetRowName(i-1)
        #determining if this is a kit or class
        Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
        if Allowed > 0:
            continue
        Allowed = CommonTables.Classes.GetValue(ClassName, RaceName)
        Button = ClassWindow.GetControl(j+2)
        j = j+1
        t = CommonTables.Classes.GetValue(ClassName, "NAME_REF")
        Button.SetText(t )
        if Allowed==0:
            continue
        Button.SetState(IE_GUI_BUTTON_ENABLED)
        Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, ClassPress)
        Button.SetVarAssoc("Class", i)
    BackButton = ClassWindow.GetControl(17)
    BackButton.SetText(15416)
    BackButton.SetFlags(IE_GUI_BUTTON_CANCEL,OP_OR)
    DoneButton = ClassWindow.GetControl(0)
    DoneButton.SetText(36789)
    DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR)
    ScrollBarControl = ClassWindow.GetControl(15)
    TextAreaControl = ClassWindow.GetControl(16)
    # Restore a previous selection (e.g. when stepping back to this page);
    # with no selection yet, show the generic prompt and disable Done.
    Class = GemRB.GetVar("Class")-1
    if Class<0:
        TextAreaControl.SetText(17242)
        DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
    else:
        AdjustTextArea()
    DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
    BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress)
    ClassWindow.SetVisible(WINDOW_VISIBLE)
    return
def ClassPress():
    """Handle a base-class button press.

    Shows the class description; if the class has kits, the buttons are
    repopulated with those kits so the player must pick one before the
    Done button enables.
    """
    global HasSubClass
    AdjustTextArea()
    if HasSubClass == 0:
        return
    DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
    # First clear every base-class button slot...
    j = 0
    for i in range(1,ClassCount):
        ClassName = CommonTables.Classes.GetRowName(i-1)
        Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
        if Allowed > 0:
            continue
        Button = ClassWindow.GetControl(j+2)
        j = j+1
        Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_SET)
        Button.SetState(IE_GUI_BUTTON_DISABLED)
        Button.SetText("")
    # ...then fill the same slots with this class's kits (rows whose
    # CLASS column equals the selected class's ID).
    j=0
    for i in range(1, ClassCount):
        ClassName = CommonTables.Classes.GetRowName(i-1)
        #determining if this is a kit or class
        Allowed = CommonTables.Classes.GetValue(ClassName, "CLASS")
        if Allowed != ClassID:
            continue
        Button = ClassWindow.GetControl(j+2)
        j = j+1
        t = CommonTables.Classes.GetValue(ClassName, "NAME_REF")
        Button.SetText(t )
        Button.SetState(IE_GUI_BUTTON_ENABLED)
        Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, ClassPress2)
        Button.SetVarAssoc("Class", i)
    # Back now returns to the base-class list instead of the previous page.
    BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress2)
    return
def ClassPress2():
    """Show the chosen kit's description and allow finishing the page."""
    kit = GUICommon.GetClassRowName (GemRB.GetVar ("Class")-1, "index")
    TextAreaControl.SetText (CommonTables.Classes.GetValue (kit, "DESC_REF"))
    DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
    return
def BackPress2():
    """Leave the kit list and rebuild the window with the base classes."""
    DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
    if ClassWindow:
        ClassWindow.Unload()
    # Re-running OnLoad restores the base-class button layout.
    OnLoad()
    return
def BackPress():
    """Abandon class selection and return to the previous chargen step."""
    if ClassWindow:
        ClassWindow.Unload()
    GemRB.SetNextScript("CharGen3")
    # Scrap the class choice both in the GUI variable...
    GemRB.SetVar("Class",0) #scrapping the class value
    # ...and on the character being generated.
    slot = GemRB.GetVar("Slot")
    for stat in (IE_CLASS, IE_KIT):
        GemRB.SetPlayerStat (slot, stat, 0)
    return
def NextPress():
    """Store the chosen base class and advance to alignment selection."""
    selected = GUICommon.GetClassRowName (GemRB.GetVar ("Class")-1, "index")
    # For kits the CLASS column points at the parent (base) class; for
    # base classes it is <= 0, in which case the selection itself is the
    # base class.
    base = CommonTables.Classes.GetValue (selected, "CLASS")
    if base <= 0:
        base = GemRB.GetVar("Class")
    GemRB.SetVar("BaseClass", base)
    if ClassWindow:
        ClassWindow.Unload()
    GemRB.SetNextScript("CharGen4") #alignment
    return
|
raycarnes/e-commerce | refs/heads/8.0 | website_sale_product_brand/models/__init__.py | 7 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from . import website
|
ddipp/Euler_solutions | refs/heads/main | p021.py | 1 | #!/usr/bin/env python3
# ===============================================================================
# Let d(n) be defined as the sum of proper divisors of n (numbers less than n
# which divide evenly into n).
# If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
# each of a and b are called amicable numbers.
#
# For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
# and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
# and 142; so d(284) = 220.
#
# Evaluate the sum of all the amicable numbers under 10000.
# ===============================================================================
def PrimeFactors(n):
    """Return the prime factorisation of n as an ascending list with
    repeats, e.g. PrimeFactors(12) == [2, 2, 3]."""
    factors = []
    d = 2
    while d * d <= n:
        # Divide out each prime completely before moving on.
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n != 1:
        # Whatever remains above sqrt of the original n is itself prime.
        factors.append(n)
    return factors
def ListToDict(l):
    """Return a dict mapping each element of l to its multiplicity."""
    counts = {}
    for item in l:
        counts[item] = counts.get(item, 0) + 1
    return counts
def divisors(n):
    """Return the sorted proper divisors of n (every divisor except n)."""
    exponents = ListToDict(PrimeFactors(n))
    divs = [1]
    # Expand the divisor list one prime at a time: multiply every divisor
    # found so far by each power of the current prime.
    for prime, power in exponents.items():
        expanded = []
        for d in divs:
            for e in range(power + 1):
                expanded.append(d * prime**e)
        divs = expanded
    divs.sort()
    # The last (largest) entry is n itself -- drop it to keep only proper
    # divisors.
    return divs[:-1]
def solution():
    """Sum all amicable numbers below 10000 (Project Euler problem 21)."""
    total = 0
    for a in range(1, 10000):
        b = sum(divisors(a))
        # a and b are amicable when d(a) == b, d(b) == a and a != b.
        if b != a and sum(divisors(b)) == a:
            total += a
    return total
if __name__ == '__main__':
    # Run the solver and print the answer when executed as a script.
    print(solution())
|
kmonsoor/pyglet | refs/heads/master | contrib/spryte/rect.py | 29 |
class Rect(object):
    '''A rectangular area anchored at its bottom-left corner.

    Besides the basic ``x``/``y``/``width``/``height`` attributes a rich
    set of convenience handles (edges, corners and midpoints) is exposed;
    assigning to any of them moves the rectangle accordingly.

    The Rect area includes the bottom and left borders but not the top and
    right borders.
    '''

    def __init__(self, x, y, width, height):
        '''Create a Rect with the bottom-left corner at (x, y) and
        dimensions (width, height).
        '''
        self._x = x
        self._y = y
        self._width = width
        self._height = height

    # The basic attributes are routed through plain setter methods so a
    # subclass can override just the setter; the property objects below
    # pick such overrides up automatically.
    def set_x(self, value):
        self._x = value
    x = property(lambda self: self._x, set_x)

    def set_y(self, value):
        self._y = value
    y = property(lambda self: self._y, set_y)

    def set_width(self, value):
        self._width = value
    width = property(lambda self: self._width, set_width)

    def set_height(self, value):
        self._height = value
    height = property(lambda self: self._height, set_height)

    def set_pos(self, value):
        self._x, self._y = value
    pos = property(lambda self: (self._x, self._y), set_pos)

    def set_size(self, value):
        self._width, self._height = value
    size = property(lambda self: (self._width, self._height), set_size)

    def contains(self, x, y):
        '''Return boolean whether the point defined by x, y is inside the
        rect area.
        '''
        # NOTE(review): points exactly on the top/right border pass this
        # test, although the class docstring says those borders are
        # excluded -- confirm which behaviour callers rely on.
        if x < self._x or x > self._x + self._width:
            return False
        if y < self._y or y > self._y + self._height:
            return False
        return True

    def intersects(self, other):
        '''Return boolean whether the "other" rect (an object with .x, .y,
        .width and .height attributes) overlaps this Rect in any way.
        '''
        separated = (self._x + self._width < other.x
                     or other.x + other.width < self._x
                     or self._y + self._height < other.y
                     or other.y + other.height < self._y)
        return not separated

    # r/w, in pixels, y extent
    def get_top(self):
        return self.y + self.height
    def set_top(self, y):
        self.y = y - self.height
    top = property(get_top, set_top)

    # r/w, in pixels, y extent
    def get_bottom(self):
        return self.y
    def set_bottom(self, y):
        self.y = y
    bottom = property(get_bottom, set_bottom)

    # r/w, in pixels, x extent
    def get_left(self):
        return self.x
    def set_left(self, x):
        self.x = x
    left = property(get_left, set_left)

    # r/w, in pixels, x extent
    def get_right(self):
        return self.x + self.width
    def set_right(self, x):
        self.x = x - self.width
    right = property(get_right, set_right)

    # r/w, in pixels, (x, y)
    def get_center(self):
        return (self.x + self.width/2, self.y + self.height/2)
    def set_center(self, center):
        cx, cy = center
        self.pos = (cx - self.width/2, cy - self.height/2)
    center = property(get_center, set_center)

    # r/w, in pixels, (x, y)
    def get_midtop(self):
        return (self.x + self.width/2, self.y + self.height)
    def set_midtop(self, midtop):
        mx, my = midtop
        self.pos = (mx - self.width/2, my - self.height)
    midtop = property(get_midtop, set_midtop)

    # r/w, in pixels, (x, y)
    def get_midbottom(self):
        return (self.x + self.width/2, self.y)
    def set_midbottom(self, midbottom):
        mx, my = midbottom
        self.pos = (mx - self.width/2, my)
    midbottom = property(get_midbottom, set_midbottom)

    # r/w, in pixels, (x, y)
    def get_midleft(self):
        return (self.x, self.y + self.height/2)
    def set_midleft(self, midleft):
        mx, my = midleft
        self.pos = (mx, my - self.height/2)
    midleft = property(get_midleft, set_midleft)

    # r/w, in pixels, (x, y)
    def get_midright(self):
        return (self.x + self.width, self.y + self.height/2)
    def set_midright(self, midright):
        mx, my = midright
        self.pos = (mx - self.width, my - self.height/2)
    midright = property(get_midright, set_midright)

    # r/w, in pixels, (x, y)
    def get_topleft(self):
        return (self.x, self.y + self.height)
    def set_topleft(self, pos):
        px, py = pos
        self.pos = (px, py - self.height)
    topleft = property(get_topleft, set_topleft)

    # r/w, in pixels, (x, y)
    def get_topright(self):
        return (self.x + self.width, self.y + self.height)
    def set_topright(self, pos):
        px, py = pos
        self.pos = (px - self.width, py - self.height)
    topright = property(get_topright, set_topright)

    # r/w, in pixels, (x, y)
    def get_bottomright(self):
        return (self.x + self.width, self.y)
    def set_bottomright(self, pos):
        px, py = pos
        self.pos = (px - self.width, py)
    bottomright = property(get_bottomright, set_bottomright)

    # r/w, in pixels, (x, y)
    def get_bottomleft(self):
        return (self.x, self.y)
    def set_bottomleft(self, pos):
        self.x, self.y = pos
    bottomleft = property(get_bottomleft, set_bottomleft)
|
denlap007/devnullect | refs/heads/group-support | src/models/resource.py | 1 | from peewee import PrimaryKeyField, CharField, DateTimeField
from theBaseModel import MyBaseModel
class Resource(MyBaseModel):
    """Peewee model mapped to the ``resource`` table."""
    id = PrimaryKeyField(db_column="id")
    # Free-text content; 2000 is presumably the max length -- verify
    # against the peewee CharField signature in use.
    rs_content = CharField(2000)
    # Timestamp for the resource (exact semantics not shown here).
    rs_date = DateTimeField()
    class Meta:
        # Default query ordering and explicit table name.
        order_by = ('id',)
        db_table = 'resource'
|
ArchAssault-Project/archassaultweb | refs/heads/master | main/migrations/0006_add_more_info_to_packages.py | 5 | from south.db import db
from django.db import models
from main.models import *
class Migration:
def forwards(self, orm):
# Adding field 'Package.compressed_size'
db.add_column('packages', 'compressed_size', orm['main.package:compressed_size'])
# Adding field 'Package.installed_size'
db.add_column('packages', 'installed_size', orm['main.package:installed_size'])
# Adding field 'Package.build_date'
db.add_column('packages', 'build_date', orm['main.package:build_date'])
def backwards(self, orm):
# Deleting field 'Package.compressed_size'
db.delete_column('packages', 'compressed_size')
# Deleting field 'Package.installed_size'
db.delete_column('packages', 'installed_size')
# Deleting field 'Package.build_date'
db.delete_column('packages', 'build_date')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.altforum': {
'Meta': {'db_table': "'alt_forums'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.arch': {
'Meta': {'db_table': "'arches'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'main.donor': {
'Meta': {'db_table': "'donors'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'main.externalproject': {
'description': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'main.mirror': {
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'admin_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isos': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'main.mirrorprotocol': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
'main.mirrorrsync': {
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rsync_ips'", 'to': "orm['main.Mirror']"})
},
'main.mirrorurl': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': "orm['main.Mirror']"}),
'protocol': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': "orm['main.MirrorProtocol']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.news': {
'Meta': {'db_table': "'news'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'news_author'", 'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postdate': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.package': {
'Meta': {'db_table': "'packages'"},
'arch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Arch']"}),
'build_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'compressed_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'maintained_packages'", 'null': 'True', 'to': "orm['auth.User']"}),
'needupdate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pkgdesc': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Repo']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.packagedepend': {
'Meta': {'db_table': "'package_depends'"},
'depname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'depvcmp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.packagefile': {
'Meta': {'db_table': "'package_files'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.press': {
'Meta': {'db_table': "'press'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.repo': {
'Meta': {'db_table': "'repos'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'main.signoff': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.todolist': {
'Meta': {'db_table': "'todolists'"},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.todolistpkg': {
'Meta': {'unique_together': "(('list', 'pkg'),)", 'db_table': "'todolist_pkgs'"},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Todolist']"}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.userprofile': {
'Meta': {'db_table': "'user_profiles'"},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'allowed_repos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Repo']", 'blank': 'True'}),
'favorite_distros': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'other_contact': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'default': "'devs/silhouette.png'", 'max_length': '100'}),
'public_email': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'roles': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userprofile_user'", 'unique': 'True', 'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'yob': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['main']
|
mattclay/ansible | refs/heads/devel | test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py | 29 | # -*- coding: utf-8 -*-
# Copyright: (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.facts.system.distribution import DistributionFiles
@pytest.mark.parametrize('realpath', ('SUSE_SLES_SAP.prod', 'SLES_SAP.prod'))
def test_distribution_sles4sap_suse_sles_sap(mock_module, mocker, realpath):
    """A baseproduct symlink resolving to a *SLES_SAP* product file must be
    reported as the 'SLES_SAP' distribution."""
    # Make /etc/products.d/baseproduct look like a symlink to the SAP product.
    mocker.patch('os.path.islink', return_value=True)
    mocker.patch('os.path.realpath', return_value='/etc/products.d/' + realpath)
    parse_kwargs = dict(name='SUSE', path='', data='suse', collected_facts=None)
    dist_files = DistributionFiles(module=mock_module())
    parsed = dist_files.parse_distribution_file_SUSE(**parse_kwargs)
    expected = (True, {'distribution': 'SLES_SAP'})
    assert parsed == expected
|
BlueLens/bl-magi | refs/heads/master | tensorflow/object_detection/builders/box_predictor_builder_test.py | 21 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import mock
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
  """Tests box_predictor_builder.build() for convolutional_box_predictor configs."""
  def test_box_predictor_calls_conv_argscope_fn(self):
    """The conv hyperparams proto and is_training flag must reach argscope_fn."""
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      # Echo the arguments back so the test can inspect what was passed in.
      return (conv_hyperparams_arg, is_training)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    # The mock echoed its arguments; unpack and compare field by field.
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
    self.assertAlmostEqual((hyperparams_proto.regularizer.
                            l1_regularizer.weight),
                           (conv_hyperparams_actual.regularizer.l1_regularizer.
                            weight))
    self.assertAlmostEqual((hyperparams_proto.initializer.
                            truncated_normal_initializer.stddev),
                           (conv_hyperparams_actual.initializer.
                            truncated_normal_initializer.stddev))
    self.assertAlmostEqual((hyperparams_proto.initializer.
                            truncated_normal_initializer.mean),
                           (conv_hyperparams_actual.initializer.
                            truncated_normal_initializer.mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)
  def test_construct_non_default_conv_box_predictor(self):
    """Every explicitly-set convolutional_box_predictor field must be honored."""
    box_predictor_text_proto = """
      convolutional_box_predictor {
        min_depth: 2
        max_depth: 16
        num_layers_before_predictor: 2
        use_dropout: false
        dropout_keep_probability: 0.4
        kernel_size: 3
        box_code_size: 3
        apply_sigmoid_to_scores: true
      }
    """
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    self.assertEqual(box_predictor._min_depth, 2)
    self.assertEqual(box_predictor._max_depth, 16)
    self.assertEqual(box_predictor._num_layers_before_predictor, 2)
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.4)
    self.assertTrue(box_predictor._apply_sigmoid_to_scores)
    self.assertEqual(box_predictor.num_classes, 10)
    self.assertFalse(box_predictor._is_training)
  def test_construct_default_conv_box_predictor(self):
    """Omitted fields must fall back to the proto defaults."""
    box_predictor_text_proto = """
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer {
            l1_regularizer {
            }
          }
          initializer {
            truncated_normal_initializer {
            }
          }
        }
      }"""
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=hyperparams_builder.build,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor._min_depth, 0)
    self.assertEqual(box_predictor._max_depth, 0)
    self.assertEqual(box_predictor._num_layers_before_predictor, 0)
    self.assertTrue(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
    self.assertFalse(box_predictor._apply_sigmoid_to_scores)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
  """Tests box_predictor_builder.build() for mask_rcnn_box_predictor configs."""
  def test_box_predictor_builder_calls_fc_argscope_fn(self):
    """The fc hyperparams proto and is_training flag must reach argscope_fn."""
    fc_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
      op: FC
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    mock_argscope_fn.assert_called_with(hyperparams_proto, False)
    self.assertEqual(box_predictor._fc_hyperparams, 'arg_scope')
  def test_non_default_mask_rcnn_box_predictor(self):
    """Explicit dropout and box_code_size settings must reach the predictor."""
    fc_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
      op: FC
    """
    box_predictor_text_proto = """
      mask_rcnn_box_predictor {
        use_dropout: true
        dropout_keep_probability: 0.8
        box_code_size: 3
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
    def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
      # Echo the arguments back so the test can inspect what was passed in.
      return (fc_hyperparams_arg, is_training)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_fc_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertTrue(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.8)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 3)
  def test_build_default_mask_rcnn_box_predictor(self):
    """Omitted fields: no dropout, 4-d box codes, no mask/keypoint heads."""
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock.Mock(return_value='arg_scope'),
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertFalse(box_predictor._predict_instance_masks)
    self.assertFalse(box_predictor._predict_keypoints)
  def test_build_box_predictor_with_mask_branch(self):
    """predict_instance_masks must trigger conv hyperparams and mask depth setup."""
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
        hyperparams_pb2.Hyperparams.CONV)
    box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
    box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    # Both the fc and the conv hyperparams must have been built via argscope_fn.
    mock_argscope_fn.assert_has_calls(
        [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
                   True),
         mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
                   True)], any_order=True)
    self.assertFalse(box_predictor._use_dropout)
    self.assertAlmostEqual(box_predictor._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertTrue(box_predictor._predict_instance_masks)
    self.assertEqual(box_predictor._mask_prediction_conv_depth, 512)
    self.assertFalse(box_predictor._predict_keypoints)
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
  """Tests box_predictor_builder.build() for rfcn_box_predictor configs."""
  def test_box_predictor_calls_fc_argscope_fn(self):
    """The conv hyperparams proto and is_training flag must reach argscope_fn."""
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
          weight: 0.0003
        }
      }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      # Echo the arguments back so the test can inspect what was passed in.
      return (conv_hyperparams_arg, is_training)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    # The mock echoed its arguments; unpack and compare field by field.
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams
    self.assertAlmostEqual((hyperparams_proto.regularizer.
                            l1_regularizer.weight),
                           (conv_hyperparams_actual.regularizer.l1_regularizer.
                            weight))
    self.assertAlmostEqual((hyperparams_proto.initializer.
                            truncated_normal_initializer.stddev),
                           (conv_hyperparams_actual.initializer.
                            truncated_normal_initializer.stddev))
    self.assertAlmostEqual((hyperparams_proto.initializer.
                            truncated_normal_initializer.mean),
                           (conv_hyperparams_actual.initializer.
                            truncated_normal_initializer.mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)
  def test_non_default_rfcn_box_predictor(self):
    """Explicit spatial bins, crop size, and box_code_size must be honored."""
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    box_predictor_text_proto = """
      rfcn_box_predictor {
        num_spatial_bins_height: 4
        num_spatial_bins_width: 4
        depth: 4
        box_code_size: 3
        crop_height: 16
        crop_width: 16
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 3)
    self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
    self.assertEqual(box_predictor._crop_size, [16, 16])
  def test_default_rfcn_box_predictor(self):
    """Omitted rfcn fields must fall back to the proto defaults."""
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
    self.assertEqual(box_predictor._crop_size, [12, 12])
if __name__ == '__main__':
tf.test.main()
|
mfherbst/spack | refs/heads/develop | var/spack/repos/builtin/packages/gurobi/package.py | 2 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Gurobi(Package):
    """The Gurobi Optimizer was designed from the ground up to be the fastest,
    most powerful solver available for your LP, QP, QCP, and MIP (MILP, MIQP,
    and MIQCP) problems.
    Note: Gurobi is licensed software. You will need to create an account on
    the Gurobi homepage and download Gurobi Optimizer yourself. Spack will
    search your current directory for the download file. Alternatively, add
    this file to a mirror so that Spack can find it. For instructions on how to
    set up a mirror, see http://spack.readthedocs.io/en/latest/mirrors.html
    Please set the path to licence file with the following command (for bash)
    export GRB_LICENSE_FILE=/path/to/gurobi/license/. See section 4 in
    $GUROBI_HOME/docs/quickstart_linux.pdf for more details."""
    homepage = "http://www.gurobi.com/index"
    version('7.5.2', '01f6dbb8d165838cca1664a1a14e4a85')
    # Licensing: Gurobi needs a license file, located via GRB_LICENSE_FILE.
    license_required = True
    license_vars = ['GRB_LICENSE_FILE']
    license_url = 'http://www.gurobi.com/downloads/download-center'
    def url_for_version(self, version):
        """Look for the manually-downloaded tarball in the current directory."""
        return "file://{0}/gurobi{1}_linux64.tar.gz".format(os.getcwd(), version)
    def setup_environment(self, spack_env, run_env):
        """Expose GUROBI_HOME to the run-time environment of users/dependents."""
        run_env.set('GUROBI_HOME', self.prefix)
    def install(self, spec, prefix):
        """The tarball ships prebuilt binaries; just copy the linux64 tree."""
        install_tree('linux64', prefix)
|
ThePletch/ansible | refs/heads/devel | test/units/contrib/inventory/test_vmware_inventory.py | 15 | #!/usr/bin/env python
import json
import os
import pickle
import unittest
import sys
try:
from vmware_inventory import VMWareInventory
except ImportError:
from nose.plugins.skip import SkipTest
raise SkipTest("test_vmware_inventory.py requires the python module 'vmware_inventory'")
# contrib's dirstruct doesn't contain __init__.py files
# Derive the repo checkout root from this test file's location, then point at
# the contrib/inventory directory that holds the dynamic-inventory scripts.
checkout_path = os.path.dirname(__file__)
checkout_path = checkout_path.replace('/test/units/contrib/inventory', '')
inventory_dir = os.path.join(checkout_path, 'contrib', 'inventory')
sys.path.append(os.path.abspath(inventory_dir))
# cleanup so that nose's path is not polluted with other inv scripts
# NOTE(review): the guarded 'vmware_inventory' import above executes before
# this append/remove pair, so the append cannot help that import here --
# confirm the intended statement ordering against the upstream file.
sys.path.remove(os.path.abspath(inventory_dir))
# Minimal two-host fixture shaped like Ansible dynamic-inventory JSON output:
# a group ('all') listing hostnames, plus per-host vars under '_meta.hostvars'.
BASICINVENTORY = {'all': {'hosts': ['foo', 'bar']},
                  '_meta': { 'hostvars': { 'foo': {'hostname': 'foo'},
                  'bar': {'hostname': 'bar'}}
                  }
                  }
class FakeArgs(object):
    """Stand-in for the parsed CLI arguments that VMWareInventory.show() reads."""
    debug = False            # no debug output
    write_dumpfile = None    # do not write a pickle dump
    load_dumpfile = None     # do not load a pickle dump
    host = False             # no --host given
    list = True              # behave as --list by default
class TestVMWareInventory(unittest.TestCase):
    """Tests for VMWareInventory.get_host_info() and .show() output shapes."""
    @staticmethod
    def _is_serializable(showdata):
        """Return True if *showdata* parses as JSON.
        BUG FIX: the three call sites previously used a bare 'except:', which
        would also swallow SystemExit/KeyboardInterrupt. json.loads raises
        ValueError (JSONDecodeError) for malformed JSON and TypeError for
        non-string input; anything else should propagate.
        """
        try:
            json.loads(showdata)
        except (TypeError, ValueError):
            return False
        return True
    def _make_inventory(self, **arg_overrides):
        """Build a non-loading VMWareInventory wired to the basic fixture,
        with FakeArgs (optionally overridden) as its CLI arguments."""
        vmw = VMWareInventory(load=False)
        vmw.args = FakeArgs()
        for key, value in arg_overrides.items():
            setattr(vmw.args, key, value)
        vmw.inventory = BASICINVENTORY
        return vmw
    def test_host_info_returns_single_host(self):
        """get_host_info() must return exactly that host's hostvars dict."""
        vmw = VMWareInventory(load=False)
        vmw.inventory = BASICINVENTORY
        foo = vmw.get_host_info('foo')
        bar = vmw.get_host_info('bar')
        assert foo == {'hostname': 'foo'}
        assert bar == {'hostname': 'bar'}
    def test_show_returns_serializable_data(self):
        vmw = self._make_inventory()
        assert self._is_serializable(vmw.show())
    def test_show_list_returns_serializable_data(self):
        vmw = self._make_inventory(list=True)
        assert self._is_serializable(vmw.show())
    def test_show_list_returns_all_data(self):
        """--list must emit the whole inventory as indented JSON."""
        vmw = self._make_inventory(list=True)
        expected = json.dumps(BASICINVENTORY, indent=2)
        assert vmw.show() == expected
    def test_show_host_returns_serializable_data(self):
        vmw = self._make_inventory(host='foo')
        assert self._is_serializable(vmw.show())
    def test_show_host_returns_just_host(self):
        """--host must emit only that host's hostvars as indented JSON."""
        vmw = self._make_inventory(list=False, host='foo')
        expected = json.dumps(BASICINVENTORY['_meta']['hostvars']['foo'],
                              indent=2)
        assert vmw.show() == expected
if __name__ == '__main__':
unittest.main()
|
olgabot/prettyplotlib | refs/heads/master | prettyplotlib/utils.py | 1 | __author__ = 'olga'
import matplotlib as mpl
import matplotlib.pyplot as plt
def remove_chartjunk(ax, spines, grid=None, ticklabels=None, show_ticks=False,
                     xkcd=False):
    '''
    Removes "chartjunk", such as extra lines of axes and tick marks.

    Parameters
    ----------
    ax : matplotlib Axes
        Axes to clean up.
    spines : iterable of str
        Spines to hide entirely, e.g. ('top', 'right').
    grid : iterable of str, optional
        If it contains "x" and/or "y", a white grid is drawn on those axes.
    ticklabels : str or iterable of str, optional
        "x", "y", or a collection containing either/both; tick labels on the
        named axes are removed.
    show_ticks : bool, optional
        Keep tick marks on spines that were not removed.
    xkcd : bool, optional
        Skip thinning the remaining spines (xkcd style draws its own).
    '''
    all_spines = ['top', 'bottom', 'right', 'left', 'polar']
    for spine in spines:
        # The try/except is for polar coordinates, which only have a 'polar'
        # spine and none of the others
        try:
            ax.spines[spine].set_visible(False)
        except KeyError:
            pass
    # For the remaining spines, make their line thinner and a slightly
    # off-black dark grey
    if not xkcd:
        for spine in set(all_spines).difference(set(spines)):
            # Again tolerate polar axes, which lack the rectangular spines.
            try:
                ax.spines[spine].set_linewidth(0.5)
            except KeyError:
                pass
    # Check that the axes are not log-scale. If they are, leave
    # the ticks because otherwise people assume a linear scale.
    x_pos = set(['top', 'bottom'])
    y_pos = set(['left', 'right'])
    xy_pos = [x_pos, y_pos]
    xy_ax_names = ['xaxis', 'yaxis']
    for ax_name, pos in zip(xy_ax_names, xy_pos):
        axis = ax.__dict__[ax_name]
        if show_ticks or axis.get_scale() == 'log':
            # Keep outward ticks only on the spines that were not removed.
            for p in pos.difference(spines):
                axis.set_tick_params(direction='out')
                axis.set_ticks_position(p)
        else:
            axis.set_ticks_position('none')
    if grid is not None:
        for g in grid:
            assert g in ('x', 'y')
            # BUG FIX: previously passed the whole `grid` collection as the
            # `axis` argument (which breaks for e.g. grid=['x', 'y']);
            # pass each single axis name instead.
            ax.grid(axis=g, color='white', linestyle='-', linewidth=0.5)
    if ticklabels is not None:
        if type(ticklabels) is str:
            assert ticklabels in set(('x', 'y'))
            if ticklabels == 'x':
                ax.set_xticklabels([])
            if ticklabels == 'y':
                ax.set_yticklabels([])
        else:
            # BUG FIX: was `set(ticklabels) | set(('x','y')) > 0`, a set-vs-int
            # comparison (TypeError on Python 3) that also used union instead
            # of intersection; require a real overlap with {'x', 'y'}.
            assert set(ticklabels) & set(('x', 'y'))
            if 'x' in ticklabels:
                ax.set_xticklabels([])
            # BUG FIX: was `elif`, so ticklabels=['x', 'y'] never cleared the
            # y labels even though the docstring promises it.
            if 'y' in ticklabels:
                ax.set_yticklabels([])
def maybe_get_ax(*args, **kwargs):
    """
    Resolve the target Axes for a prettyplotlib call.

    It used to be that the first argument of prettyplotlib had to be the 'ax'
    object, but that's not the case anymore: 'ax' may be passed as a keyword,
    as the first positional argument, or omitted (current axes are used).

    @param args: positional arguments, possibly starting with an Axes
    @param kwargs: keyword arguments, possibly containing 'ax'
    @return: (ax, remaining_args, kwargs_copy)
    @rtype: tuple
    """
    if 'ax' in kwargs:
        ax = kwargs.pop('ax')
    elif len(args) == 0:
        # BUG FIX: an unused `fig = plt.gcf()` local was assigned here and
        # never returned; dropped.
        ax = plt.gca()
    elif isinstance(args[0], mpl.axes.Axes):
        # First positional argument is the Axes; strip it from args.
        ax = args[0]
        args = args[1:]
    else:
        ax = plt.gca()
    return ax, args, dict(kwargs)
def maybe_get_fig_ax(*args, **kwargs):
    """
    Resolve the target Figure and Axes for a prettyplotlib call.

    It used to be that the first argument of prettyplotlib had to be the 'ax'
    object, but that's not the case anymore. This is specially made for
    pcolormesh: 'fig' and 'ax' may be passed as keywords, as the first two
    positional arguments, or omitted (a new figure/axes pair is created).

    @param args: positional arguments, possibly starting with (Figure, Axes)
    @param kwargs: keyword arguments, possibly containing 'fig' and/or 'ax'
    @return: (fig, ax, remaining_args, kwargs_copy)
    @rtype: tuple
    """
    if 'ax' in kwargs:
        ax = kwargs.pop('ax')
        if 'fig' in kwargs:
            fig = kwargs.pop('fig')
        else:
            fig = plt.gcf()
    elif len(args) == 0:
        fig = plt.gcf()
        ax = plt.gca()
    elif (len(args) > 1 and isinstance(args[0], mpl.figure.Figure) and
          isinstance(args[1], mpl.axes.Axes)):
        # BUG FIX: previously indexed args[1] without checking len(args),
        # raising IndexError when exactly one non-Axes positional was given;
        # such calls now fall through to the subplots() default below.
        fig = args[0]
        ax = args[1]
        args = args[2:]
    else:
        fig, ax = plt.subplots(1)
    return fig, ax, args, dict(kwargs)
def maybe_get_linewidth(**kwargs):
    """Return the line width given under any of its matplotlib aliases
    ('lw', 'linewidth', 'linewidths'), or the default 0.15 if none is set."""
    aliases = {"lw", "linewidth", "linewidths"}
    present = aliases & set(kwargs)
    if present:
        return kwargs[present.pop()]
    return 0.15
|
Eric-Zhong/odoo | refs/heads/8.0 | addons/account/edi/__init__.py | 450 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
csirtgadgets/bearded-avenger | refs/heads/master | cif/httpd/views/indicators.py | 1 | from ..common import pull_token, jsonify_success, jsonify_unauth, jsonify_unknown, compress, response_compress, \
VALID_FILTERS, jsonify_busy
from flask.views import MethodView
from flask import request, current_app
from cifsdk.client.zeromq import ZMQ as Client
from cifsdk.client.dummy import Dummy as DummyClient
from cif.constants import ROUTER_ADDR, PYVERSION
from cifsdk.exceptions import AuthError, TimeoutError, InvalidSearch, SubmissionFailed, CIFBusy
import logging
import zlib
# ZeroMQ router address this HTTP frontend forwards requests to.
remote = ROUTER_ADDR
logger = logging.getLogger('cif-httpd')
# Py2/py3 compatibility shim: a tuple usable with isinstance() for
# "string-like" values (py3 has no builtin basestring; bytes are included
# because router responses may arrive un-decoded).
if PYVERSION > 2:
    basestring = (str, bytes)
else:
    basestring = (str, unicode)
class IndicatorsAPI(MethodView):
    """Flask MethodView exposing search (GET), submit (POST) and delete
    (DELETE) of CIF indicators via the ZeroMQ router."""
    def get(self):
        """Search indicators using query-string filters.

        Returns the router's serialized payload untouched; maps router
        errors to appropriate HTTP statuses.
        """
        filters = {}
        for f in VALID_FILTERS:
            if request.args.get(f):
                filters[f] = request.args.get(f)
        # 'q' is a convenience alias for the 'indicator' filter.
        if request.args.get('q'):
            filters['indicator'] = request.args.get('q')
        # NOTE(review): the explicit gets below likely duplicate entries
        # already collected from VALID_FILTERS above; kept for backward
        # compatibility -- confirm against VALID_FILTERS before removing.
        if request.args.get('confidence'):
            filters['confidence'] = request.args.get('confidence')
        if request.args.get('provider'):
            filters['provider'] = request.args.get('provider')
        if request.args.get('group'):
            filters['group'] = request.args.get('group')
        if request.args.get('tags'):
            filters['tags'] = request.args.get('tags')
        if request.args.get('lasttime'):
            filters['lasttime'] = request.args.get('lasttime')
        # Test/dev mode: short-circuit to the in-process dummy client.
        if current_app.config.get('dummy'):
            r = DummyClient(remote, pull_token()).indicators_search(filters)
            return jsonify_success(r)
        try:
            with Client(remote, pull_token()) as cli:
                # decode=False: pass the router's serialized JSON through.
                r = cli.indicators_search(filters, decode=False)
        except RuntimeError as e:
            logger.error(e)
            return jsonify_unknown(msg='search failed')
        except InvalidSearch:
            return jsonify_unknown(msg='invalid search', code=400)
        except AuthError:
            return jsonify_unauth()
        except Exception as e:
            logger.error(e)
            return jsonify_unknown(msg='search failed, system may be too busy, check back later')
        response = current_app.response_class(r, mimetype='application/json')
        if isinstance(r, basestring):
            # BUG FIX: the unauthorized marker used to be tested twice
            # ('x in r and x in r'); one membership test is sufficient.
            if '"message":"unauthorized"' in r:
                response.status_code = 401
        return response
    def post(self):
        """Submit one or more indicators from the request body.

        Payloads over 5000 bytes are sent through the bulk ("fireball")
        path; ?nowait=1 returns immediately with 'pending'.
        """
        fireball = False
        nowait = request.args.get('nowait', False)
        if request.headers.get('Content-Length'):
            logger.debug('content-length: %s' % request.headers['Content-Length'])
            if int(request.headers['Content-Length']) > 5000:
                logger.info('fireball mode')
                fireball = True
        try:
            with Client(remote, pull_token()) as cli:
                r = cli.indicators_create(request.data, nowait=nowait,
                                          fireball=fireball)
                if nowait:
                    r = 'pending'
        except SubmissionFailed as e:
            logger.error(e)
            return jsonify_unknown(msg='submission failed: %s' % e, code=422)
        except RuntimeError as e:
            logger.error(e)
            return jsonify_unknown(msg='submission had a runtime error, check logs for more information', code=422)
        except TimeoutError as e:
            logger.error(e)
            return jsonify_unknown('submission timed out, check logs for more information', 408)
        except CIFBusy:
            return jsonify_busy()
        except AuthError:
            # BUG FIX: this handler used to follow the generic
            # 'except Exception' clause and was therefore unreachable.
            return jsonify_unauth()
        except Exception as e:
            logger.error(e)
            return jsonify_unknown('submission failed with generic exception, check logs for more information', 422)
        return jsonify_success(r, code=201)
    def delete(self):
        """Delete indicators matching the JSON payload in the request body."""
        try:
            data = request.data.decode('utf-8')
            with Client(remote, pull_token()) as cli:
                r = cli.indicators_delete(data)
        except RuntimeError as e:
            logger.error(e)
            return jsonify_unknown(msg='submission failed, check logs for more information', code=422)
        except TimeoutError as e:
            logger.error(e)
            return jsonify_unknown('submission failed, check logs for more information', 408)
        except AuthError:
            # BUG FIX: previously unreachable behind the generic Exception
            # handler below.
            return jsonify_unauth()
        except Exception as e:
            logger.error(e)
            return jsonify_unknown('submission failed, check logs for more information', 422)
        return jsonify_success(r)
|
GunnerJnr/_CodeInstitute | refs/heads/master | Stream-3/Full-Stack-Development/17.Create-A-Django-Based-Forum/3.Showing-Our-Subjects/we_are_social/products/views.py | 16 | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from .models import Product
# Create your views here.
@login_required(login_url='/login/')
def all_products(request):
    """Render the product listing page for authenticated users."""
    context = {"products": Product.objects.all()}
    return render(request, "products/products.html", context)
|
RanadeepPolavarapu/kuma | refs/heads/master | kuma/wiki/migrations/0003_auto_20150703_0419.py | 7 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import kuma.core.fields
class Migration(migrations.Migration):
    """Auto-generated migration: refresh the ``locale`` choice lists and set
    explicit ``related_name``s on the tagging through-model foreign keys."""

    dependencies = [
        ('wiki', '0002_auto_20150430_0805'),
    ]

    operations = [
        # Regenerate the LocaleField choice list on Document.
        migrations.AlterField(
            model_name='document',
            name='locale',
            field=kuma.core.fields.LocaleField(default=b'en-US', max_length=7, db_index=True, choices=[(b'af', 'Afrikaans'), (b'ar', '\u0639\u0631\u0628\u064a'), (b'az', 'Az\u0259rbaycanca'), (b'bm', 'Bamanankan'), (b'bn-BD', '\u09ac\u09be\u0982\u09b2\u09be (\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6)'), (b'bn-IN', '\u09ac\u09be\u0982\u09b2\u09be (\u09ad\u09be\u09b0\u09a4)'), (b'ca', 'Catal\xe0'), (b'cs', '\u010ce\u0161tina'), (b'de', 'Deutsch'), (b'ee', 'E\u028be'), (b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'en-US', 'English (US)'), (b'es', 'Espa\xf1ol'), (b'fa', '\u0641\u0627\u0631\u0633\u06cc'), (b'ff', 'Pulaar-Fulfulde'), (b'fi', 'suomi'), (b'fr', 'Fran\xe7ais'), (b'fy-NL', 'Frysk'), (b'ga-IE', 'Gaeilge'), (b'ha', 'Hausa'), (b'he', '\u05e2\u05d1\u05e8\u05d9\u05ea'), (b'hi-IN', '\u0939\u093f\u0928\u094d\u0926\u0940 (\u092d\u093e\u0930\u0924)'), (b'hr', 'Hrvatski'), (b'hu', 'magyar'), (b'id', 'Bahasa Indonesia'), (b'ig', 'Igbo'), (b'it', 'Italiano'), (b'ja', '\u65e5\u672c\u8a9e'), (b'ka', '\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'ln', 'Ling\xe1la'), (b'ml', '\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02'), (b'ms', 'Melayu'), (b'my', '\u1019\u103c\u1014\u103a\u1019\u102c\u1018\u102c\u101e\u102c'), (b'nl', 'Nederlands'), (b'pl', 'Polski'), (b'pt-BR', 'Portugu\xeas (do\xa0Brasil)'), (b'pt-PT', 'Portugu\xeas (Europeu)'), (b'ro', 'rom\xe2n\u0103'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'son', 'So\u014bay'), (b'sq', 'Shqip'), (b'sw', 'Kiswahili'), (b'ta', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd'), (b'th', '\u0e44\u0e17\u0e22'), (b'tl', 'Tagalog'), (b'tr', 'T\xfcrk\xe7e'), (b'vi', 'Ti\u1ebfng Vi\u1ec7t'), (b'wo', 'Wolof'), (b'xh', 'isiXhosa'), (b'yo', 'Yor\xf9b\xe1'), (b'zh-CN', '\u4e2d\u6587 (\u7b80\u4f53)'), (b'zh-TW', '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)'), (b'zu', 'isiZulu')]),
            preserve_default=True,
        ),
        # Same locale choice-list refresh for DocumentDeletionLog.
        migrations.AlterField(
            model_name='documentdeletionlog',
            name='locale',
            field=kuma.core.fields.LocaleField(default=b'en-US', max_length=7, db_index=True, choices=[(b'af', 'Afrikaans'), (b'ar', '\u0639\u0631\u0628\u064a'), (b'az', 'Az\u0259rbaycanca'), (b'bm', 'Bamanankan'), (b'bn-BD', '\u09ac\u09be\u0982\u09b2\u09be (\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6)'), (b'bn-IN', '\u09ac\u09be\u0982\u09b2\u09be (\u09ad\u09be\u09b0\u09a4)'), (b'ca', 'Catal\xe0'), (b'cs', '\u010ce\u0161tina'), (b'de', 'Deutsch'), (b'ee', 'E\u028be'), (b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'en-US', 'English (US)'), (b'es', 'Espa\xf1ol'), (b'fa', '\u0641\u0627\u0631\u0633\u06cc'), (b'ff', 'Pulaar-Fulfulde'), (b'fi', 'suomi'), (b'fr', 'Fran\xe7ais'), (b'fy-NL', 'Frysk'), (b'ga-IE', 'Gaeilge'), (b'ha', 'Hausa'), (b'he', '\u05e2\u05d1\u05e8\u05d9\u05ea'), (b'hi-IN', '\u0939\u093f\u0928\u094d\u0926\u0940 (\u092d\u093e\u0930\u0924)'), (b'hr', 'Hrvatski'), (b'hu', 'magyar'), (b'id', 'Bahasa Indonesia'), (b'ig', 'Igbo'), (b'it', 'Italiano'), (b'ja', '\u65e5\u672c\u8a9e'), (b'ka', '\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'ln', 'Ling\xe1la'), (b'ml', '\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02'), (b'ms', 'Melayu'), (b'my', '\u1019\u103c\u1014\u103a\u1019\u102c\u1018\u102c\u101e\u102c'), (b'nl', 'Nederlands'), (b'pl', 'Polski'), (b'pt-BR', 'Portugu\xeas (do\xa0Brasil)'), (b'pt-PT', 'Portugu\xeas (Europeu)'), (b'ro', 'rom\xe2n\u0103'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'son', 'So\u014bay'), (b'sq', 'Shqip'), (b'sw', 'Kiswahili'), (b'ta', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd'), (b'th', '\u0e44\u0e17\u0e22'), (b'tl', 'Tagalog'), (b'tr', 'T\xfcrk\xe7e'), (b'vi', 'Ti\u1ebfng Vi\u1ec7t'), (b'wo', 'Wolof'), (b'xh', 'isiXhosa'), (b'yo', 'Yor\xf9b\xe1'), (b'zh-CN', '\u4e2d\u6587 (\u7b80\u4f53)'), (b'zh-TW', '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)'), (b'zu', 'isiZulu')]),
            preserve_default=True,
        ),
        # Explicit related_name on each tagging through-model FK so the
        # reverse accessors do not clash across tag types.
        migrations.AlterField(
            model_name='localizationtaggedrevision',
            name='tag',
            field=models.ForeignKey(related_name='wiki_localizationtaggedrevision_items', to='wiki.LocalizationTag'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='reviewtaggedrevision',
            name='tag',
            field=models.ForeignKey(related_name='wiki_reviewtaggedrevision_items', to='wiki.ReviewTag'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='taggeddocument',
            name='tag',
            field=models.ForeignKey(related_name='wiki_taggeddocument_items', to='wiki.DocumentTag'),
            preserve_default=True,
        ),
    ]
|
DepthDeluxe/ansible | refs/heads/devel | lib/ansible/modules/remote_management/hpilo/hpilo_facts.py | 33 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: hpilo_facts
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Gather facts through an HP iLO interface
description:
- This module gathers facts for a specific system using its HP iLO interface.
These facts include hardware and network related information useful
for provisioning (e.g. macaddress, uuid).
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
requirements:
- hpilo
notes:
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
# Task to gather facts from a HP iLO interface only if the system is an HP server
- hpilo_facts:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- fail:
msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ hw_system_serial }}) !'
when: cmdb_serialno != hw_system_serial
'''
RETURN = r'''
# Typical output of HP iLO_facts for a physical system
hw_bios_date:
description: BIOS date
returned: always
type: string
sample: 05/05/2011
hw_bios_version:
description: BIOS version
returned: always
type: string
sample: P68
hw_ethX:
description: Interface information (for each interface)
returned: always
type: dictionary
sample:
- macaddress: 00:11:22:33:44:55
macaddress_dash: 00-11-22-33-44-55
hw_eth_ilo:
description: Interface information (for the iLO network interface)
returned: always
type: dictionary
sample:
- macaddress: 00:11:22:33:44:BA
- macaddress_dash: 00-11-22-33-44-BA
hw_product_name:
description: Product name
returned: always
type: string
sample: ProLiant DL360 G7
hw_product_uuid:
description: Product UUID
returned: always
type: string
sample: ef50bac8-2845-40ff-81d9-675315501dac
hw_system_serial:
description: System serial number
returned: always
type: string
sample: ABC12345D6
hw_uuid:
description: Hardware UUID
returned: always
type: string
sample: 123456ABC78901D2
'''
import re
import warnings
from ansible.module_utils.basic import AnsibleModule
# Import hpilo lazily-guarded: failure is reported via module.fail_json()
# inside main() rather than at import time.
try:
    import hpilo
    HAS_HPILO = True
except ImportError:
    HAS_HPILO = False

# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
    """Parse a flat-format NIC entry into an (fact name, fact dict) pair.

    :param entry: dict with at least a 'MAC' key ('xx-xx-...') and
        optionally a 'Port' key (numeric port, 1-based).
    :param non_numeric: fact name used when 'Port' is missing or not an
        integer (e.g. the iLO management interface).
    :return: tuple of (fact name, {'macaddress', 'macaddress_dash'}).
    """
    try:
        # Ports are 1-based; fact names are 0-based (hw_eth0, hw_eth1, ...).
        factname = 'hw_eth' + str(int(entry['Port']) - 1)
    except (KeyError, ValueError, TypeError):
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch only the expected failures.
        factname = non_numeric

    facts = {
        'macaddress': entry['MAC'].replace('-', ':'),
        'macaddress_dash': entry['MAC']
    }
    return (factname, facts)
def main():
    """Ansible module entry point: gather hardware facts over HP iLO.

    Connects to the iLO interface given by ``host`` and returns
    ``ansible_facts`` covering BIOS, system identity, NIC MAC addresses
    and embedded health (RAM/CPU) data.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
            login=dict(default='Administrator', type='str'),
            password=dict(default='admin', type='str', no_log=True),
        ),
        supports_check_mode=True,
    )

    if not HAS_HPILO:
        module.fail_json(msg='The hpilo python module is required')

    host = module.params['host']
    login = module.params['login']
    password = module.params['password']

    ilo = hpilo.Ilo(host, login=login, password=password)

    facts = {
        'module_hw': True,
    }

    # TODO: Count number of CPUs, DIMMs and total memory
    data = ilo.get_host_data()
    for entry in data:
        if 'type' not in entry:
            continue
        elif entry['type'] == 0:  # BIOS Information
            facts['hw_bios_version'] = entry['Family']
            facts['hw_bios_date'] = entry['Date']
        elif entry['type'] == 1:  # System Information
            facts['hw_uuid'] = entry['UUID']
            facts['hw_system_serial'] = entry['Serial Number'].rstrip()
            facts['hw_product_name'] = entry['Product Name']
            facts['hw_product_uuid'] = entry['cUUID']
        elif entry['type'] == 209:  # Embedded NIC MAC Assignment
            if 'fields' in entry:
                for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
                    if name.startswith('Port'):
                        # Non-numeric ports (e.g. 'iLO') map to the
                        # dedicated iLO interface fact name.
                        try:
                            factname = 'hw_eth' + str(int(value) - 1)
                        except (ValueError, TypeError):
                            # BUG FIX: was a bare `except:`.
                            factname = 'hw_eth_ilo'
                    elif name.startswith('MAC'):
                        facts[factname] = {
                            'macaddress': value.replace('-', ':'),
                            'macaddress_dash': value
                        }
            else:
                (factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
                facts[factname] = entry_facts
        # BUG FIX: the original contained a second `elif entry['type'] == 209:`
        # branch ("HPQ NIC iSCSI MAC Info") that could never execute, because
        # the first type-209 branch always matched first. It never produced
        # any hw_iscsi* facts, so the dead code has been removed.
        elif entry['type'] == 233:  # Embedded NIC MAC Assignment (Alternate data format)
            (factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
            facts[factname] = entry_facts

    # Collect health (RAM/CPU data)
    health = ilo.get_embedded_health()
    facts['hw_health'] = health

    memory_details_summary = health.get('memory', {}).get('memory_details_summary')
    # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
    if memory_details_summary:
        facts['hw_memory_details_summary'] = memory_details_summary
        facts['hw_memory_total'] = 0
        for cpu, details in memory_details_summary.items():
            cpu_total_memory_size = details.get('total_memory_size')
            if cpu_total_memory_size:
                # Sizes look like '32 GB'; only GB-denominated entries are summed.
                ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
                if ram:
                    if ram.group(2) == 'GB':
                        facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))
        # reformat into a text friendly format
        facts['hw_memory_total'] = "{0} GB".format(facts['hw_memory_total'])

    module.exit_json(ansible_facts=facts)
# Entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
akhmadMizkat/odoo | refs/heads/master | addons/product_margin/__openerp__.py | 27 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    # Odoo/OpenERP addon manifest for the product margin reporting module.
    'name': 'Margins by Products',
    'version': '1.0',
    'category': 'Sales Management',
    'description': """
Adds a reporting menu in products that computes sales, purchases, margins and other interesting indicators based on invoices.
=============================================================================================================================
The wizard to launch the report has several options to help you get the data you need.
""",
    # Margins are computed from invoices, hence the accounting dependency.
    'depends': ['account'],
    # Views and access rules loaded on install/update.
    'data': [
        'security/ir.model.access.csv',
        'wizard/product_margin_view.xml',
        'product_margin_view.xml'
    ],
    'test':['test/product_margin.yml'],
    'demo': [],
    'installable': True,
    # Not installed automatically when dependencies are present.
    'auto_install': False,
}
|
reminisce/mxnet | refs/heads/master | example/sparse/wide_deep/train.py | 6 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet.test_utils import *
from config import *
from data import get_uci_adult
from model import wide_deep_model
import argparse
import os
# Command-line interface for the sparse wide & deep training script.
parser = argparse.ArgumentParser(description="Run sparse wide and deep classification ",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=10,
                    help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=100,
                    help='number of examples per batch')
parser.add_argument('--lr', type=float, default=0.001,
                    help='learning rate')
parser.add_argument('--gpu', action='store_true', default=False,
                    help='Train on GPU with CUDA')
parser.add_argument('--optimizer', type=str, default='adam',
                    help='what optimizer to use',
                    choices=["ftrl", "sgd", "adam"])
parser.add_argument('--log-interval', type=int, default=100,
                    help='number of batches to wait before logging training status')


if __name__ == '__main__':
    import logging
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.INFO, format=head)

    # arg parser
    args = parser.parse_args()
    logging.info(args)
    num_epoch = args.num_epoch
    batch_size = args.batch_size
    optimizer = args.optimizer
    log_interval = args.log_interval
    lr = args.lr
    ctx = mx.gpu(0) if args.gpu else mx.cpu()

    # dataset: UCI "adult" census data, downloaded on first use.
    data_dir = os.path.join(os.getcwd(), 'data')
    train_data = os.path.join(data_dir, ADULT['train'])
    val_data = os.path.join(data_dir, ADULT['test'])
    # NOTE(review): the two path variables above are shadowed below when
    # `train_data`/`eval_data` are rebound to NDArrayIter objects.
    train_csr, train_dns, train_label = get_uci_adult(data_dir, ADULT['train'], ADULT['url'])
    val_csr, val_dns, val_label = get_uci_adult(data_dir, ADULT['test'], ADULT['url'])

    # Symbolic wide&deep model: linear part on sparse (CSR) features,
    # embedding+MLP part on dense features.
    model = wide_deep_model(ADULT['num_linear_features'], ADULT['num_embed_features'],
                            ADULT['num_cont_features'], ADULT['embed_input_dims'],
                            ADULT['hidden_units'])

    # data iterator
    train_data = mx.io.NDArrayIter({'csr_data': train_csr, 'dns_data': train_dns},
                                   {'softmax_label': train_label}, batch_size,
                                   shuffle=True, last_batch_handle='discard')
    eval_data = mx.io.NDArrayIter({'csr_data': val_csr, 'dns_data': val_dns},
                                  {'softmax_label': val_label}, batch_size,
                                  shuffle=True, last_batch_handle='discard')

    # module: bind the symbol to the chosen device and input shapes.
    mod = mx.mod.Module(symbol=model, context=ctx, data_names=['csr_data', 'dns_data'],
                        label_names=['softmax_label'])
    mod.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label)
    mod.init_params()
    # Gradients are rescaled so the effective loss is averaged per example.
    optim = mx.optimizer.create(optimizer, learning_rate=lr, rescale_grad=1.0/batch_size)
    mod.init_optimizer(optimizer=optim)
    # use accuracy as the metric
    metric = mx.metric.create(['acc'])
    # periodic throughput/metric logging every `log_interval` batches
    speedometer = mx.callback.Speedometer(batch_size, log_interval)

    logging.info('Training started ...')
    data_iter = iter(train_data)
    for epoch in range(num_epoch):
        nbatch = 0
        metric.reset()
        for batch in data_iter:
            nbatch += 1
            mod.forward_backward(batch)
            # update all parameters (including the weight parameter)
            mod.update()
            # update training metric
            mod.update_metric(metric, batch.label)
            speedometer_param = mx.model.BatchEndParam(epoch=epoch, nbatch=nbatch,
                                                       eval_metric=metric, locals=locals())
            speedometer(speedometer_param)
        # evaluate metric on validation dataset
        score = mod.score(eval_data, ['acc'])
        logging.info('epoch %d, accuracy = %s' % (epoch, score[0][1]))
        mod.save_checkpoint("checkpoint", epoch, save_optimizer_states=True)
        # reset the iterator for next pass of data
        data_iter.reset()
    logging.info('Training completed.')
|
jeffzheng1/tensorflow | refs/heads/master | tensorflow/python/framework/common_shapes_test.py | 60 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for common shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class CommonShapesTest(test_util.TensorFlowTestCase):
  """Unit tests for common_shapes.broadcast_shape."""

  def testBroadcast_one_dimension(self):
    """Checks 1-D broadcasting rules, including incompatible shapes."""
    s1 = tensor_shape.vector(5)
    s2 = tensor_shape.vector(7)

    unknown = tensor_shape.unknown_shape()
    scalar = tensor_shape.scalar()
    expanded_scalar = tensor_shape.TensorShape([1])

    # Tensors with same shape should have the same broadcast result.
    self.assertEqual(s1, common_shapes.broadcast_shape(s1, s1))
    self.assertEqual(s2, common_shapes.broadcast_shape(s2, s2))
    self.assertEqual(unknown, common_shapes.broadcast_shape(unknown, unknown))
    self.assertEqual(scalar, common_shapes.broadcast_shape(scalar, scalar))
    self.assertEqual(expanded_scalar, common_shapes.broadcast_shape(
        expanded_scalar, expanded_scalar))

    # [] acts like an identity.
    self.assertEqual(s1, common_shapes.broadcast_shape(s1, scalar))
    self.assertEqual(s2, common_shapes.broadcast_shape(s2, scalar))
    self.assertEqual(s1, common_shapes.broadcast_shape(s1, expanded_scalar))
    self.assertEqual(s2, common_shapes.broadcast_shape(s2, expanded_scalar))

    self.assertEqual(unknown, common_shapes.broadcast_shape(s1, unknown))
    self.assertEqual(unknown, common_shapes.broadcast_shape(s2, unknown))

    self.assertEqual(expanded_scalar, common_shapes.broadcast_shape(
        scalar, expanded_scalar))

    # BUG FIX: the original placed both incompatible-shape calls inside a
    # single assertRaises block, so the second call never executed (the
    # first raise exits the block). Each call now gets its own context.
    with self.assertRaises(ValueError):
      common_shapes.broadcast_shape(s1, s2)
    with self.assertRaises(ValueError):
      common_shapes.broadcast_shape(s2, s1)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  googletest.main()
|
legrosbuffle/or-tools | refs/heads/master | examples/python/set_covering_deployment.py | 5 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set covering deployment in Google CP Solver
From http://mathworld.wolfram.com/SetCoveringDeployment.html
'''
Set covering deployment (sometimes written 'set-covering deployment'
and abbreviated SCDP for 'set covering deployment problem') seeks
an optimal stationing of troops in a set of regions so that a
relatively small number of troop units can control a large
geographic region. ReVelle and Rosing (2000) first described
this in a study of Emperor Constantine the Great's mobile field
army placements to secure the Roman Empire.
'''
Compare with the the following models:
* MiniZinc: http://www.hakank.org/minizinc/set_covering_deployment.mzn
* Comet : http://www.hakank.org/comet/set_covering_deployment.co
* Gecode : http://www.hakank.org/gecode/set_covering_deployment.cpp
* ECLiPSe : http://www.hakank.org/eclipse/set_covering_deployment.ecl
* SICStus : http://hakank.org/sicstus/set_covering_deployment.pl
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
from ortools.constraint_solver import pywrapcp
def main():
  """Solve the set covering deployment problem and print the best placement.

  Builds a 0/1 model over the eight regions: X[i] marks a stationed army,
  Y[i] marks a reserve (backup) army. The objective minimises the total
  number of armies subject to the ReVelle/Rosing coverage rules.
  """

  # Create the solver.
  solver = pywrapcp.Solver("Set covering deployment")

  #
  # data
  #
  countries = ["Alexandria",
               "Asia Minor",
               "Britain",
               "Byzantium",
               "Gaul",
               "Iberia",
               "Rome",
               "Tunis"]
  n = len(countries)

  # the incidence matrix (neighbours)
  mat = [
      [0, 1, 0, 1, 0, 0, 1, 1],
      [1, 0, 0, 1, 0, 0, 0, 0],
      [0, 0, 0, 0, 1, 1, 0, 0],
      [1, 1, 0, 0, 0, 0, 1, 0],
      [0, 0, 1, 0, 0, 1, 1, 0],
      [0, 0, 1, 0, 1, 0, 1, 1],
      [1, 0, 0, 1, 1, 1, 0, 1],
      [1, 0, 0, 0, 0, 1, 1, 0]
  ]

  #
  # declare variables
  #

  # First army
  X = [solver.IntVar(0, 1, "X[%i]" % i) for i in range(n)]

  # Second (reserv) army
  Y = [solver.IntVar(0, 1, "Y[%i]" % i) for i in range(n)]

  #
  # constraints
  #

  # total number of armies
  num_armies = solver.Sum([X[i] + Y[i] for i in range(n)])

  #
  #  Constraint 1: There is always an army in a city
  #                (+ maybe a backup)
  #                Or rather: Is there a backup, there
  #                must be an an army
  #
  [solver.Add(X[i] >= Y[i]) for i in range(n)]

  #
  # Constraint 2: There should always be an backup army near every city
  #
  for i in range(n):
    neighbors = solver.Sum([Y[j] for j in range(n) if mat[i][j] == 1])
    solver.Add(X[i] + neighbors >= 1)

  # Minimise with a step of 1 per improving solution.
  objective = solver.Minimize(num_armies, 1)

  #
  # solution and search
  #
  # The collector keeps only the last (i.e. best) solution found.
  solution = solver.Assignment()
  solution.Add(X)
  solution.Add(Y)
  solution.Add(num_armies)
  solution.AddObjective(num_armies)

  collector = solver.LastSolutionCollector(solution)
  solver.Solve(solver.Phase(X + Y,
                            solver.INT_VAR_DEFAULT,
                            solver.INT_VALUE_DEFAULT),
               [collector, objective])

  print("num_armies:", collector.ObjectiveValue(0))
  print("X:", [collector.Value(0, X[i]) for i in range(n)])
  print("Y:", [collector.Value(0, Y[i]) for i in range(n)])

  # NOTE(review): "reserv" below is a typo in runtime output, left as-is.
  for i in range(n):
    if collector.Value(0, X[i]) == 1:
      print("army:", countries[i], end=' ')
      if collector.Value(0, Y[i]) == 1:
        print("reserv army:", countries[i], " ")
      print()

  print()
  # Search statistics.
  print("failures:", solver.Failures())
  print("branches:", solver.Branches())
  print("WallTime:", solver.WallTime())
# Script entry point.
if __name__ == "__main__":
  main()
|
mezz64/home-assistant | refs/heads/dev | homeassistant/components/goalzero/binary_sensor.py | 12 | """Support for Goal Zero Yeti Sensors."""
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import CONF_NAME
from . import YetiEntity
from .const import BINARY_SENSOR_DICT, DATA_KEY_API, DATA_KEY_COORDINATOR, DOMAIN
# Home Assistant convention: 0 means updates are coordinated (no limit needed).
PARALLEL_UPDATES = 0
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the Goal Zero Yeti sensor."""
    device_name = entry.data[CONF_NAME]
    data = hass.data[DOMAIN][entry.entry_id]
    api = data[DATA_KEY_API]
    coordinator = data[DATA_KEY_COORDINATOR]

    entities = []
    for sensor_name in BINARY_SENSOR_DICT:
        entities.append(
            YetiBinarySensor(api, coordinator, device_name, sensor_name, entry.entry_id)
        )
    async_add_entities(entities, True)
class YetiBinarySensor(YetiEntity, BinarySensorEntity):
    """Representation of a Goal Zero Yeti sensor."""

    def __init__(self, api, coordinator, name, sensor_name, server_unique_id):
        """Initialize a Goal Zero Yeti sensor."""
        super().__init__(api, coordinator, name, server_unique_id)
        self._condition = sensor_name
        info = BINARY_SENSOR_DICT[sensor_name]
        self._condition_name = info[0]
        self._device_class = info[1]
        self._icon = info[2]

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name} {self._condition_name}"

    @property
    def unique_id(self):
        """Return the unique id of the sensor."""
        return f"{self._server_unique_id}/{self._condition_name}"

    @property
    def is_on(self):
        """Return if the service is on."""
        data = self.api.data
        if not data:
            return False
        return data[self._condition] == 1

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon
|
MatthewWilkes/django | refs/heads/master | tests/migrations/test_migrations_conflict/0002_conflicting_second.py | 429 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Second, conflicting migration used by the conflict-detection tests."""

    dependencies = [
        ("migrations", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            "Something",
            [("id", models.AutoField(primary_key=True))],
        )
    ]
|
berth64/modded_modded_1257ad | refs/heads/master | source/module_music.py | 1 | from header_music import *
####################################################################################################################
# Each track record contains the following fields:
# 1) Track id: used for referencing tracks.
# 2) Track file: filename of the track
# 3) Track flags. See header_music.py for a list of available flags
# 4) Continue Track flags: Shows in which situations or cultures the track can continue playing. See header_music.py for a list of available flags
####################################################################################################################
# WARNING: You MUST add mtf_module_track flag to the flags of the tracks located under module directory
tracks = [
("cant_find_this", "cant_find_this.ogg", 0, 0),
#("start", "armorer.mp3", mtf_start_immediately|mtf_sit_main_title|mtf_module_track, 0), #original menu music
("start", "start.ogg", mtf_start_immediately|mtf_sit_main_title|mtf_module_track, 0),
("capture", "capture.ogg", mtf_module_track, 0),
("empty_village", "empty_village.ogg", mtf_persist_until_finished|mtf_module_track, 0),
("escape", "escape.ogg", mtf_persist_until_finished|mtf_module_track, 0),
("retreat", "retreat.ogg", mtf_persist_until_finished|mtf_sit_killed|mtf_module_track, 0),
#euro begin
("armorer1", "armorer.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
#euro battle music 1
("euro_1", "euro_1.ogg", mtf_culture_1|mtf_culture_2|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_module_track,mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#euro battle/siege music 2
("euro_2", "euro_2.ogg", mtf_culture_1|mtf_culture_2|mtf_culture_6|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#euro travel music 1
("euro_3", "euro_3.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
#euro travel music 2
("euro_4", "euro_4.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
#euro battle/siege music 3
("euro_5", "euro_5.ogg", mtf_culture_1|mtf_culture_2|mtf_culture_6|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#euro siege music 1
("euro_6", "euro_6.ogg", mtf_culture_1|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#euro travel music 3
("pog", "pog.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
#euro battle/siege music 4
("euro_8", "euro_8.ogg", mtf_culture_1|mtf_culture_2|mtf_culture_6|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_sit_ambushed|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#euro travel music 4
("euro_9", "euro_9.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
#euro battle/siege music 5
("euro_10", "euro_10.ogg", mtf_culture_1|mtf_culture_2|mtf_culture_6|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#euro battle/siege music 6
("euro_11", "euro_11.ogg", mtf_culture_1|mtf_culture_2|mtf_culture_6|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_sit_ambushed|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#euro battle music 11
#("euro_12", "euro_12.ogg", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
#euro travel music 5
("euro_13", "euro_13.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
("euro_14", "euro_14.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
("euro_15", "euro_15.mp3", mtf_culture_1|mtf_sit_travel|mtf_module_track, mtf_culture_1|mtf_sit_travel),
##tavern music
("euro_13t", "euro_13.mp3", mtf_sit_tavern|mtf_module_track, mtf_sit_tavern),
("euro_9t", "euro_9.mp3", mtf_sit_tavern|mtf_module_track, mtf_sit_tavern),
("pogt", "pog.mp3", mtf_sit_tavern|mtf_module_track, mtf_sit_tavern),
("euro_4t", "euro_4.mp3", mtf_sit_tavern|mtf_module_track, mtf_sit_tavern),
("euro_3t", "euro_3.mp3", mtf_sit_tavern|mtf_module_track, mtf_sit_tavern),
("euro_15t", "euro_15.mp3", mtf_sit_tavern|mtf_module_track, mtf_sit_tavern),
("byz_2t", "byz_2.mp3", mtf_sit_tavern|mtf_module_track, mtf_sit_tavern),
##end
#byz begin
("byz_1", "byz_1.mp3", mtf_culture_2|mtf_sit_travel|mtf_module_track, mtf_culture_2|mtf_sit_travel),
("byz_2", "byz_2.mp3", mtf_culture_2|mtf_sit_travel|mtf_module_track, mtf_culture_2|mtf_sit_travel),
("byz_3", "byz_3.mp3", mtf_culture_2|mtf_sit_travel|mtf_module_track, mtf_culture_2|mtf_sit_travel),
("byz_4", "byz_4.mp3", mtf_culture_2|mtf_sit_travel|mtf_module_track, mtf_culture_2|mtf_sit_travel),
("byz_5", "byz_5.mp3", mtf_culture_2|mtf_sit_travel|mtf_module_track, mtf_culture_2|mtf_sit_travel),
#end
#euro end
#balt begin
#balt siege music 1
# ("baltic_1", "baltic_1.ogg", mtf_culture_2|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#balt battle/siege music 1
#balt travel music 1
#("baltic_3", "baltic_3.mp3", mtf_culture_2|mtf_sit_travel|mtf_module_track, mtf_culture_2|mtf_sit_travel),
#balt battle/siege music 2
#
#balt siege music 2
#
#balt travel music 2
# ("baltic_6", "baltic_6.mp3", mtf_culture_2|mtf_sit_travel|mtf_module_track, mtf_culture_2|mtf_sit_travel),
#balt travel music 3
#balt travel music 4
#balt battle/siege music 3
#balt battle/siege music 4
#balt battle/siege music 5
#balt battle/siege music 6
#balt end
#rus begin
#rus battle/siege music 1
("rus_1", "rus_1.ogg", mtf_culture_5|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#rus battle/siege music 2
("rus_2", "rus_2.ogg", mtf_culture_5|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_sit_ambushed|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#rus travel music 1
("rus_3", "rus_3.ogg", mtf_culture_5|mtf_sit_travel|mtf_module_track, mtf_culture_5|mtf_sit_travel),
#rus battle/siege music rus 3
("rus_4", "rus_4.ogg", mtf_culture_5|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_sit_ambushed|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#rus travel music 2
("rus_5", "rus_5.ogg", mtf_culture_5|mtf_sit_travel|mtf_module_track, mtf_culture_5|mtf_sit_travel),
("rus_6", "rus_6.mp3", mtf_culture_5|mtf_sit_travel|mtf_module_track, mtf_culture_5|mtf_sit_travel),
("baltic_2", "baltic_2.ogg", mtf_culture_5|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
("baltic_5", "baltic_5.ogg", mtf_culture_5|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
("baltic_4", "baltic_4.ogg", mtf_culture_5|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#rus travel music 3
#rus travel music 4
#rus battle/siege music 4
#rus battle/siege music 5
#rus battle/siege music 6
#rus end
#saracen begin
#saracen battle/siege music 1
("saracen_1", "saracen_1.ogg", mtf_culture_4|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_sit_ambushed|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#saracen travel music 1
("saracen_2", "saracen_2.ogg", mtf_culture_4|mtf_sit_travel|mtf_module_track, mtf_culture_4|mtf_sit_travel),
#saracen siege music 1
("saracen_3", "saracen_3.ogg", mtf_culture_4|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#saracen travel music 2
("saracen_4", "saracen_4.ogg", mtf_culture_4|mtf_sit_travel|mtf_module_track, mtf_culture_4|mtf_sit_travel),
#("saracen_5", "saracen_5.ogg", mtf_culture_4|mtf_sit_travel|mtf_module_track, mtf_culture_4), <<<--- continuation flags omit mtf_sit_travel; verify before re-enabling
#saracen battle/siege music 2
("saracen_6", "saracen_6.ogg", mtf_culture_4|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_sit_ambushed|mtf_module_track, mtf_culture_4),
#saracen siege music 2
("saracen_7", "saracen_7.ogg", mtf_culture_4|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#saracen travel music 2
("saracen_2", "saracen_8.mp3", mtf_culture_4|mtf_sit_travel|mtf_module_track, mtf_culture_4|mtf_sit_travel),
#saracen travel music 3
("saracen_3", "saracen_9.mp3", mtf_culture_4|mtf_sit_travel|mtf_module_track, mtf_culture_4|mtf_sit_travel),
#saracen travel music 4
#saracen battle/siege music 3
#saracen battle/siege music 4
#saracen battle/siege music 5
#saracen end
#mongols begin
#("mong_1", "mong_1.ogg", mtf_culture_3|mtf_sit_travel|mtf_module_track, mtf_culture_3), <-- sounds like a Rus track, not Mongol; verify before re-enabling
#not mongol music; verify track assignment
("mong_2", "mong_2.ogg", mtf_culture_3|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all), #<-- this is not mongol wtf is in here?
#mongols battle/siege music 1
("mong_3", "mong_3.ogg", mtf_culture_3|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#mongols travel music 1
#("mong_4", "mong_4.ogg", mtf_culture_3|mtf_sit_travel|mtf_module_track, mtf_culture_3|mtf_sit_travel),
#mongols travel music 2
("mong_5", "mong_5.ogg", mtf_culture_3|mtf_sit_travel|mtf_module_track, mtf_culture_3|mtf_sit_travel),
#mongols battle/siege music 2
("mong_6", "mong_6.ogg", mtf_culture_3|mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#mongols travel music 3
("mong_7", "mong_7.ogg", mtf_culture_3|mtf_sit_travel|mtf_module_track, mtf_culture_3|mtf_sit_travel),
#mongols siege music 1
("mong_8", "mong_8.ogg", mtf_culture_3|mtf_sit_siege|mtf_module_track, mtf_sit_fight|mtf_sit_multiplayer_fight|mtf_culture_all),
#mongols end
("victorious_evil", "victorious_evil.ogg", mtf_persist_until_finished|mtf_module_track, 0),
("wedding", "wedding.ogg", mtf_persist_until_finished, 0),
("coronation", "coronation.ogg", mtf_persist_until_finished, 0),
#("ambient_1", "ambient_1.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_2", "ambient_2.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_3", "ambient_3.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_4", "ambient_4.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_5", "ambient_5.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_6", "ambient_6.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_7", "ambient_7.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_8", "ambient_8.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_9", "ambient_9.ogg", mtf_culture_all|mtf_persist_until_finished|mtf_sit_fight|mtf_module_track, mtf_culture_all),
#("ambient_10", "ambient_10.ogg", mtf_persist_until_finished|mtf_module_track, 0),
#("silence", "silence.ogg", mtf_persist_until_finished|mtf_module_track, 0),
("victorious_neutral_1", "victorious_neutral_1.ogg", mtf_persist_until_finished|mtf_sit_victorious, 0),
("victorious_neutral_2", "victorious_neutral_2.ogg", mtf_persist_until_finished|mtf_sit_victorious, 0),
("victorious_neutral_3", "victorious_neutral_3.ogg", mtf_persist_until_finished|mtf_sit_victorious, 0),
] |
KenKundert/ec | refs/heads/master | tests/test_ec.py | 1 | #!/usr/bin/env python3
# encoding: utf8
# Test EC
# Imports {{{1
from engineering_calculator.calculator import Calculator, Display, CalculatorError
from engineering_calculator.actions import (
allActions, predefinedVariables, defaultFormat, defaultDigits, detailedHelp
)
import pytest
# Utility functions {{{1
messages = []
def grab_messages(message, style=None):
global messages
messages += [message]
warnings = []
def grab_warnings(warning):
global warnings
warnings += [warning]
reltol=1e-9
abstol = 1e-13
def close(result, expected):
return abs(result-expected) <= (reltol*abs(expected)+abstol)
# test_built_ins() {{{1
def test_built_ins():
global messages
global warnings
testCases = []
alreadySeen = set([
None, # skip categories
detailedHelp.getName() # skip detailed help for now
])
for action in allActions:
if not action:
continue
actionName = action.getName()
if actionName not in alreadySeen:
alreadySeen.add(actionName)
# Same action may show up several times because it is in several
# different personalities. Just test it the first time it is seen.
if hasattr(action, 'tests'):
testCases += action.tests
# Also exercise the detailed help for this action
detailedHelp.addTest(
stimulus='?%s' % actionName
, messages=True
)
# Add detailedHelp tests (the originals specified with the action, plus the ones
# we just added above)
testCases += detailedHelp.tests
# Finally, you man manually specify additional tests not tied to any particular
# action here
testCases += [
dict(stimulus = '-failure', error = "-failure: unrecognized.")
]
calc = Calculator(
allActions,
Display(defaultFormat, defaultDigits),
predefinedVariables = predefinedVariables,
messagePrinter = grab_messages,
warningPrinter = grab_warnings,
backUpStack = True
)
# Run tests {{{1
for index, case in enumerate(testCases):
messages = []
warnings = []
stimulus = case['stimulus']
expectedResult = case.get('result', None)
expectedUnits = case.get('units', None)
expectedFormattedResult = case.get('text', None)
expectedError = case.get('error', None)
expectedMessages = case.get('messages', [])
expectedWarnings = case.get('warnings', [])
calc.clear()
try:
result, units = calc.evaluate(calc.split(stimulus))
if expectedMessages == True:
if messages:
messages = True
if expectedResult:
assert close(result, expectedResult), stimulus
if expectedFormattedResult:
assert calc.format((result, units)) == expectedFormattedResult, stimulus
if expectedUnits:
assert units == expectedUnits, stimulus
assert not expectedError
assert messages == expectedMessages, stimulus
assert warnings == expectedWarnings, stimulus
except CalculatorError as e:
calc.restoreStack()
assert expectedError == e.getMessage(), stimulus
# main {{{1
if __name__ == '__main__':
# As a debugging aid allow the tests to be run on their own, outside pytest.
# This makes it easier to see and interpret and textual output.
defined = dict(globals())
for k, v in defined.items():
if callable(v) and k.startswith('test_'):
print()
print('Calling:', k)
print((len(k)+9)*'=')
v()
|
bjackman/workload-automation | refs/heads/master | wlauto/utils/formatter.py | 9 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.utils.terminalsize import get_terminal_size
INDENTATION_FROM_TITLE = 4
class TextFormatter(object):
"""
This is a base class for text formatting. It mainly ask to implement two
methods which are add_item and format_data. The formar will add new text to
the formatter, whereas the latter will return a formatted text. The name
attribute represents the name of the foramtter.
"""
name = None
data = None
def __init__(self):
pass
def add_item(self, new_data, item_title):
"""
Add new item to the text formatter.
:param new_data: The data to be added
:param item_title: A title for the added data
"""
raise NotImplementedError()
def format_data(self):
"""
It returns a formatted text
"""
raise NotImplementedError()
class DescriptionListFormatter(TextFormatter):
name = 'description_list_formatter'
data = None
def get_text_width(self):
if not self._text_width:
self._text_width, _ = get_terminal_size() # pylint: disable=unpacking-non-sequence
return self._text_width
def set_text_width(self, value):
self._text_width = value
text_width = property(get_text_width, set_text_width)
def __init__(self, title=None, width=None):
super(DescriptionListFormatter, self).__init__()
self.data_title = title
self._text_width = width
self.longest_word_length = 0
self.data = []
def add_item(self, new_data, item_title):
if len(item_title) > self.longest_word_length:
self.longest_word_length = len(item_title)
self.data[len(self.data):] = [(item_title, self._remove_newlines(new_data))]
def format_data(self):
parag_indentation = self.longest_word_length + INDENTATION_FROM_TITLE
string_formatter = '{}:<{}{} {}'.format('{', parag_indentation, '}', '{}')
formatted_data = ''
if self.data_title:
formatted_data += self.data_title
line_width = self.text_width - parag_indentation
for title, paragraph in self.data:
if paragraph:
formatted_data += '\n'
title_len = self.longest_word_length - len(title)
title += ':'
if title_len > 0:
title = (' ' * title_len) + title
parag_lines = self._break_lines(paragraph, line_width).splitlines()
if parag_lines:
formatted_data += string_formatter.format(title, parag_lines[0])
for line in parag_lines[1:]:
formatted_data += '\n' + string_formatter.format('', line)
self.text_width = None
return formatted_data
# Return text's paragraphs sperated in a list, such that each index in the
# list is a single text paragraph with no new lines
def _remove_newlines(self, new_data): # pylint: disable=R0201
parag_list = ['']
parag_num = 0
prv_parag = None
# For each paragraph sperated by a new line
for paragraph in new_data.splitlines():
if paragraph:
parag_list[parag_num] += ' ' + paragraph
# if the previous line is NOT empty, then add new empty index for
# the next paragraph
elif prv_parag:
parag_num = 1
parag_list.append('')
prv_parag = paragraph
# sometimes, we end up with an empty string as the last item so we reomve it
if not parag_list[-1]:
return parag_list[:-1]
return parag_list
def _break_lines(self, parag_list, line_width): # pylint: disable=R0201
formatted_paragraphs = []
for para in parag_list:
words = para.split()
if words:
formatted_text = words.pop(0)
current_width = len(formatted_text)
# for each word in the paragraph, line width is an accumlation of
# word length + 1 (1 is for the space after each word).
for word in words:
word = word.strip()
if current_width + len(word) + 1 >= line_width:
formatted_text += '\n' + word
current_width = len(word)
else:
formatted_text += ' ' + word
current_width += len(word) + 1
formatted_paragraphs.append(formatted_text)
return '\n\n'.join(formatted_paragraphs)
|
yashvesikar/robotax | refs/heads/master | classes/employee_calculations.py | 1 | import calculations_template
class employee_calculations(calculations):
def __init__(self, array_of_employees):
self.employees = array_of_employees
def sum():
employee_tax_sum = 0
for employee in self.employees:
employee_tax_sum += employee.payrollTax
return employee_tax_sum
def productivity():
employee_productivity = self.averageOutput * len(array_of_employees)
return employee_productivity
# for employee in self.employees:
# employee_productivity+=
# employee_productivity = employee_productivity/len(array_of_employees)
# def get_salary():
# return self.salary
#
# self.hourlyRate = hourlyRate
# self.averageOutput = averageOutput
# self.position = position
# self.payrollTax = payrollTax
# self.annualCost = annualCost
#
# def get_hourlyRate():
# return self.hourlyRate
#
# def get_averageOutput():
# return self.averageOutput
#
# def get_position():
# return self.position
#
# def get_payrollTax():
# return self.payrollTax
#
# def get_annualCost():
# return self.annualCost
|
nocarryr/AV-Asset-Manager | refs/heads/master | avam/categories/tests.py | 1 | from django.test import TestCase
from django.db import IntegrityError
from categories.models import Category, CategoryItem
class CategoriesTestCase(TestCase):
def setUp(self):
base_names = ['root', 'branch', 'leaf']
def build_children(base_name=None, parent=None):
if base_name is None:
base_name = base_names[0]
try:
next_name = base_names[base_names.index(base_name) + 1]
except IndexError:
next_name = None
for i in range(3):
name = '{0}_{1}'.format(base_name, i)
category = Category(name=name, parent_category=parent)
category.save()
if next_name is not None:
build_children(next_name, category)
build_children()
def get_category(self, *args):
category = None
for name in args:
if category is None:
category = Category.objects.get(name=name)
else:
category = category.subcategories.get(name=name)
return category
def test_str(self):
leaf = self.get_category('root_1', 'branch_1', 'leaf_1')
self.assertEqual(str(leaf), 'root_1/branch_1/leaf_1')
def test_uniques(self):
root = self.get_category('root_1')
with self.assertRaises(IntegrityError):
bad_branch = Category(name='branch_1', parent_category=root)
bad_branch.save()
def test_walk(self):
root = self.get_category('root_1')
names = {'branches':[], 'leaves':[]}
for sub_category in root.walk_subcategories():
if 'branch' in sub_category.name:
key = 'branches'
else:
key = 'leaves'
names[key].append(int(sub_category.name.split('_')[1]))
self.assertEqual(names, {'branches':[0, 1, 2], 'leaves':[0, 1, 2]*3})
def test_ancestry(self):
root = self.get_category('root_1')
branch = self.get_category('root_1', 'branch_1')
leaf = self.get_category('root_1', 'branch_1', 'leaf_1')
not_root = self.get_category('root_0')
self.assertTrue(leaf.is_ancestor(branch))
self.assertTrue(leaf.is_ancestor(root))
self.assertFalse(leaf.is_ancestor(not_root))
class CategoryItemTestCase(TestCase):
def setUp(self):
from categories.models import CategorizedMixin
def get_default_categories(*args):
return []
CategorizedMixin.get_default_categories = get_default_categories
from assettypes.tests import build_test_fixtures as build_assettypes_fixures
assettypes_fixtures = build_assettypes_fixures()
category_fixtures = {}
for name in ['Video', 'Lighting', 'Accessories']:
category = Category.objects.create(name=name)
category_fixtures[name] = category
if name != 'Accessories':
sub_category = Category.objects.create(
name='Accessories',
parent_category=category,
)
category_fixtures[str(sub_category)] = sub_category
self.category_fixtures = category_fixtures
self.assettypes_fixtures = assettypes_fixtures
def get_fk_fields(self, obj):
for field in obj._meta.get_fields():
if not field.is_relation:
continue
if not field.one_to_many:
continue
yield field
def assign_items(self):
proj = self.assettypes_fixtures['projector']
manuf = proj.manufacturer
category = self.category_fixtures['Video']
category.add_item(manuf)
for f in self.get_fk_fields(manuf):
if not hasattr(f.related_model, 'manufacturer'):
continue
attr = f.get_accessor_name()
for obj in getattr(manuf, attr).all():
category.add_item(obj)
def test_assignment(self):
self.assign_items()
category = self.category_fixtures['Video']
q = category.get_items()
proj = self.assettypes_fixtures['projector']
manuf = proj.manufacturer
for category_item in q:
obj = category_item.content_object
if obj._meta.model_name == 'manufacturer':
continue
self.assertEqual(obj.manufacturer, manuf)
for category_item in CategoryItem.objects.get_for_object(proj):
self.assertEqual(category_item.content_object, proj)
self.assertEqual(category_item.category, category)
def test_linked_categories(self):
def get_content_objects(category):
return [ci.content_object for ci in category.get_items()]
self.assign_items()
acc_cat = self.category_fixtures['Accessories']
vid_acc_cat = self.category_fixtures['Video/Accessories']
vid_acc_cat.linked_categories.add(acc_cat)
vid_acc_cat.save()
proj = self.assettypes_fixtures['projector']
lamp = proj.lamp_type
vid_acc_cat.add_item(lamp)
self.assertIn(lamp, get_content_objects(acc_cat))
test_lamp = lamp._meta.model(
model_name='Test Projector Lamp 2',
max_hours=1,
manufacturer=lamp.manufacturer,
)
test_lamp.save()
vid_acc_cat.add_item(test_lamp)
count = acc_cat.get_items().count()
self.assertIn(test_lamp, get_content_objects(acc_cat))
test_lamp.delete()
self.assertEqual(acc_cat.get_items().count(), count - 1)
|
svn2github/audacity | refs/heads/master | lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Tools/compiler_c.py | 343 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,imp,types
from waflib.Tools import ccroot
from waflib import Utils,Configure
from waflib.Logs import debug
c_compiler={'win32':['msvc','gcc'],'cygwin':['gcc'],'darwin':['gcc'],'aix':['xlc','gcc'],'linux':['gcc','icc'],'sunos':['suncc','gcc'],'irix':['gcc','irixcc'],'hpux':['gcc'],'gnu':['gcc'],'java':['gcc','msvc','icc'],'default':['gcc'],}
def configure(conf):
try:test_for_compiler=conf.options.check_c_compiler
except AttributeError:conf.fatal("Add options(opt): opt.load('compiler_c')")
for compiler in test_for_compiler.split():
conf.env.stash()
conf.start_msg('Checking for %r (c compiler)'%compiler)
try:
conf.load(compiler)
except conf.errors.ConfigurationError ,e:
conf.env.revert()
conf.end_msg(False)
debug('compiler_c: %r'%e)
else:
if conf.env['CC']:
conf.end_msg(conf.env.get_flat('CC'))
conf.env['COMPILER_CC']=compiler
break
conf.end_msg(False)
else:
conf.fatal('could not configure a c compiler!')
def options(opt):
opt.load_special_tools('c_*.py',ban=['c_dumbpreproc.py'])
global c_compiler
build_platform=Utils.unversioned_sys_platform()
possible_compiler_list=c_compiler[build_platform in c_compiler and build_platform or'default']
test_for_compiler=' '.join(possible_compiler_list)
cc_compiler_opts=opt.add_option_group("C Compiler Options")
cc_compiler_opts.add_option('--check-c-compiler',default="%s"%test_for_compiler,help='On this platform (%s) the following C-Compiler will be checked by default: "%s"'%(build_platform,test_for_compiler),dest="check_c_compiler")
for x in test_for_compiler.split():
opt.load('%s'%x)
|
cdqwertz/qaxiwa | refs/heads/master | src/main.py | 1 | import utils
from parser import *
from compiler import *
from language import *
if __name__ == "__main__":
string = utils.load_file("code.txt")
data = parse(string)
s = []
for my_node in data:
s.append(str(my_node))
print("[" + ", ".join(s) + "]")
print()
lang = input("language: ")
my_language = language("languages/" + lang + ".txt")
my_compiler = compiler(my_language)
out = my_compiler.get_code(data)
print(out)
utils.save_file("output" + my_language.data["file_ending"], out)
|
kidmaple/CoolWall | refs/heads/nios2 | user/python/Demo/metaclasses/Eiffel.py | 4 | """Support Eiffel-style preconditions and postconditions.
For example,
class C:
def m1(self, arg):
require arg > 0
return whatever
ensure Result > arg
can be written (clumsily, I agree) as:
class C(Eiffel):
def m1(self, arg):
return whatever
def m1_pre(self, arg):
assert arg > 0
def m1_post(self, Result, arg):
assert Result > arg
Pre- and post-conditions for a method, being implemented as methods
themselves, are inherited independently from the method. This gives
much of the same effect of Eiffel, where pre- and post-conditions are
inherited when a method is overridden by a derived class. However,
when a derived class in Python needs to extend a pre- or
post-condition, it must manually merge the base class' pre- or
post-condition with that defined in the derived class', for example:
class D(C):
def m1(self, arg):
return whatever**2
def m1_post(self, Result, arg):
C.m1_post(self, Result, arg)
assert Result < 100
This gives derived classes more freedom but also more responsibility
than in Eiffel, where the compiler automatically takes care of this.
In Eiffel, pre-conditions combine using contravariance, meaning a
derived class can only make a pre-condition weaker; in Python, this is
up to the derived class. For example, a derived class that takes away
the requirement that arg > 0 could write:
def m1_pre(self, arg):
pass
but one could equally write a derived class that makes a stronger
requirement:
def m1_pre(self, arg):
require arg > 50
It would be easy to modify the classes shown here so that pre- and
post-conditions can be disabled (separately, on a per-class basis).
A different design would have the pre- or post-condition testing
functions return true for success and false for failure. This would
make it possible to implement automatic combination of inherited
and new pre-/post-conditions. All this is left as an exercise to the
reader.
"""
from Meta import MetaClass, MetaHelper, MetaMethodWrapper
class EiffelMethodWrapper(MetaMethodWrapper):
def __init__(self, func, inst):
MetaMethodWrapper.__init__(self, func, inst)
# Note that the following causes recursive wrappers around
# the pre-/post-condition testing methods. These are harmless
# but inefficient; to avoid them, the lookup must be done
# using the class.
try:
self.pre = getattr(inst, self.__name__ + "_pre")
except AttributeError:
self.pre = None
try:
self.post = getattr(inst, self.__name__ + "_post")
except AttributeError:
self.post = None
def __call__(self, *args, **kw):
if self.pre:
apply(self.pre, args, kw)
Result = apply(self.func, (self.inst,) + args, kw)
if self.post:
apply(self.post, (Result,) + args, kw)
return Result
class EiffelHelper(MetaHelper):
__methodwrapper__ = EiffelMethodWrapper
class EiffelMetaClass(MetaClass):
__helper__ = EiffelHelper
Eiffel = EiffelMetaClass('Eiffel', (), {})
def _test():
class C(Eiffel):
def m1(self, arg):
return arg+1
def m1_pre(self, arg):
assert arg > 0, "precondition for m1 failed"
def m1_post(self, Result, arg):
assert Result > arg
x = C()
x.m1(12)
## x.m1(-1)
if __name__ == '__main__':
_test()
|
ibmsoe/ImpalaPPC | refs/heads/Impala2.6-main | bin/run-workload.py | 2 | #!/usr/bin/env impala-python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is used as the driver to run performance benchmarks.
# It does the following:
# - parses the user defined options and validates them.
# - Matches each workload to its set of queries and constructs the required objects.
# - Runs each workload in serial order (a workload is a combination of dataset and scale
# factor)
# - Pretty prints the results of each query's execution.
# - Stores the execution details in JSON format.
#
import getpass
import json
import logging
import os
import prettytable
from collections import deque
from copy import deepcopy
from datetime import datetime
from decimal import Decimal
from itertools import groupby
from optparse import OptionParser
from random import shuffle
from sys import exit
from tests.common.test_dimensions import TableFormatInfo
from tests.performance.query import Query, HiveQueryResult, ImpalaQueryResult
from tests.performance.query_executor import QueryExecConfig
from tests.performance.workload_runner import WorkloadRunner
from tests.performance.workload import Workload
from tests.util.plugin_runner import PluginRunner
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="If set, outputs all benchmark diagnostics.")
parser.add_option("--exploration_strategy", dest="exploration_strategy", default="core",
help=("The exploration strategy to use for running benchmark: 'core', "
"'pairwise', or 'exhaustive'"))
parser.add_option("-w", "--workloads", dest="workloads", default="tpcds",
help=("The workload(s) and scale factors to run in a comma-separated "
" list format. Optional scale factors for each workload are specified"
" using colons. For example: -w tpcds,tpch:400gb,tpch:1gb. "
"Some valid workloads:'tpch', 'tpcds', ..."))
parser.add_option("--impalads", dest="impalads", default="localhost",
help=("A comma-separated list of impalad instances to run the "
"workload against."))
parser.add_option("--exec_options", dest="exec_options", default=str(),
help="Runquery exec option string.")
parser.add_option("--results_json_file", dest="results_json_file",
default=os.environ['IMPALA_HOME'] + "/benchmark_results.json",
help="The output file where benchmark results are saved")
parser.add_option("-i", "--query_iterations", type="int", dest="query_iterations",
default=1, help="Number of times to run each query within a workload")
parser.add_option("-x", "--workload_iterations", type="int", dest="workload_iterations",
default=1, help="Number of times to run each workload.")
parser.add_option("--num_clients", type="int", dest="num_clients", default=1,
help="Number of clients (threads) to use when executing each query.")
parser.add_option("--query_names", dest="query_names", default=str(),
help="A comma-separated list of query names to execute.")
parser.add_option("--table_formats", dest="table_formats", default=str(),
help=("Override the default test vectors and run using only the"
" specified table formats. Ex. --table_formats=seq/snap/block"
",text/none"))
parser.add_option("--shuffle_query_exec_order", dest="shuffle_queries",
action="store_true", default=False, help=("Randomizes the order "
"of query execution. Useful when the execution scope is a workload"))
parser.add_option("--use_kerberos", dest="use_kerberos", action="store_true",
default=False, help="If set, enables talking to a kerberized impalad")
parser.add_option("--continue_on_query_error", dest="continue_on_query_error",
action="store_true", default=False,
help="If set, continue execution on each query error.")
parser.add_option("-c", "--client_type", dest="client_type", default='beeswax',
choices=['beeswax', 'jdbc', 'hs2'],
help="Client type. Valid options are 'beeswax' or 'jdbc' or 'hs2'")
parser.add_option("--plugin_names", dest="plugin_names", default=None,
help=("Set of comma-separated plugin names with scope; Plugins are"
" specified as <plugin_name>[:<scope>]. If no scope if specified,"
" it defaults to Query. Plugin names are case sensitive"))
parser.add_option("--exec_engine", dest="exec_engine", default="impala",
choices=['impala', 'hive'],
help=("Which SQL engine to use - impala, hive are valid options"))
parser.add_option("--hiveserver", dest="hiveserver", default="localhost",
help=("Host that has HiveServers2 service running"))
parser.add_option("--user", dest="user", default=getpass.getuser(),
help=("User account under which workload/query will run"))
options, args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='[%(name)s]: %(message)s')
LOG = logging.getLogger('run-workload')
class WorkloadConfig(object):
"""Converts the options dict into a class"""
def __init__(self, **config):
self.__dict__.update(config)
class CustomJSONEncoder(json.JSONEncoder):
"""Override the JSONEncoder's default method.
This class is needed for two reasons:
- JSON does have a datetime field. We intercept a datetime object and convert it into
a standard iso string.
- JSON does not know how to serialize object. We intercept the objects and
provide their __dict__ representations
"""
def default(self, obj,):
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
# Convert datetime into an standard iso string
return obj.isoformat()
elif isinstance(obj, (Query, HiveQueryResult, QueryExecConfig, TableFormatInfo)):
# Serialize these objects manually by returning their __dict__ methods.
return obj.__dict__
else:
super(CustomJSONEncoder, self).default(obj)
def prettytable_print(results, failed=False):
"""Print a list of query results in prettytable"""
column_names = ['Query', 'Start Time', 'Time Taken (s)', 'Client ID']
if failed: column_names.append('Error')
table = prettytable.PrettyTable(column_names)
table.align = 'l'
table.float_format = '.2'
# Group the results by table format.
for table_format_str, gr in groupby(results, lambda x: x.query.table_format_str):
print "Table Format: %s" % table_format_str
for result in gr:
start_time = result.start_time.strftime("%Y-%m-%d %H:%M:%S") if result.start_time \
is not None else '-'
row = [result.query.name, start_time, result.time_taken, result.client_name]
if failed: row.append(result.query_error)
table.add_row(row)
print table.get_string(sortby='Client ID')
table.clear_rows()
print str()
def print_result_summary(results):
  """Print successful queries, then failed ones (if any), for a result list."""
  passed = [r for r in results if r.success == True]
  failed = [r for r in results if r.success == False]
  prettytable_print(passed)
  if failed:
    prettytable_print(failed, failed=True)
def get_workload_scale_factor():
  """Yield [workload, scale_factor] pairs parsed from the command line.

  The expected input string is
  "workload_1[:scale_factor_1],...,workload_n[:scale_factor_n]"; a missing
  scale factor is reported as the empty string.
  """
  entries = split_and_strip(options.workloads)
  assert len(entries) > 0, "At least one workload must be specified"
  for entry in entries:
    # Each entry should conform to workload[:scale_factor]
    parts = split_and_strip(entry, delim=":")
    assert len(parts) in [1, 2], "Error parsing workload:scale_factor"
    if len(parts) == 1:
      parts.append(str())
    yield parts
def split_and_strip(input_string, delim=","):
  """Split ``input_string`` on ``delim`` and strip whitespace from each piece.

  Returns an empty list for empty or None input. A real list (not a lazy
  ``map`` object) is always returned so callers can index, append to, and
  re-iterate the result; the previous ``map()`` form only guaranteed that
  under Python 2.
  """
  if not input_string:
    return list()
  return [part.strip() for part in input_string.split(delim)]
def create_workload_config():
  """Build a WorkloadConfig from the parsed command line options.

  Some user inputs need to be transformed from delimited strings to lists in
  order to be consumed by the performance framework. Additionally, the
  already-constructed plugin_runner object is added to the config.
  """
  # Deep-copy so each workload gets an independent config snapshot.
  config = deepcopy(vars(options))
  # We don't need workloads and query_names in the config map as they're already specified
  # in the workload object.
  del config['workloads']
  del config['query_names']
  config['plugin_runner'] = plugin_runner
  # transform a few options from strings to lists
  config['table_formats'] = split_and_strip(config['table_formats'])
  impalads = split_and_strip(config['impalads'])
  # Randomize the order of impalads.
  shuffle(impalads)
  config['impalads'] = deque(impalads)
  return WorkloadConfig(**config)
def _validate_options():
  """Basic validation for some commandline options.

  Raises:
    RuntimeError: on an unsupported engine/client combination, duplicate
      workload/scale-factor pairs, or a malformed impalad address.
    ImportError: if --use_kerberos is set but the sasl module is missing.
  """
  # the sasl module must be importable on a secure setup.
  if options.use_kerberos: import sasl
  # If Hive is the exec engine, hs2 is the only supported interface.
  if options.exec_engine.lower() == "hive" and options.client_type != "hs2":
    raise RuntimeError("The only supported client type for Hive engine is hs2")
  # Check for duplicate workload/scale_factor combinations
  workloads = split_and_strip(options.workloads)
  if not len(set(workloads)) == len(workloads):
    raise RuntimeError("Duplicate workload/scale factor combinations are not allowed")
  # The list of Impalads must be provided as a comma separated list of either host:port
  # combination or just host.
  for impalad in split_and_strip(options.impalads):
    if len(impalad.split(":")) not in [1,2]:
      raise RuntimeError("Impalads must be of the form host:port or host.")
if __name__ == "__main__":
  # Check for badly formed user options.
  _validate_options()
  # Initialize the PluginRunner (optional; None when no plugins requested).
  plugin_runner = None
  if options.plugin_names:
    plugin_runner = PluginRunner(split_and_strip(options.plugin_names))
  # Initialize workloads.
  workload_runners = list()
  query_name_filters = split_and_strip(options.query_names)
  # Create a workload config object per workload/scale-factor pair.
  for workload_name, scale_factor in get_workload_scale_factor():
    config = create_workload_config()
    workload = Workload(workload_name, query_name_filters=query_name_filters)
    workload_runners.append(WorkloadRunner(workload, scale_factor, config))
  # Run all the workloads serially
  result_map = dict()
  exit_code = 0
  for workload_runner in workload_runners:
    try:
      if plugin_runner: plugin_runner.run_plugins_pre(scope="Workload")
      workload_runner.run()
      if plugin_runner: plugin_runner.run_plugins_post(scope="Workload")
    finally:
      # Record and report results even if the run raised, so partial
      # results are not lost.
      key = "%s_%s" % (workload_runner.workload.name, workload_runner.scale_factor)
      result_map[key] = workload_runner.results
      if not all(result.success for result in workload_runner.results): exit_code = 1
      # Print the results
      print "\nWorkload: {0}, Scale Factor: {1}\n".format(
        workload_runner.workload.name.upper(), workload_runner.scale_factor)
      print_result_summary(workload_runner.results)
  # Store the results
  with open(options.results_json_file, 'w') as f:
    json.dump(result_map, f, cls=CustomJSONEncoder)
  exit(exit_code)
|
mecax/pyrobotlab | refs/heads/master | home/AdolphSmith/AdolphSmith.py | 5 | # script for AdolphSmith
# http://myrobotlab.org/content/my-inmoov-parts-list-and-way-working
# MyRobotLab (Jython) script: wires up speech, ear, mouth control and head
# tracking services for an InMoov-style robot.
from org.myrobotlab.framework import Service

# --- service setup ---------------------------------------------------------
# mouth = Runtime.createAndStart("mouth","Speech") - we don't need 2 mouths
# Reserve the arduino names so Tracking and MouthControl share the boards.
Service.reserveRootAs("tracker.arduino","arduino")
Service.reserveRootAs("mouthControl.arduino","arduino")
tracker = Runtime.createAndStart("tracker", "Tracking")
ear = Runtime.createAndStart("ear","Sphinx")
mouthControl = Runtime.createAndStart("mouthControl","MouthControl")
mouth = mouthControl.mouth
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM11")
mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Ryan&txt=")
mouth.speakBlocking("Hello. I have powered up")
mouth.speakBlocking("And now I will start a Tracking service")
# Alternative voices (uncomment one to switch):
# mouthControl.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Ryan&txt=")
# mouthControl.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Peter&txt=")
mouthControl.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Will&txt=")
# mouthControl.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Kenny&txt=")
# mouthControl.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Graham&txt=")

# --- jaw servo: re-attach on pin 5 and constrain its travel ----------------
mouthControl.jaw.detach()
mouthControl.jaw.setPin(5)
mouthControl.jaw.attach()
mouthControl.setmouth(162,155)
mouthControl.mouth.speak("hello. i am testing mouth control. does it work. i dont know")

# set specifics on each Servo
servoX = tracker.getX()
servoX.setPin(6)
servoX.setMinMax(30, 170)
servoY = tracker.getY()
servoY.setPin(10)
servoY.setMinMax(30, 150)
servoY.setRest(56)

# optional filter settings
opencv = tracker.getOpenCV()
# setting camera index to 1 default is 0
opencv.setCameraIndex(1)

# connect to the Arduino
tracker.connect("COM10")

# Gray & PyramidDown make face tracking
# faster - if you dont like these filters - you
# may remove them before you select a tracking type with
# the following command
# tracker.clearPreFilters()

# different types of tracking:
# simple face detection and tracking
# tracker.faceDetect()
# lkpoint - click in video stream with
# mouse and it should track
tracker.startLKTracking()
# scans for faces - tracks if found
# tracker.findFace()

# Scripted introduction lines, kept here for reference:
# mouthControl.mouth.speak("I am a Humanoid programmed by My Robot Lab")
# mouthControl.mouth.speak("I moove now for the first time and i am very hapy")
# mouthControl.mouth.speak("Thank yo GroG for my live ")
# mouthControl.mouth.speak("I am a Humanoid programmed by My Robot Lab")
# mouthControl.mouth.speak("I am Syler the unbelievable tree d printed robot")
# mouthControl.mouth.speak("i am here to take over tegnology ")
# mouthControl.mouth.speak("if you need anything from the internet or if you want help with anything just ask ")
# mouthControl.mouth.speak("i will do my best to help ")
# mouthControl.mouth.speak("if i do not know what to do , i search google an then i know more than you, human. ")

# start listening for the words we are interested in
ear.startListening("hello | how are you | are you alive | what are you doing")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard");
# prevent infinite loop - this will suppress the
# recognition when speaking - default behavior
# when attaching an ear to a mouth :)
ear.attach(mouth)

# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
def heard():
    # First element of the routed message payload is the recognized phrase.
    data = msg_ear_recognized.data[0]
    #mouth.speak("you said " + data)
    #print "heard ", data
    if (data == "hello"):
        mouth.speak("hello. I am a Humanoid programmed by My Robot Lab")
        # Nod: dip the head servo briefly, then return near its rest position.
        servoY.moveTo(60)
        sleep(1)
        servoY.moveTo(53)
    elif (data == "how are you"):
        mouth.speak("i am fine and all my circuits are functioning perfectly")
    elif (data == "are you alive"):
        mouth.speak("define alive. are you alive just because you breath. is a virus alive. is the earth alive.")
    # ... etc
|
MartinHjelmare/home-assistant | refs/heads/dev | tests/components/time_date/__init__.py | 36 | """Tests for the time_date component."""
|
linuxlewis/channels-api | refs/heads/master | channels_api/bindings.py | 1 | import json
from channels.binding import websockets
from channels.binding.base import CREATE, UPDATE, DELETE, BindingMetaclass
from django.http import Http404
from django.utils import six
from rest_framework.exceptions import APIException, NotFound
from rest_framework.generics import get_object_or_404
from .mixins import SerializerMixin, SubscribeModelMixin, CreateModelMixin, UpdateModelMixin, \
PatchModelMixin, RetrieveModelMixin, ListModelMixin, DeleteModelMixin
from .settings import api_settings
class ResourceBindingMetaclass(BindingMetaclass):
    """
    Metaclass that records action methods on the binding class.
    """

    def __new__(cls, name, bases, body):
        binding = super(ResourceBindingMetaclass, cls).__new__(cls, name, bases, body)
        # Map of public action name -> method name, built from any attribute
        # flagged (e.g. by a decorator) with `.action = True`.
        binding.available_actions = {}
        for methodname in dir(binding):
            attr = getattr(binding, methodname)
            is_action = getattr(attr, 'action', False)
            if is_action:
                # An action may override its public name via kwargs['name'].
                kwargs = getattr(attr, 'kwargs', {})
                name = kwargs.get('name', methodname)
                binding.available_actions[name] = methodname
        return binding
@six.add_metaclass(ResourceBindingMetaclass)
class ResourceBindingBase(SerializerMixin, websockets.WebsocketBinding):
    """Base websocket binding exposing a model as a REST-like resource.

    Subclasses configure ``model``, ``queryset``, ``serializer_class`` and
    optionally ``permission_classes``; the metaclass collects flagged action
    methods into ``available_actions``, which ``run_action`` dispatches to.
    """

    fields = []  # hack to pass cls.register() without ValueError
    queryset = None
    # mark as abstract
    model = None
    serializer_class = None
    lookup_field = 'pk'
    permission_classes = ()

    def deserialize(self, message):
        """Decode an incoming websocket frame into (action, pk, data)."""
        body = json.loads(message['text'])
        # Remember the client-supplied request id so replies can echo it back.
        self.request_id = body.get("request_id")
        action = body['action']
        pk = body.get('pk', None)
        data = body.get('data', None)
        return action, pk, data

    @classmethod
    def pre_change_receiver(cls, instance, action):
        """
        Entry point for triggering the binding from save signals.

        Stashes the groups the instance belonged to *before* the change so
        post_change_receiver can diff them afterwards.
        """
        if action == CREATE:
            group_names = set()
        else:
            group_names = set(cls.group_names(instance, action))
        if not hasattr(instance, '_binding_group_names'):
            instance._binding_group_names = {}
        instance._binding_group_names[cls] = group_names

    @classmethod
    def post_change_receiver(cls, instance, action, **kwargs):
        """
        Triggers the binding to possibly send to its group.
        """
        old_group_names = instance._binding_group_names[cls]
        if action == DELETE:
            new_group_names = set()
        else:
            new_group_names = set(cls.group_names(instance, action))
        # if post delete, new_group_names should be []
        self = cls()
        self.instance = instance
        # Django DDP had used the ordering of DELETE, UPDATE then CREATE for good reasons.
        self.send_messages(instance, old_group_names - new_group_names, DELETE, **kwargs)
        self.send_messages(instance, old_group_names & new_group_names, UPDATE, **kwargs)
        self.send_messages(instance, new_group_names - old_group_names, CREATE, **kwargs)

    @classmethod
    def group_names(cls, instance, action):
        """Return the action-wide and (if saved) per-instance group names."""
        self = cls()
        groups = [self._group_name(action)]
        if instance.pk:
            groups.append(self._group_name(action, id=instance.pk))
        return groups

    def _group_name(self, action, id=None):
        """Formatting helper for group names."""
        if id:
            return "{}-{}-{}".format(self.model_label, action, id)
        else:
            return "{}-{}".format(self.model_label, action)

    def has_permission(self, user, action, pk):
        """Check every configured permission class; all must allow the action."""
        if self.permission_classes:
            permissions = self.permission_classes
        else:
            permissions = api_settings.DEFAULT_PERMISSION_CLASSES
        for cls in permissions:
            if not cls().has_permission(user, action, pk):
                return False
        return True

    def filter_queryset(self, queryset):
        """Hook for subclasses to restrict the queryset; default is a no-op."""
        return queryset

    def _format_errors(self, errors):
        """Normalize an error payload (list/str/dict/other) into a list."""
        if isinstance(errors, list):
            return errors
        elif isinstance(errors, six.string_types):
            return [errors]
        elif isinstance(errors, dict):
            return [errors]
        # Bug fix: previously any other type fell through and returned None;
        # always hand back a list so reply payloads stay well-formed.
        return [errors]

    def get_object_or_404(self, pk):
        """Look up one instance by ``lookup_field``; raise NotFound if absent."""
        queryset = self.filter_queryset(self.get_queryset())
        filter_kwargs = {self.lookup_field: pk}
        try:
            return get_object_or_404(queryset, **filter_kwargs)
        except Http404:
            # transform Http404 into an APIException
            raise NotFound

    def get_queryset(self):
        """Return a fresh queryset; subclasses must define ``queryset``."""
        assert self.queryset is not None, (
            "'%s' should either include a `queryset` attribute, "
            "or override the `get_queryset()` method."
            % self.__class__.__name__
        )
        return self.queryset.all()

    def run_action(self, action, pk, data):
        """Dispatch one decoded message to its action method and reply."""
        try:
            if not self.has_permission(self.user, action, pk):
                self.reply(action, errors=['Permission Denied'], status=401,
                           request_id=self.request_id)
            elif action not in self.available_actions:
                self.reply(action, errors=['Invalid Action'], status=400,
                           request_id=self.request_id)
            else:
                methodname = self.available_actions[action]
                method = getattr(self, methodname)
                # "detail" actions operate on a single instance and take pk.
                detail = getattr(method, 'detail', True)
                if detail:
                    rv = method(pk, data=data)
                else:
                    rv = method(data=data)
                data, status = rv
                self.reply(action, data=data, status=status, request_id=self.request_id)
        except APIException as ex:
            self.reply(action, errors=self._format_errors(ex.detail), status=ex.status_code,
                       request_id=self.request_id)

    def reply(self, action, data=None, errors=None, status=200, request_id=None):
        """
        Helper method to send an encoded response to the message's reply_channel.
        """
        # Bug fix: the previous ``errors=[]`` default was a shared mutable
        # argument; ``None`` now stands in for "no errors" and is mapped to a
        # fresh empty list per call.
        payload = {
            'errors': [] if errors is None else errors,
            'data': data,
            'action': action,
            'response_status': status,
            'request_id': request_id
        }
        return self.message.reply_channel.send(self.encode(self.stream, payload))
class ResourceBinding(CreateModelMixin, RetrieveModelMixin, ListModelMixin,
                      UpdateModelMixin, PatchModelMixin, DeleteModelMixin, SubscribeModelMixin, ResourceBindingBase):
    """Full create/retrieve/list/update/patch/delete/subscribe binding."""
    # mark as abstract
    model = None
class ReadOnlyResourceBinding(RetrieveModelMixin, ListModelMixin,
                              ResourceBindingBase):
    """Retrieve/list-only binding for read-only resources."""
    # mark as abstract
    model = None
|
ctasims/Dive-Into-Python-3 | refs/heads/master | examples/roman8.py | 2 | '''Convert to and from Roman numerals
This program is part of 'Dive Into Python 3', a free Python book for
experienced programmers. Visit http://diveintopython3.org/ for the
latest version.
'''
import re
class OutOfRangeError(ValueError): pass           # n outside 1..3999
class NotIntegerError(ValueError): pass           # n is not an int
class InvalidRomanNumeralError(ValueError): pass  # s is not a valid Roman numeral
roman_numeral_map = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
roman_numeral_pattern = re.compile('''
^ # beginning of string
M{0,3} # thousands - 0 to 3 M's
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
# or 500-800 (D, followed by 0 to 3 C's)
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
# or 50-80 (L, followed by 0 to 3 X's)
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
# or 5-8 (V, followed by 0 to 3 I's)
$ # end of string
''', re.VERBOSE)
def to_roman(n):
    '''convert integer to Roman numeral'''
    # Range is checked before type, matching the original order of checks.
    if not (0 < n < 4000):
        raise OutOfRangeError('number out of range (must be 1..3999)')
    if not isinstance(n, int):
        raise NotIntegerError('non-integers can not be converted')
    pieces = []
    remainder = n
    for numeral, value in roman_numeral_map:
        # Emit as many copies of this numeral as fit, largest values first.
        count, remainder = divmod(remainder, value)
        pieces.append(numeral * count)
    return ''.join(pieces)
def from_roman(s):
    '''convert Roman numeral to integer'''
    if not isinstance(s, str):
        raise InvalidRomanNumeralError('Input must be a string')
    if not s:
        raise InvalidRomanNumeralError('Input can not be blank')
    if not roman_numeral_pattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: {0}'.format(s))
    total = 0
    pos = 0
    # Consume the numeral left to right, largest values first.
    for numeral, value in roman_numeral_map:
        width = len(numeral)
        while s.startswith(numeral, pos):
            total += value
            pos += width
    return total
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
crcresearch/osf.io | refs/heads/develop | api_tests/applications/views/__init__.py | 12133432 | |
chrishas35/django-travis-ci | refs/heads/master | tests/modeltests/proxy_model_inheritance/__init__.py | 12133432 | |
rapilabs/sensible-text-test-runner | refs/heads/master | sensible_text_test_runner/management/__init__.py | 12133432 | |
LarryHillyer/PoolHost | refs/heads/master | PoolHost/env/Lib/site-packages/wheel/test/simple.dist/setup.py | 565 | from setuptools import setup
try:
    # Python 2: byte strings need an explicit escape-decode / UTF-8 encode.
    unicode
    def u8(s):
        return s.decode('unicode-escape').encode('utf-8')
except NameError:
    # Python 3: strings are already text; just encode to UTF-8.
    def u8(s):
        return s.encode('utf-8')

setup(name='simple.dist',
      version='0.1',
      description=u8('A testing distribution \N{SNOWMAN}'),
      packages=['simpledist'],
      extras_require={'voting': ['beaglevote']},
      )
|
totolef/Sick-beard | refs/heads/master | lib/subliminal/services/subswiki.py | 35 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords, split_keyword
from ..videos import Episode, Movie
from bs4 import BeautifulSoup
import logging
import urllib
logger = logging.getLogger("subliminal")
class SubsWiki(ServiceBase):
    """subswiki.com subtitle service for subliminal (HTML scraping, no API)."""
    server_url = 'http://www.subswiki.com'
    site_url = 'http://www.subswiki.com'
    api_based = False
    languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'pob', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
    # Site-specific language labels -> subliminal Language objects.
    language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
                    u'Català': Language('cat'), u'Brazilian': Language('pob'), u'English (US)': Language('eng-US'),
                    u'English (UK)': Language('eng-GB')}
    language_code = 'name'
    videos = [Episode, Movie]
    require_video = False
    #required_features = ['permissive']

    def list_checked(self, video, languages):
        """Dispatch to query() with episode or movie parameters."""
        results = []
        if isinstance(video, Episode):
            results = self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)
        elif isinstance(video, Movie) and video.year:
            results = self.query(video.path or video.release, languages, get_keywords(video.guess), movie=video.title, year=video.year)
        return results

    def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None, movie=None, year=None):
        """Scrape the site for matching subtitles.

        Either (series, season, episode) or (movie, year) must be given;
        otherwise a ServiceError is raised.
        """
        if series and season and episode:
            request_series = series.lower().replace(' ', '_')
            if isinstance(request_series, unicode):
                request_series = request_series.encode('utf-8')
            logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
            r = self.session.get('%s/serie/%s/%s/%s/' % (self.server_url, urllib.quote(request_series), season, episode))
            if r.status_code == 404:
                logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
                return []
        elif movie and year:
            request_movie = movie.title().replace(' ', '_')
            if isinstance(request_movie, unicode):
                request_movie = request_movie.encode('utf-8')
            logger.debug(u'Getting subtitles for %s (%d) with languages %r' % (movie, year, languages))
            r = self.session.get('%s/film/%s_(%d)' % (self.server_url, urllib.quote(request_movie), year))
            if r.status_code == 404:
                logger.debug(u'Could not find subtitles for %s (%d) with languages %r' % (movie, year, languages))
                return []
        else:
            raise ServiceError('One or more parameter missing')
        if r.status_code != 200:
            logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
            return []
        # NOTE(review): required_features is commented out in the class body
        # above — confirm ServiceBase still provides this attribute.
        soup = BeautifulSoup(r.content, self.required_features)
        subtitles = []
        for sub in soup('td', {'class': 'NewsTitle'}):
            # Skip releases whose keywords don't overlap the wanted ones.
            sub_keywords = split_keyword(sub.b.string.lower())
            if keywords and not keywords & sub_keywords:
                logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
                continue
            for html_language in sub.parent.parent.find_all('td', {'class': 'language'}):
                language = self.get_language(html_language.string.strip())
                if language not in languages:
                    logger.debug(u'Language %r not in wanted languages %r' % (language, languages))
                    continue
                html_status = html_language.find_next_sibling('td')
                status = html_status.strong.string.strip()
                # Only fully completed subtitles are usable.
                if status != 'Completado':
                    logger.debug(u'Wrong subtitle status %s' % status)
                    continue
                path = get_subtitle_path(filepath, language, self.config.multi)
                subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s%s' % (self.server_url, html_status.find_next('td').find('a')['href']))
                subtitles.append(subtitle)
        return subtitles
Service = SubsWiki
|
salsita/flask-raml | refs/heads/master | flask_raml.py | 2 | """Flask-RAML (REST API Markup Language) API server with parameter conversion, response encoding, and examples."""
__all__ = 'MimeEncoders API Loader Converter Content ApiError RequestError ParameterError AuthError'.split()
__version__ = '0.2.2'
from sys import exc_info
from operator import itemgetter
from functools import wraps
from flask import abort, request, has_request_context, Response
from flask.app import HTTPException
from werkzeug.http import HTTP_STATUS_CODES
from werkzeug.datastructures import MultiDict
import flask.ext.mime_encoders
import flask.ext.mime_encoders.json
import raml
from raml import Content, ApiError, RequestError, ParameterError, AuthError
# Export raml module properties.
class MimeEncoders(flask.ext.mime_encoders.MimeEncoders):
    # Use the JSON encoder as the default request/response codec.
    default = flask.ext.mime_encoders.MimeEncoders.json
class Converter(raml.Converter):
    # Enable conversion logging (consumed by the raml base class).
    log = True

    def convert_params(self, specification, params):
        """Convert request parameters, flattening a Werkzeug MultiDict first.

        A key with a single value becomes that scalar; keys with multiple
        values keep the list of values.
        """
        if isinstance(params, MultiDict):
            params, multidict = {}, params
            for key, values in multidict.iteritems():
                params[key] = values[0] if len(values) == 1 else values
        return super(Converter, self).convert_params(specification, params)
class Loader(raml.Loader):
    log = True
    # Templates for rewriting RAML URI parameters ({name}) into Flask URL
    # converter syntax (<type:name>).
    spec_param_template = '{{{name}}}'
    flask_param_template = '<{flask_type}:{name}>'
    # RAML parameter type -> Flask URL converter name.
    flask_types = {
        'integer': 'int',
        'number': 'float',
        'string': 'string',
        'boolean': 'bool',
        'date': 'date',
    }

    def get_resource_uri(self, resource):
        """Return the resource URI with RAML params rewritten for Flask routing.

        Also merges the resource's own uriParameters into allUriParameters so
        later conversion sees the full set.
        """
        uri = resource['relativeUri']
        if 'uriParameters' in resource:
            spec_format, flask_format = self.spec_param_template.format, self.flask_param_template.format
            for name, param in resource['uriParameters'].items():
                param['name'] = name
                param['flask_type'] = self.flask_types[param['type']]
                uri = uri.replace(spec_format(**param), flask_format(**param))
            resource['allUriParameters'].update(resource['uriParameters'])
        return uri
class API(raml.API):
    """Flask API.

    Binds a RAML specification to a Flask app: routes resources to views,
    decodes/encodes bodies, converts URI and query parameters, and can serve
    the spec's example responses for unimplemented methods.
    """
    plugins = dict(raml.API.plugins, loader=Loader, encoders=MimeEncoders, converter=Converter)
    auth = None
    logger_name = '{app}:api'
    decode_request = True
    encode_response = True
    convert_query_params = True
    convert_uri_params = True
    endpoint_template = '{api}{resource}_{methods}'
    # Test clients may request a specific example response status via header.
    requested_response_status_header = 'X-Test-Response-Status'
    default_error_status = 500
    default_error_message = 'internal server error'
    config_exclude = raml.API.config_exclude.union('unhandled_uris unhandled_methods'.split())

    def __init__(self, app, path, uri=None, id=None, log=None, **options):
        """Attach the API (loaded from RAML at *path*) to the Flask *app*."""
        self.app = app
        self.views = {}
        if log is None or isinstance(log, basestring):
            log = app.logger.manager.getLogger(log or options.get('logger_name', self.logger_name).format(app=app.name))
        super(API, self).__init__(path, uri, id, log, **options)
        self.default_mimetype = self.encoders.default.mimetype
        # Let an auth plugin opt in to sharing the API logger.
        if self.auth and getattr(self.auth, 'log', None) is True:
            self.auth.log = log
        if log:
            log.debug(repr(self))

    @property
    def unhandled_uris(self):
        """URIs declared in the spec with no view registered yet."""
        return [uri for uri in self.api if uri not in self.views]

    @property
    def unhandled_methods(self):
        """(uri, method) pairs declared in the spec with no view registered."""
        result = []
        for uri, resource in self.api.iteritems():
            methods = self.views.get(uri, ())
            result.extend((uri, method) for method in resource['methodsByName'] if method.upper() not in methods)
        return result

    def abort(self, status, error=None, encoder=True):
        """Log and abort the request, optionally with an encoded error body."""
        (self.log.exception if self.app.debug and exc_info()[0] else self.log.error)(
            '%r %s %s >> %s', status, request.method, request.path,
            error or HTTP_STATUS_CODES.get(status, 'Unknown Error'))
        if error:
            return abort(status, description=error, response=self.encoders[encoder].make_response(
                dict(status=status, error=error), status=status))
        else:
            return abort(status)

    def add_route(self, resource, view, methods=None, endpoint=None, **options):
        """Non-decorator form of route()."""
        return self.route(resource, methods, endpoint, **options)(view)

    def route(self, resource, methods=None, endpoint=None, **options):
        """Return a decorator registering a view for *resource*.

        The wrapped view gets auth, parameter conversion, request decoding and
        response encoding according to the (optionally overridden) config.
        """
        resource = self.get_resource(resource)
        uri = resource['uri']
        config = dict(self.config, **options) if options else self.config
        methods = self.get_resource_methods(resource, methods)
        if endpoint is None:
            endpoint = self.get_endpoint(resource, methods, self.endpoint_template)
        # Resolve config once at registration time, not per request.
        auth = config['auth']
        decorate = config.get('decorate', None)
        decode_request = self.encoders[config['decode_request']]
        encode_response = self.encoders[config['encode_response']]
        convert_uri_params = config['convert_uri_params']
        convert_query_params = config['convert_query_params']
        def decorator(view):
            self.log.debug('map %s %s %s', self.id, '/'.join(sorted(methods)), uri)
            @wraps(view)
            def decorated_view(**uri_params):
                try:
                    url = request.path
                    self.log.info('%s %s << %s [%s|%s|%s]', request.method, url,
                        uri_params if self.app.debug or not uri_params else '{...}',
                        len(uri_params) or '-', len(request.args) or '-', len(request.data) or '-')
                    if auth:
                        auth.authorize(uri_params, request)
                    method = self.get_method_spec(resource, request.method)
                    if convert_uri_params:
                        uri_params = self.converter.convert_params(resource['allUriParameters'], uri_params)
                    if convert_query_params:
                        if 'queryParameters' in method:
                            uri_params.update(self.converter.convert_params(method['queryParameters'], request.args))
                        elif request.args:
                            self.abort(400, 'resource does not accept query parameters')
                    if uri_params:
                        self.log.debug('%s %s << args: %s [%s]', request.method, url, uri_params,
                            len(uri_params) or '-')
                    if decode_request:
                        self.log.debug('%s %s << data: %s [%s]', request.method, url, decode_request.name,
                            len(request.data))
                        uri_params.update(decode_request.get_request_data())
                    response = view(**uri_params)
                    if encode_response and not isinstance(response, (Response, basestring)):
                        response = encode_response.make_response(response)
                        self.log.debug('%s %s >> %s [%s:%s] (%s)', request.method, url, encode_response.name,
                            type(response.response), len(response.response), response.status)
                    return response
                except HTTPException as error:
                    if error.response:
                        # Use exception response if it was already created, either by API.abort(), or custom way.
                        raise
                    else:
                        # Otherwise, create a custom response via API.abort().
                        self.abort(error.code, error.description)
                except ApiError as error:
                    self.abort(error.status, error.message)
                except Exception as error:
                    # Hide internal error details unless the app runs in debug mode.
                    msg = str(error) if self.app.debug else self.default_error_message
                    self.abort(self.default_error_status, msg)
            if decorate:
                decorated_view = decorate(decorated_view)
            self.app.add_url_rule(uri, endpoint, decorated_view, methods=methods)
            for method in methods:
                self.views.setdefault(uri, {})[method] = decorated_view
            return decorated_view
        return decorator

    def serve(self, view, *args, **kwargs):
        """Call *view*, converting ApiError into an HTTP error response."""
        try:
            return view(*args, **kwargs)
        except ApiError as error:
            self.abort(error.status, error.message)

    def get_endpoint(self, resource, methods=None, template=None):
        """Build a unique Flask endpoint name for the resource/methods pair."""
        return (template or self.endpoint_template).format(
            api=self.id,
            resource=resource['uniqueId'],
            methods='+'.join(methods) if methods else 'any',
        )

    def get_response_mimetype(self, response, accept=None, request=request):
        """Pick a response mimetype, honoring the request's Accept header."""
        if accept is None:
            if request and has_request_context():
                accept = map(itemgetter(0), request.accept_mimetypes)
        return super(API, self).get_response_mimetype(response, accept)

    def get_default_status(self, status=None, request=request):
        """Return the requested test-response status header, if present."""
        try:
            return request.headers[self.requested_response_status_header]
        except (KeyError, RuntimeError):
            # NOTE(review): the `status` argument is ignored on this fallback
            # path — confirm this matches the base class's expectation.
            return super(API, self).get_default_status()

    def serve_examples(self, **options):
        """Serve spec examples for every method without a registered view."""
        for uri, method in self.unhandled_methods:
            self.serve_example(uri, method)

    def serve_example(self, resource, methods=None, **options):
        """Register example-serving views for the given resource/methods."""
        resource = self.get_resource(resource)
        for method in self.get_resource_methods(resource, methods):
            method_spec = self.get_method_spec(resource, method)
            self.route(resource, method, **options)(self.create_example_view(method_spec))

    def create_example_view(self, method_spec):
        """Create a view that replies with the spec's example response."""
        def view(**params):
            return self.serve(self.get_example, method_spec)
        return view

    def get_example(self, method_spec, status=None, mimetype=None):
        """Build a Response from the spec's example for *method_spec*."""
        response = self.get_response(method_spec, status)
        body = self.get_example_body(response, mimetype)
        headers = self.get_example_headers(response)
        self.log.info('%s %s: %s %s (%d bytes, %d headers)', method_spec['method'].upper(), method_spec['uri'],
            response['status'], body.mimetype, len(body), len(headers))
        return Response(body.content, status=response['status'], headers=headers, mimetype=body.mimetype)
|
enthought/etsproxy | refs/heads/master | enthought/persistence/versioned_unpickler.py | 1 | # proxy module
from __future__ import absolute_import
from apptools.persistence.versioned_unpickler import *
|
jmflinuxtx/kerneltest-harness | refs/heads/master | kerneltest/db/meta.py | 1 | """
This module sets up the basic database objects that all other database modules
will rely on. This includes the declarative base class and global scoped
session.
This is in its own module to avoid circular imports from forming. Models and
events need to be imported by ``__init__.py``, but they also need access to
the :class:`Base` model and :class:`Session`.
"""
import collections
from sqlalchemy import create_engine, event
from sqlalchemy.ext import declarative
from sqlalchemy.orm import sessionmaker, scoped_session, query as sa_query
#: This is a configured scoped session. It creates thread-local sessions. This
#: means that ``Session() is Session()`` is ``True``. This is a convenient way
#: to avoid passing a session instance around. Consult SQLAlchemy's documentation
#: for details.
#:
#: Before you can use this, you must call :func:`initialize`.
Session = scoped_session(sessionmaker())

#: A namedtuple that represents a page of database results.
#: ``items`` is the list of rows for this page; ``page`` is 1-indexed.
Page = collections.namedtuple(
    "Page", ("items", "page", "items_per_page", "total_items")
)

#: The default number of items in a page when using pagination.
DEFAULT_PAGE_SIZE = 25

#: The maximum page size when using pagination.
MAX_PAGE_SIZE = 250
def initialize(config):
    """Initialize the database.

    Creates an engine from the provided configuration and binds the scoped
    :data:`Session` to it.

    .. note::
        This also makes unit testing simple: call this function with your
        test database configuration in your test setup code and everything
        using :data:`Session` will hit the test database.

    Args:
        config (dict): Must contain ``DB_URL`` and ``SQL_DEBUG``.

    Returns:
        sqlalchemy.engine: The database engine created from the configuration.
    """
    db_url = config["DB_URL"]
    engine = create_engine(db_url, echo=config["SQL_DEBUG"])

    if db_url.startswith("sqlite:"):
        # SQLite ships with foreign key enforcement off; enable it on every
        # new connection. See
        # http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
        def _enable_foreign_keys(db_con, con_record):
            db_con.execute("PRAGMA foreign_keys=ON")

        event.listen(engine, "connect", _enable_foreign_keys)

    Session.configure(bind=engine)
    return engine
class BaseQuery(sa_query.Query):
    """A base Query object that provides queries for all models."""

    def paginate(self, page=1, items_per_page=DEFAULT_PAGE_SIZE):
        """Retrieve one page of results as a :data:`Page` namedtuple.

        Args:
            page (int): 1-indexed page number, default 1. Validate before
                passing it here.
            items_per_page (int): page size, default 25. Validate before
                passing it here.

        Returns:
            Page: A namedtuple of the items.
        """
        # Count first, then fetch the requested window.
        total = self.count()
        offset = items_per_page * (page - 1)
        rows = self.limit(items_per_page).offset(offset).all()
        return Page(
            items=rows,
            page=page,
            items_per_page=items_per_page,
            total_items=total,
        )
class DeclarativeBaseMixin(object):
    """
    A mix-in class for the declarative base class.

    This provides a place to attach functionality that should be available on
    all models derived from the declarative base.

    Attributes:
        query (sqlalchemy.orm.query.Query): a class property which produces a
            :class:`BaseQuery` object against the class and the current Session
            when called. Classes that want a customized Query class should
            sub-class :class:`BaseQuery` and explicitly set the query property
            on the model.
    """

    # Bound to the module-level scoped Session configured in initialize().
    query = Session.query_property(query_cls=BaseQuery)


#: The SQLAlchemy declarative base class all models must sub-class.
Base = declarative.declarative_base(cls=DeclarativeBaseMixin)
|
ric2b/Vivaldi-browser | refs/heads/master | chromium/tools/perf/cli_tools/soundwave/commands.py | 10 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import logging
try:
import sqlite3
except ImportError:
pass
from core import cli_utils
from core.external_modules import pandas
from core.services import dashboard_service
from cli_tools.soundwave import pandas_sqlite
from cli_tools.soundwave import studies
from cli_tools.soundwave import tables
from cli_tools.soundwave import worker_pool
def _FetchBugsWorker(args):
  """Worker initializer: fetch bug data from the dashboard into the db.

  NOTE(review): this installs ``Process`` as an attribute on the
  ``worker_pool`` module -- the pool apparently invokes
  ``worker_pool.Process`` once per work item; confirm against worker_pool.
  """
  con = sqlite3.connect(args.database_file, timeout=10)

  def Process(bug_id):
    # Fetch one bug from the dashboard and upsert it into the 'bugs' table.
    bugs = tables.bugs.DataFrameFromJson([dashboard_service.Bugs(bug_id)])
    pandas_sqlite.InsertOrReplaceRecords(con, 'bugs', bugs)

  worker_pool.Process = Process
def FetchAlertsData(args):
  """Fetch alerts for a benchmark, plus their associated bugs, into the db.

  Pulls alerts newer than ``--days`` from the dashboard, upserts them into
  the 'alerts' table, then fetches the referenced bugs with a worker pool.
  """
  params = {
      'test_suite': args.benchmark,
      'min_timestamp': cli_utils.DaysAgoToTimestamp(args.days)
  }
  if args.sheriff != 'all':
    params['sheriff'] = args.sheriff
  with tables.DbSession(args.database_file) as con:
    # Get alerts.
    num_alerts = 0
    bug_ids = set()
    # TODO: This loop may be slow when fetching thousands of alerts, needs a
    # better progress indicator.
    for data in dashboard_service.IterAlerts(**params):
      alerts = tables.alerts.DataFrameFromJson(data)
      pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts)
      num_alerts += len(alerts)
      bug_ids.update(alerts['bug_id'].unique())
    print('%d alerts found!' % num_alerts)

    # Get set of bugs associated with those alerts.
    bug_ids.discard(0)  # A bug_id of 0 means untriaged.
    print('%d bugs found!' % len(bug_ids))

    # Filter out bugs already in cache.
    if args.use_cache:
      known_bugs = set(
          b for b in bug_ids if tables.bugs.Get(con, b) is not None)
      if known_bugs:
        print('(skipping %d bugs already in the database)' % len(known_bugs))
        bug_ids.difference_update(known_bugs)

    # Use worker pool to fetch bug data.
    total_seconds = worker_pool.Run(
        'Fetching data of %d bugs: ' % len(bug_ids),
        _FetchBugsWorker, args, bug_ids)
    print('[%.1f bugs per second]' % (len(bug_ids) / total_seconds))
def _IterStaleTestPaths(con, test_paths):
  """Iterate over test_paths yielding only those with stale or absent data.

  A test_path is considered stale when the most recent data point we have
  for it in the db is more than a day old.
  """
  cutoff = pandas.Timestamp.utcnow() - pandas.Timedelta(days=1)
  cutoff = cutoff.tz_convert(tz=None)
  for path in test_paths:
    newest = tables.timeseries.GetMostRecentPoint(con, path)
    if newest is None or newest['timestamp'] < cutoff:
      yield path
def _FetchTimeseriesWorker(args):
  """Worker initializer: fetch timeseries data from the dashboard into the db.

  NOTE(review): like _FetchBugsWorker, this installs ``Process`` on the
  ``worker_pool`` module for the pool to call once per work item.
  """
  con = sqlite3.connect(args.database_file, timeout=10)
  min_timestamp = cli_utils.DaysAgoToTimestamp(args.days)

  def Process(test_path):
    try:
      if isinstance(test_path, tables.timeseries.Key):
        # Structured keys go through the newer Timeseries2 endpoint.
        params = test_path.AsApiParams()
        params['min_timestamp'] = min_timestamp
        data = dashboard_service.Timeseries2(**params)
      else:
        # Plain string test paths use the legacy endpoint.
        data = dashboard_service.Timeseries(test_path, days=args.days)
    except KeyError:
      # The dashboard has no such series; skip it.
      logging.info('Timeseries not found: %s', test_path)
      return
    timeseries = tables.timeseries.DataFrameFromJson(test_path, data)
    pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries)

  worker_pool.Process = Process
def _ReadTimeseriesFromFile(filename):
  """Load a JSON file holding a list of timeseries descriptors and convert
  each entry into a tables.timeseries.Key."""
  with open(filename, 'r') as f:
    entries = json.load(f)
  return [tables.timeseries.Key.FromDict(entry) for entry in entries]
def FetchTimeseriesData(args):
  """Fetch timeseries data from the dashboard into the local database.

  Test paths come from one of --benchmark, --input-file or --study; they may
  be narrowed with --filters and, with --use-cache, limited to paths whose
  cached data is stale. Optionally post-processes the collected data into a
  CSV for a study.
  """
  def _MatchesAllFilters(test_path):
    return all(f in test_path for f in args.filters)

  with tables.DbSession(args.database_file) as con:
    # Get test_paths.
    if args.benchmark is not None:
      test_paths = dashboard_service.ListTestPaths(
          args.benchmark, sheriff=args.sheriff)
    elif args.input_file is not None:
      test_paths = _ReadTimeseriesFromFile(args.input_file)
    elif args.study is not None:
      test_paths = list(args.study.IterTestPaths())
    else:
      raise ValueError('No source for test paths specified')

    # Apply --filter's to test_paths. Materialize the result: on Python 3,
    # filter() returns an iterator and the len() calls below would raise
    # TypeError.
    if args.filters:
      test_paths = list(filter(_MatchesAllFilters, test_paths))

    num_found = len(test_paths)
    print('%d test paths found!' % num_found)

    # Filter out test_paths already in cache.
    if args.use_cache:
      test_paths = list(_IterStaleTestPaths(con, test_paths))
      num_skipped = num_found - len(test_paths)
      if num_skipped:
        print('(skipping %d test paths already in the database)' % num_skipped)

    # Use worker pool to fetch test path data.
    total_seconds = worker_pool.Run(
        'Fetching data of %d timeseries: ' % len(test_paths),
        _FetchTimeseriesWorker, args, test_paths)
    print('[%.1f test paths per second]' % (len(test_paths) / total_seconds))

  if args.output_csv is not None:
    print()
    print('Post-processing data for study ...')
    dfs = []
    with tables.DbSession(args.database_file) as con:
      for test_path in test_paths:
        df = tables.timeseries.GetTimeSeries(con, test_path)
        dfs.append(df)
    df = studies.PostProcess(pandas.concat(dfs, ignore_index=True))
    with cli_utils.OpenWrite(args.output_csv) as f:
      df.to_csv(f, index=False)
    print('Wrote timeseries data to:', args.output_csv)
|
tobbad/micropython | refs/heads/master | tests/basics/builtin_chr.py | 108 | # test builtin chr (whether or not we support unicode)
# Expected output: "A", then "ValueError".
print(chr(0x41))  # code point 65 -> 'A'
try:
    chr(0x110000)  # one past the top of the Unicode code point range
except ValueError:
    print("ValueError")
|
google/grr | refs/heads/master | grr/client/grr_response_client/unprivileged/filesystem/server_lib.py | 1 | #!/usr/bin/env python
"""Unprivileged filesystem RPC server."""
import abc
import os
import sys
import traceback
from typing import TypeVar, Generic, Optional, Tuple
from grr_response_client.unprivileged import communication
from grr_response_client.unprivileged.filesystem import filesystem
from grr_response_client.unprivileged.filesystem import ntfs
from grr_response_client.unprivileged.filesystem import tsk
from grr_response_client.unprivileged.proto import filesystem_pb2
class Error(Exception):
  """Base class for exceptions in this module."""
class DispatchError(Error):
  """Raised when a request cannot be dispatched: unknown request type,
  a non-Init first request, or a repeated Init."""
class State:
  """State of the filesystem RPC server.

  Contains open files and filesystems.
  """

  def __init__(self):
    # The mounted filesystem; set by the Init request, None before that.
    self.filesystem = None  # type: Optional[filesystem.Filesystem]
    # Registry of currently open file objects, addressed by file id.
    self.files = filesystem.Files()
class ConnectionWrapper:
  """Wraps a connection, adding protobuf serialization."""

  def __init__(self, connection: communication.Connection):
    self._connection = connection

  def Send(self, response: filesystem_pb2.Response, attachment: bytes) -> None:
    """Serializes *response* and sends it together with a raw attachment."""
    self._connection.Send(
        communication.Message(response.SerializeToString(), attachment))

  def Recv(self) -> Tuple[filesystem_pb2.Request, bytes]:
    """Receives and parses a Request, returning it with its attachment."""
    raw_request, attachment = self._connection.Recv()
    request = filesystem_pb2.Request()
    request.ParseFromString(raw_request)
    return request, attachment
class RpcDevice(filesystem.Device):
  """A device implementation which reads data blocks via a connection."""

  def __init__(self, connection: ConnectionWrapper):
    self._connection = connection

  def Read(self, offset: int, size: int) -> bytes:
    """Requests *size* bytes at *offset* from the peer.

    The block contents come back as the raw message attachment, not inside
    the protobuf.
    """
    device_data_request = filesystem_pb2.DeviceDataRequest(
        offset=offset, size=size)
    self._connection.Send(
        filesystem_pb2.Response(device_data_request=device_data_request), b'')
    _, attachment = self._connection.Recv()
    return attachment
class FileDevice(filesystem.Device):
  """A device implementation backed by a file identified by file descriptor."""

  def __init__(self, file_descriptor: int):
    # Wraps (and takes ownership of) the descriptor; no explicit close is
    # performed in this class.
    self._file = os.fdopen(file_descriptor, 'rb')

  def Read(self, offset: int, size: int) -> bytes:
    """Reads up to *size* bytes at absolute *offset*."""
    self._file.seek(offset)
    return self._file.read(size)
# Inner request/response protobuf types handled by an OperationHandler.
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')


class OperationHandler(abc.ABC, Generic[RequestType, ResponseType]):
  """Base class for RPC handlers.

  The purpose is to handle the DeviceDataRequest/DeviceData messages
  common to most RPCs.
  """

  def __init__(self, state: State, request: filesystem_pb2.Request,
               connection: ConnectionWrapper):
    self._state = state
    self._request = request
    self._connection = connection

  def Run(self) -> None:
    """Executes the operation: unpack, handle, then send the reply."""
    request = self.UnpackRequest(self._request)
    response = self.HandleOperation(self._state, request)
    attachment = self.ExtractResponseAttachment(response)
    self._connection.Send(self.PackResponse(response), attachment)

  def CreateDevice(self) -> filesystem.Device:
    """Returns a device that fetches data blocks from the peer over RPC."""
    return RpcDevice(self._connection)

  @abc.abstractmethod
  def HandleOperation(self, state: State, request: RequestType) -> ResponseType:
    """The actual implementation of the RPC."""
    pass

  @abc.abstractmethod
  def PackResponse(self, response: ResponseType) -> filesystem_pb2.Response:
    """Packs an inner Response message into a response RPC message."""
    pass

  @abc.abstractmethod
  def UnpackRequest(self, request: filesystem_pb2.Request) -> RequestType:
    """Extracts an inner Request message from a Request RPC message."""
    pass

  def ExtractResponseAttachment(self, response: ResponseType) -> bytes:
    """Extracts and clears an attachment from the response.

    Default: no attachment.
    """
    return b''
class InitHandler(OperationHandler[filesystem_pb2.InitRequest,
                                   filesystem_pb2.InitResponse]):
  """Implements the Init operation."""

  def HandleOperation(
      self, state: State,
      request: filesystem_pb2.InitRequest) -> filesystem_pb2.InitResponse:
    """Mounts the requested filesystem implementation on *state*."""
    if request.HasField('serialized_device_file_descriptor'):
      # Read directly from an inherited file descriptor.
      device = FileDevice(
          communication.FileDescriptor.FromSerialized(
              request.serialized_device_file_descriptor,
              communication.Mode.READ).ToFileDescriptor())
    else:
      # Fall back to fetching device blocks from the peer over RPC.
      device = self.CreateDevice()
    if request.implementation_type == filesystem_pb2.NTFS:
      state.filesystem = ntfs.NtfsFilesystem(device)
    elif request.implementation_type == filesystem_pb2.TSK:
      state.filesystem = tsk.TskFilesystem(device)
    else:
      raise DispatchError(
          f'Bad implementation type: {request.implementation_type}')
    return filesystem_pb2.InitResponse()

  def PackResponse(
      self, response: filesystem_pb2.InitResponse) -> filesystem_pb2.Response:
    return filesystem_pb2.Response(init_response=response)

  def UnpackRequest(
      self, request: filesystem_pb2.Request) -> filesystem_pb2.InitRequest:
    return request.init_request
class OpenHandler(OperationHandler[filesystem_pb2.OpenRequest,
                                   filesystem_pb2.OpenResponse]):
  """Implements the Open operation."""

  def HandleOperation(
      self, state: State,
      request: filesystem_pb2.OpenRequest) -> filesystem_pb2.OpenResponse:
    """Opens a file by path or by inode (optionally a named stream).

    Returns STALE_INODE when the inode no longer resolves; otherwise the
    new file id plus the file's inode.
    """
    path = request.path if request.HasField('path') else None
    inode = request.inode if request.HasField('inode') else None
    stream_name = request.stream_name if request.HasField(
        'stream_name') else None
    if inode is None:
      file_obj = state.filesystem.Open(path, stream_name)
    else:
      try:
        file_obj = state.filesystem.OpenByInode(inode, stream_name)
      except filesystem.StaleInodeError:
        return filesystem_pb2.OpenResponse(
            status=filesystem_pb2.OpenResponse.Status.STALE_INODE)
    # Register the open file so later requests can refer to it by id.
    file_id = state.files.Add(file_obj)
    return filesystem_pb2.OpenResponse(
        status=filesystem_pb2.OpenResponse.Status.NO_ERROR,
        file_id=file_id,
        inode=file_obj.Inode())

  def PackResponse(
      self, response: filesystem_pb2.OpenResponse) -> filesystem_pb2.Response:
    return filesystem_pb2.Response(open_response=response)

  def UnpackRequest(
      self, request: filesystem_pb2.Request) -> filesystem_pb2.OpenRequest:
    return request.open_request
class ReadHandler(OperationHandler[filesystem_pb2.ReadRequest,
                                   filesystem_pb2.ReadResponse]):
  """Implements the Read operation."""

  def HandleOperation(
      self, state: State,
      request: filesystem_pb2.ReadRequest) -> filesystem_pb2.ReadResponse:
    """Reads a byte range from an open file."""
    # Renamed from `file`, which shadowed the builtin and was inconsistent
    # with the `file_obj` naming used by the other handlers in this module.
    file_obj = state.files.Get(request.file_id)
    data = file_obj.Read(offset=request.offset, size=request.size)
    return filesystem_pb2.ReadResponse(data=data)

  def PackResponse(
      self, response: filesystem_pb2.ReadResponse) -> filesystem_pb2.Response:
    return filesystem_pb2.Response(read_response=response)

  def ExtractResponseAttachment(self,
                                response: filesystem_pb2.ReadResponse) -> bytes:
    """Moves the file contents out of the proto into the raw attachment."""
    attachment = response.data
    response.ClearField('data')
    return attachment

  def UnpackRequest(
      self, request: filesystem_pb2.Request) -> filesystem_pb2.ReadRequest:
    return request.read_request
class StatHandler(OperationHandler[filesystem_pb2.StatRequest,
                                   filesystem_pb2.StatResponse]):
  """Implements the Stat operation."""

  def HandleOperation(
      self, state: State,
      request: filesystem_pb2.StatRequest) -> filesystem_pb2.StatResponse:
    """Returns the stat entry of an open file."""
    file_obj = state.files.Get(request.file_id)
    return filesystem_pb2.StatResponse(entry=file_obj.Stat())

  def PackResponse(
      self, response: filesystem_pb2.StatResponse) -> filesystem_pb2.Response:
    return filesystem_pb2.Response(stat_response=response)

  def UnpackRequest(
      self, request: filesystem_pb2.Request) -> filesystem_pb2.StatRequest:
    return request.stat_request
class ListFilesHandler(OperationHandler[filesystem_pb2.ListFilesRequest,
                                        filesystem_pb2.ListFilesResponse]):
  """Implements the ListFiles operation."""

  def HandleOperation(
      self, state: State, request: filesystem_pb2.ListFilesRequest
  ) -> filesystem_pb2.ListFilesResponse:
    """Returns the directory entries of an open file (directory)."""
    file_obj = state.files.Get(request.file_id)
    return filesystem_pb2.ListFilesResponse(entries=file_obj.ListFiles())

  def PackResponse(
      self,
      response: filesystem_pb2.ListFilesResponse) -> filesystem_pb2.Response:
    return filesystem_pb2.Response(list_files_response=response)

  def UnpackRequest(
      self, request: filesystem_pb2.Request) -> filesystem_pb2.ListFilesRequest:
    return request.list_files_request
class CloseHandler(OperationHandler[filesystem_pb2.CloseRequest,
                                    filesystem_pb2.CloseResponse]):
  """Implements the Close operation."""

  def HandleOperation(
      self, state: State,
      request: filesystem_pb2.CloseRequest) -> filesystem_pb2.CloseResponse:
    """Closes an open file and drops it from the open-files registry."""
    file_obj = state.files.Get(request.file_id)
    file_obj.Close()
    state.files.Remove(request.file_id)
    return filesystem_pb2.CloseResponse()

  def PackResponse(
      self, response: filesystem_pb2.CloseResponse) -> filesystem_pb2.Response:
    return filesystem_pb2.Response(close_response=response)

  def UnpackRequest(
      self, request: filesystem_pb2.Request) -> filesystem_pb2.CloseRequest:
    return request.close_request
class LookupCaseInsensitiveHandler(
    OperationHandler[filesystem_pb2.LookupCaseInsensitiveRequest,
                     filesystem_pb2.LookupCaseInsensitiveResponse]):
  """Implements the LookupCaseInsensitive operation."""

  def HandleOperation(
      self, state: State, request: filesystem_pb2.LookupCaseInsensitiveRequest
  ) -> filesystem_pb2.LookupCaseInsensitiveResponse:
    """Resolves *name* case-insensitively within an open directory."""
    file_obj = state.files.Get(request.file_id)
    result = file_obj.LookupCaseInsensitive(request.name)
    return filesystem_pb2.LookupCaseInsensitiveResponse(name=result)

  def PackResponse(
      self, response: filesystem_pb2.LookupCaseInsensitiveResponse
  ) -> filesystem_pb2.Response:
    return filesystem_pb2.Response(lookup_case_insensitive_response=response)

  def UnpackRequest(
      self, request: filesystem_pb2.Request
  ) -> filesystem_pb2.LookupCaseInsensitiveRequest:
    return request.lookup_case_insensitive_request
def DispatchWrapped(connection: ConnectionWrapper) -> None:
  """Dispatches each incoming request to the proper OperationHandler.

  Runs forever; any failure is reported back to the client as an Exception
  response rather than crashing the server loop.
  """
  handler_by_field = (
      ('init_request', InitHandler),
      ('open_request', OpenHandler),
      ('read_request', ReadHandler),
      ('close_request', CloseHandler),
      ('stat_request', StatHandler),
      ('list_files_request', ListFilesHandler),
      ('lookup_case_insensitive_request', LookupCaseInsensitiveHandler),
  )
  state = State()
  while True:
    try:
      request, att = connection.Recv()

      is_init = request.HasField('init_request')
      if state.filesystem is None and not is_init:
        raise DispatchError('The first request must be Init')
      if state.filesystem is not None and is_init:
        raise DispatchError('Init can be called only once on a connection')

      for field, handler_class in handler_by_field:
        if request.HasField(field):
          break
      else:
        raise DispatchError('No request set.')

      handler_class(state, request, connection).Run()
    except:  # pylint: disable=bare-except
      # Deliberately broad: every failure is serialized back to the client.
      exception = filesystem_pb2.Exception(
          message=str(sys.exc_info()[1]),
          formatted_exception=traceback.format_exc())
      connection.Send(filesystem_pb2.Response(exception=exception), b'')
def Dispatch(connection: communication.Connection):
  """Entry point: serves filesystem RPCs arriving on *connection*."""
  DispatchWrapped(ConnectionWrapper(connection))
|
Lemma1/MAC-POSTS | refs/heads/master | doc_builder/sphinx-contrib/libreoffice/doc/conf.py | 1 | # -*- coding: utf-8 -*-
#
# Sphinx LibreOffice Plugin documentation build configuration file
#
import sys
import os
# -- General configuration ------------------------------------------------
# Extensions
# Sphinx extensions enabled for this documentation build.
extensions = ['sphinxcontrib.libreoffice']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Sphinx LibreOffice Plugin'
copyright = u'2014, Gerard Marull-Paretas'

# The version info for the project (short X.Y version and full release).
version = '0.2'
release = '0.2'

# Patterns, relative to the source directory, to ignore when looking for
# source files.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# The theme
html_theme = 'default'

# -- Options for LaTeX output ---------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'SphinxLibreOfficePlugin.tex',
     u'Sphinx LibreOffice Plugin Documentation',
     u'Gerard Marull-Paretas', 'manual'),
]
|
crazybiocomputing/align2seq | refs/heads/master | matrix/parser.py | 1 | "This program will parse matrix files to convert them into objects usable by JS files"
# Libraries used by the parser.
import json  # serialize the score lists into the output object
import re    # recognize matrix names and numeric score tokens

# Paths of every matrix file shipped with the project. Replaces the
# previous 120+ copy-pasted open()/close() statements; the iteration order
# below reproduces the original output order exactly.
BLOSUM_PATHS = [
    "Blosum/EBLOSUM%s.txt" % suffix
    for suffix in ("30", "35", "40", "45", "50", "55", "60", "62", "62-12",
                   "65", "70", "75", "80", "85", "90", "N")
]
PAM_PATHS = ["PAM/EPAM%d.txt" % n for n in range(10, 501, 10)]

# Output file: a JS assignment ("matrixlist={...}") usable by the JS code.
mat = open("matrixlist.json", "w")
mat.write("matrixlist=")
mat.write("{")


def parser(matrix1):
    """Parse one EMBOSS matrix file object and append it to ``mat``.

    The matrix name is extracted from the header comment lines (either a
    ``blosumNN.iij``-style filename or a ``PAM NN`` pair); the scores are
    every numeric token of the non-comment lines, flattened row by row.
    Writes ``name:[scores],\\n`` to the global output file ``mat``.
    """
    matrix = []
    name = ""
    for line in matrix1.read().split("\n"):
        tokens = line.split(" ")
        for k in range(len(tokens)):
            if tokens[0] == "#":
                # Header comment: look for the matrix name.
                if (re.match(r"blosum[0-9]{,4}\.iij", tokens[k])
                        or re.match(r"blosum[0-9]{,4}\_[0-9]\.iij", tokens[k])
                        or re.match(r"blosumn", tokens[k])):
                    name = str(tokens[k].split(".")[0])
                # Guard k + 1: the original indexed past the end when "PAM"
                # was the last token on a line.
                elif (tokens[k] == "PAM" and k + 1 < len(tokens)
                      and re.match(r"[0-9]{,4}", tokens[k + 1])):
                    name = str(tokens[k] + tokens[k + 1])
            else:
                # Data row: collect every (possibly negative) numeric token.
                if re.match(r"[-][0-9]", tokens[k]) or re.match(r"[0-9]", tokens[k]):
                    matrix.append(float(tokens[k]))
    # Write this matrix as one "name:[...]," entry.
    mat.write(name)
    mat.write(":")
    mat.write(json.dumps(matrix))
    mat.write(",\n")


# Run the parser over every matrix file; `with` guarantees each input file
# is closed even if parsing fails.
for path in BLOSUM_PATHS + PAM_PATHS:
    with open(path) as matrix_file:
        parser(matrix_file)

mat.write("}")
mat.close()
bqbn/addons-server | refs/heads/master | src/olympia/addons/management/__init__.py | 12133432 | |
berkeley-stat159/project-alpha | refs/heads/master | code/utils/tests/__init__.py | 12133432 | |
lochiiconnectivity/libcloud | refs/heads/trunk | libcloud/common/nfsn.py | 29 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import random
import string
import time
from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import JsonResponse
from libcloud.common.types import InvalidCredsError, ProviderError
from libcloud.utils.py3 import basestring, httplib, urlencode
SALT_CHARACTERS = string.ascii_letters + string.digits
class NFSNException(ProviderError):
    """ProviderError that additionally carries an NFSN-specific error code."""

    def __init__(self, value, http_code, code, driver=None):
        # NFSN-specific error identifier (semantics defined by the API).
        self.code = code
        super(NFSNException, self).__init__(value, http_code, driver)
class NFSNResponse(JsonResponse):

    def parse_error(self):
        """Build a human-readable error message from an NFSN error response.

        NFSN error bodies may carry an ``error`` field, a ``debug`` field,
        or both. If we only have one of "error" or "debug", use the one that
        we have; if we have both, use both with a space character in between
        them. The HTTP status code is always appended.

        Raises:
            InvalidCredsError: when the response status is 401.
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Invalid provider credentials')

        body = self.parse_body()

        if isinstance(body, basestring):
            return body + ' (HTTP Code: %d)' % self.status

        error = body.get('error', None)
        debug = body.get('debug', None)

        # Bug fix: the previous implementation concatenated `error` with
        # itself when only `error` was present.
        if error is not None and debug is not None:
            value = error + ' ' + debug
        elif error is not None:
            value = error
        elif debug is not None:
            value = debug
        else:
            value = 'No message specified'

        return value + ' (HTTP Code: %d)' % self.status
class NFSNConnection(ConnectionUserAndKey):
    """Connection to the NearlyFreeSpeech.NET API.

    Adds the X-NFSN-Authentication header described at
    https://members.nearlyfreespeech.net/wiki/API/Introduction
    """
    host = 'api.nearlyfreespeech.net'
    responseCls = NFSNResponse
    allow_insecure = False

    def _header(self, action, data):
        """ Build the contents of the X-NFSN-Authentication HTTP header. See
        https://members.nearlyfreespeech.net/wiki/API/Introduction for
        more explanation. """
        login = self.user_id
        timestamp = self._timestamp()
        salt = self._salt()
        api_key = self.key
        data = urlencode(data)
        data_hash = hashlib.sha1(data.encode('utf-8')).hexdigest()

        # Renamed from `string`: the previous name shadowed the stdlib
        # `string` module imported at the top of this file.
        signing_string = ';'.join(
            (login, timestamp, salt, api_key, action, data_hash))
        string_hash = hashlib.sha1(signing_string.encode('utf-8')).hexdigest()

        return ';'.join((login, timestamp, salt, string_hash))

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        """ Add the X-NFSN-Authentication header to an HTTP request. """
        if not headers:
            headers = {}
        if not params:
            params = {}
        header = self._header(action, data)
        headers['X-NFSN-Authentication'] = header
        if method == 'POST':
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        return ConnectionUserAndKey.request(self, action, params, data,
                                            headers, method)

    def encode_data(self, data):
        """ NFSN expects the body to be regular key-value pairs that are not
        JSON-encoded. """
        if data:
            data = urlencode(data)
        return data

    def _salt(self):
        """ Return a 16-character alphanumeric string generated with a
        cryptographically secure RNG. """
        r = random.SystemRandom()
        return ''.join(r.choice(SALT_CHARACTERS) for _ in range(16))

    def _timestamp(self):
        """ Return the current number of seconds since the Unix epoch,
        as a string. """
        return str(int(time.time()))
|
mfherbst/spack | refs/heads/develop | var/spack/repos/builtin/packages/py-traceback2/package.py | 5 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyTraceback2(PythonPackage):
    """Backports of the traceback module"""

    homepage = "https://github.com/testing-cabal/traceback2"
    url = "https://pypi.io/packages/source/t/traceback2/traceback2-1.4.0.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('1.4.0', '9e9723f4d70bfc6308fa992dd193c400')

    depends_on('py-setuptools', type='build')
    depends_on('py-linecache2', type=('build', 'run'))
    depends_on('py-pbr', type=('build', 'run'))
|
sdague/home-assistant | refs/heads/dev | homeassistant/components/maxcube/__init__.py | 16 | """Support for the MAX! Cube LAN Gateway."""
import logging
from socket import timeout
from threading import Lock
import time
from maxcube.connection import MaxCubeConnection
from maxcube.cube import MaxCube
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_SCAN_INTERVAL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 62910
DOMAIN = "maxcube"
DATA_KEY = "maxcube"
NOTIFICATION_ID = "maxcube_notification"
NOTIFICATION_TITLE = "Max!Cube gateway setup"
CONF_GATEWAYS = "gateways"
CONFIG_GATEWAY = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SCAN_INTERVAL, default=300): cv.time_period,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_GATEWAYS, default={}): vol.All(
cv.ensure_list, [CONFIG_GATEWAY]
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Establish connection to MAX! Cube."""
    hass.data.setdefault(DATA_KEY, {})

    gateway_confs = config[DOMAIN][CONF_GATEWAYS]
    failures = 0
    for conf in gateway_confs:
        address = conf[CONF_HOST]
        tcp_port = conf[CONF_PORT]
        interval = conf[CONF_SCAN_INTERVAL].total_seconds()
        try:
            cube = MaxCube(MaxCubeConnection(address, tcp_port))
        except timeout as ex:
            # Connecting failed: tell the user via a persistent
            # notification and keep trying the remaining gateways.
            _LOGGER.error("Unable to connect to Max!Cube gateway: %s", str(ex))
            hass.components.persistent_notification.create(
                f"Error: {ex}<br />You will need to restart Home Assistant after fixing.",
                title=NOTIFICATION_TITLE,
                notification_id=NOTIFICATION_ID,
            )
            failures += 1
        else:
            hass.data[DATA_KEY][address] = MaxCubeHandle(cube, interval)

    # Abort the whole component only if every configured gateway failed.
    if failures >= len(gateway_confs):
        return False

    for platform in ("climate", "binary_sensor"):
        load_platform(hass, platform, DOMAIN, {}, config)
    return True
class MaxCubeHandle:
    """Keep the cube instance in one place and centralize the update."""

    def __init__(self, cube, scan_interval):
        """Initialize the Cube Handle."""
        self.cube = cube
        self.scan_interval = scan_interval
        # Serializes update() calls arriving from multiple entity threads.
        self.mutex = Lock()
        self._updatets = time.monotonic()

    def update(self):
        """Pull the latest data from the MAX! Cube."""
        # Acquire mutex to prevent simultaneous update from multiple threads
        with self.mutex:
            elapsed = time.monotonic() - self._updatets
            # Throttle: refresh at most once per scan_interval seconds.
            if elapsed < self.scan_interval:
                _LOGGER.debug("Skipping update")
                return
            _LOGGER.debug("Updating")
            try:
                self.cube.update()
            except timeout:
                _LOGGER.error("Max!Cube connection failed")
                return False
            self._updatets = time.monotonic()
|
mxOBS/deb-pkg_trusty_chromium-browser | refs/heads/master | third_party/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py | 499 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
    """Accept every incoming handshake without extra validation."""
def web_socket_transfer_data(request):
    """Report which handler ran, echoing the resource and subprotocol."""
    message = 'sub/plain_wsh.py is called for %s, %s' % (
        request.ws_resource, request.ws_protocol)
    request.connection.write(message)
# vi:sts=4 sw=4 et
|
leeseulstack/openstack | refs/heads/master | neutron/plugins/vmware/vshield/tasks/constants.py | 36 | # Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class TaskStatus(object):
    """Execution status reported for a task.

    Status/execution callbacks hand one of these values back to the task
    manager to describe how a task is progressing; the same values encode
    the final outcome once the task has finished.
    """

    PENDING = 1    # still running
    COMPLETED = 2  # finished successfully
    ERROR = 3      # finished with an error
    ABORT = 4      # cancelled before completion
class TaskState(object):
    """Lifecycle stage of a task inside the task manager.

    NONE: queued, not yet picked up
    START: dequeued and about to be executed
    EXECUTED: the execution call has completed
    STATUS: periodic status polling for this task is in progress
    RESULT: finished; the result is available
    """

    NONE = -1
    START = 0
    EXECUTED = 1
    STATUS = 2
    RESULT = 3
|
patrick-winter-knime/deep-learning-on-molecules | refs/heads/master | smiles-vhts/prepare_multitarget.py | 1 | import re
import h5py
import argparse
from util import preprocess, partition_ref, oversample_ref, shuffle, actives_counter
from data_structures import reference_data_set
def get_arguments():
    """Build the command line interface and return the parsed options."""
    arg_parser = argparse.ArgumentParser(description='Prepares the given data set for training')
    arg_parser.add_argument('data', type=str, help='The data set containing the SMILES, classes and partitions')
    arg_parser.add_argument('--oversample', action='store_true',
                            help='Oversample underrepresented classes in the training dataset (default: False)')
    arg_parser.add_argument('--shuffle', action='store_true', help='Shuffle the training data sets (default: False)')
    return arg_parser.parse_args()
# Parse the CLI options and derive the output file prefix from the input
# path (everything before the last '.').
args = get_arguments()
prefix = args.data[:args.data.rfind('.')]
# Encode the SMILES strings once into index/matrix form; shared by all targets.
preprocess.preprocess(args.data, prefix + '-indices.h5', prefix + '-smiles_matrices.h5')
# Collect target identifiers: every data set named '<digits>-classes'
# in the source HDF5 file contributes one id (the digits prefix).
ids = []
source_hdf5 = h5py.File(args.data, 'r')
regex = re.compile('[0-9]+-classes')
for data_set in source_hdf5.keys():
    data_set = str(data_set)
    if regex.match(data_set):
        # Strip the 8-character '-classes' suffix to obtain the id.
        ids.append(data_set[:-8])
source_hdf5.close()
# Split each target into train (partition 1) and validation (partition 2).
for ident in ids:
    partition_ref.write_partitions(args.data, {1: 'train', 2: 'validate'}, ident)
if args.oversample:
    for ident in ids:
        oversample_ref.oversample(prefix + '-' + ident + '-train.h5', args.data, ident)
if args.shuffle:
    for ident in ids:
        shuffle.shuffle(prefix + '-' + ident + '-train.h5')
# Record the number of actives of each validation set as an HDF5 attribute.
# NOTE(review): h5py.File defaults to append mode ('a') here although the
# file is only read from — consider opening with mode 'r'; confirm intent.
classes_h5 = h5py.File(args.data)
for ident in ids:
    val_h5 = h5py.File(prefix + '-' + ident + '-validate.h5', 'a')
    val_classes = reference_data_set.ReferenceDataSet(val_h5['ref'], classes_h5[ident + '-classes'])
    print(ident)
    val_h5.attrs['actives'] = actives_counter.count(val_classes)
    val_h5.close()
classes_h5.close()
|
Workday/OpenFrame | refs/heads/master | third_party/tlslite/tlslite/integration/tlsasyncdispatchermixin.py | 113 | # Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite + asyncore."""
import asyncore
from tlslite.tlsconnection import TLSConnection
from .asyncstatemachine import AsyncStateMachine
class TLSAsyncDispatcherMixIn(AsyncStateMachine):
    """This class can be "mixed in" with an
    L{asyncore.dispatcher} to add TLS support.
    This class essentially sits between the dispatcher and the select
    loop, intercepting events and only calling the dispatcher when
    applicable.
    In the case of handle_read(), a read operation will be activated,
    and when it completes, the bytes will be placed in a buffer where
    the dispatcher can retrieve them by calling recv(), and the
    dispatcher's handle_read() will be called.
    In the case of handle_write(), the dispatcher's handle_write() will
    be called, and when it calls send(), a write operation will be
    activated.
    To use this class, you must combine it with an asyncore.dispatcher,
    and pass in a handshake operation with setServerHandshakeOp().
    Below is an example of using this class with medusa. This class is
    mixed in with http_channel to create http_tls_channel. Note:
    1. the mix-in is listed first in the inheritance list
    2. the input buffer size must be at least 16K, otherwise the
    dispatcher might not read all the bytes from the TLS layer,
    leaving some bytes in limbo.
    3. IE seems to have a problem receiving a whole HTTP response in a
    single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't
    be displayed on IE.
    Add the following text into 'start_medusa.py', in the 'HTTP Server'
    section::
        from tlslite import *
        s = open("./serverX509Cert.pem").read()
        x509 = X509()
        x509.parse(s)
        certChain = X509CertChain([x509])
        s = open("./serverX509Key.pem").read()
        privateKey = parsePEMKey(s, private=True)
        class http_tls_channel(TLSAsyncDispatcherMixIn,
                               http_server.http_channel):
            ac_in_buffer_size = 16384
            def __init__ (self, server, conn, addr):
                http_server.http_channel.__init__(self, server, conn, addr)
                TLSAsyncDispatcherMixIn.__init__(self, conn)
                self.tlsConnection.ignoreAbruptClose = True
                self.setServerHandshakeOp(certChain=certChain,
                                          privateKey=privateKey)
        hs.channel_class = http_tls_channel
    If the TLS layer raises an exception, the exception will be caught
    in asyncore.dispatcher, which will call close() on this class. The
    TLS layer always closes the TLS connection before raising an
    exception, so the close operation will complete right away, causing
    asyncore.dispatcher.close() to be called, which closes the socket
    and removes this instance from the asyncore loop.
    """

    def __init__(self, sock=None):
        AsyncStateMachine.__init__(self)
        if sock:
            self.tlsConnection = TLSConnection(sock)
        #Calculate the sibling I'm being mixed in with.
        #This is necessary since we override functions
        #like readable(), handle_read(), etc., but we
        #also want to call the sibling's versions.
        for cl in self.__class__.__bases__:
            if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine:
                self.siblingClass = cl
                break
        else:
            raise AssertionError()

    def readable(self):
        # The state machine's answer takes priority; None means
        # "no opinion", so fall through to the sibling dispatcher.
        # (Identity comparison: the value may be a boolean.)
        result = self.wantsReadEvent()
        if result is not None:
            return result
        return self.siblingClass.readable(self)

    def writable(self):
        # Same priority scheme as readable().
        result = self.wantsWriteEvent()
        if result is not None:
            return result
        return self.siblingClass.writable(self)

    def handle_read(self):
        # Forward select-loop read events into the TLS state machine.
        self.inReadEvent()

    def handle_write(self):
        # Forward select-loop write events into the TLS state machine.
        self.inWriteEvent()

    def outConnectEvent(self):
        self.siblingClass.handle_connect(self)

    def outCloseEvent(self):
        asyncore.dispatcher.close(self)

    def outReadEvent(self, readBuffer):
        # Stash decrypted bytes for the sibling to pick up via recv().
        self.readBuffer = readBuffer
        self.siblingClass.handle_read(self)

    def outWriteEvent(self):
        self.siblingClass.handle_write(self)

    def recv(self, bufferSize=16384):
        # The buffer must be large enough to drain a whole TLS record,
        # and recv() may only be called after a read completed.
        if bufferSize < 16384 or self.readBuffer is None:
            raise AssertionError()
        returnValue = self.readBuffer
        self.readBuffer = None
        return returnValue

    def send(self, writeBuffer):
        # Activate a TLS write operation; report the bytes as accepted.
        self.setWriteOp(writeBuffer)
        return len(writeBuffer)

    def close(self):
        # If a TLS connection exists, close it through the state machine;
        # otherwise close the raw dispatcher directly.
        if hasattr(self, "tlsConnection"):
            self.setCloseOp()
        else:
            asyncore.dispatcher.close(self)
|
NicoSantangelo/sublime-text-trello | refs/heads/master | lib/requests/packages/charade/charsetgroupprober.py | 206 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
    """Composite prober that feeds data to a group of child probers and
    reports the best-guessing child's charset and confidence."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mActiveNum = 0
        self._mProbers = []
        self._mBestGuessProber = None

    def reset(self):
        CharSetProber.reset(self)
        self._mActiveNum = 0
        for child in self._mProbers:
            if not child:
                continue
            child.reset()
            child.active = True
            self._mActiveNum += 1
        self._mBestGuessProber = None

    def get_charset_name(self):
        # Lazily determine the best guess if it is not known yet.
        if not self._mBestGuessProber:
            self.get_confidence()
            if not self._mBestGuessProber:
                return None
        return self._mBestGuessProber.get_charset_name()

    def feed(self, aBuf):
        for child in self._mProbers:
            if not (child and child.active):
                continue
            status = child.feed(aBuf)
            if not status:
                continue
            if status == constants.eFoundIt:
                # A definite match wins immediately.
                self._mBestGuessProber = child
                return self.get_state()
            if status == constants.eNotMe:
                child.active = False
                self._mActiveNum -= 1
                if self._mActiveNum <= 0:
                    # Every child gave up, so the group gives up too.
                    self._mState = constants.eNotMe
                    return self.get_state()
        return self.get_state()

    def get_confidence(self):
        state = self.get_state()
        if state == constants.eFoundIt:
            return 0.99
        if state == constants.eNotMe:
            return 0.01
        # Otherwise pick the child with the highest confidence.
        best = 0.0
        self._mBestGuessProber = None
        for child in self._mProbers:
            if not child:
                continue
            if not child.active:
                if constants._debug:
                    sys.stderr.write(child.get_charset_name()
                                     + ' not active\n')
                continue
            confidence = child.get_confidence()
            if constants._debug:
                sys.stderr.write('%s confidence = %s\n' %
                                 (child.get_charset_name(), confidence))
            if best < confidence:
                best = confidence
                self._mBestGuessProber = child
        if not self._mBestGuessProber:
            return 0.0
        return best
|
lizzha/azure-linux-extensions | refs/heads/master | RDMAUpdate/main/Common.py | 5 | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
class CommonVariables:
    """Shared constants for the RDMA update extension for Linux."""

    # Packaging layout inside the extension bundle.
    azure_path = 'main/azure'
    utils_path_name = 'Utils'

    # Extension identity and published download location.
    extension_name = 'RDMAUpdateForLinux'
    extension_version = "0.1.0.8"
    extension_type = extension_name
    extension_media_link = 'https://andliu.blob.core.windows.net/extensions/' + extension_name + '-' + str(extension_version) + '.zip'
    extension_label = 'Windows Azure RDMA Update Extension for Linux IaaS'
    extension_description = extension_label

    # Configuration: name of the driver wrapper package to install.
    wrapper_package_name = 'msft-rdma-drivers'

    # Process exit / error codes.
    process_success = 0
    common_failed = 1
    install_hv_utils_failed = 2
    nd_driver_detect_error = 3
    driver_version_not_found = 4
    unknown_error = 5
    package_not_found = 6
    package_install_failed = 7

    # Log severity labels.
    InfoLevel = 'Info'
    WarningLevel = 'Warning'
    ErrorLevel = 'Error'

    # Result codes for the RDMA driver version check.
    UpToDate = 0
    OutOfDate = 1
    DriverVersionNotFound = 3
    Unknown = -1
|
AdamBSteele/yournewhomepage | refs/heads/master | fbone/modules/frontend/__init__.py | 25 | # -*- coding: utf-8 -*-
from .views import frontend
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.