| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
|---|---|---|---|---|
LordDamionDevil/Lony
|
refs/heads/master
|
lib/youtube_dl/extractor/normalboots.py
|
67
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .jwplatform import JWPlatformIE
from ..utils import (
unified_strdate,
)
class NormalbootsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?normalboots\.com/video/(?P<id>[0-9a-z-]*)/?$'
_TEST = {
'url': 'http://normalboots.com/video/home-alone-games-jontron/',
'info_dict': {
'id': 'home-alone-games-jontron',
'ext': 'mp4',
'title': 'Home Alone Games - JonTron - NormalBoots',
'description': 'Jon is late for Christmas. Typical. Thanks to: Paul Ritchey for Co-Writing/Filming: http://www.youtube.com/user/ContinueShow Michael Azzi for Christmas Intro Animation: http://michafrar.tumblr.com/ Jerrod Waters for Christmas Intro Music: http://www.youtube.com/user/xXJerryTerryXx Casey Ormond for ‘Tense Battle Theme’:\xa0http://www.youtube.com/Kiamet/',
'uploader': 'JonTron',
'upload_date': '20140125',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['JWPlatform'],
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_uploader = self._html_search_regex(
r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>',
webpage, 'uploader', fatal=False)
video_upload_date = unified_strdate(self._html_search_regex(
r'<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>',
webpage, 'date', fatal=False))
jwplatform_url = JWPlatformIE._extract_url(webpage)
return {
'_type': 'url_transparent',
'id': video_id,
'url': jwplatform_url,
'ie_key': JWPlatformIE.ie_key(),
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'uploader': video_uploader,
'upload_date': video_upload_date,
}
|
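A minimal usage sketch for the extractor above, driven through youtube-dl's public `YoutubeDL` API (assumes the `youtube_dl` package is installed; the URL is the one from the `_TEST` block):

```python
import youtube_dl

# Metadata-only extraction; NormalbootsIE resolves the page to a JWPlatform URL.
with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(
        'http://normalboots.com/video/home-alone-games-jontron/',
        download=False)
    print(info.get('title'), info.get('uploader'), info.get('upload_date'))
```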
Elico-Corp/odoo_OCB
|
refs/heads/9.0
|
addons/l10n_hr/__openerp__.py
|
18
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Author: Goran Kliska
# mail: goran.kliska(AT)slobodni-programi.hr
# Copyright (C) 2011- Slobodni programi d.o.o., Zagreb
# Contributions:
# Tomislav Bošnjaković, Storm Computers d.o.o. :
# - account types
{
"name": "Croatia - Accounting (RRIF 2012)",
"description": """
Croatian localisation.
======================
Author: Goran Kliska, Slobodni programi d.o.o., Zagreb
https://www.slobodni-programi.hr
Contributions:
Tomislav Bošnjaković, Storm Computers: account types
Ivan Vađić, Slobodni programi: account types
Description:
Croatian Chart of Accounts (RRIF ver.2012)
RRIF chart of accounts for entrepreneurs, 2012 edition
Account types
Chart of accounts based on RRIF, adapted by shortening names and adding analytic accounts
Tax groups according to the tax return
Taxes of the VAT form
Other taxes
Basic fiscal positions
Data sources:
https://www.rrif.hr/dok/preuzimanje/rrif-rp2011.rar
https://www.rrif.hr/dok/preuzimanje/rrif-rp2012.rar
""",
"version": "13.0",
"author": "OpenERP Croatian Community",
'category': 'Localization',
"website": "https://code.launchpad.net/openobject-croatia",
'depends': [
'account',
],
'data': [
'data/account_chart_template.xml',
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account_chart_tag.xml',
'data/account.tax.template.csv',
'data/fiscal_position_template.xml',
'data/account_chart_template.yml',
],
"demo": [],
'test': [],
"active": False,
"installable": True,
}
|
rue89-tech/edx-platform
|
refs/heads/master
|
common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py
|
189
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TrackingLog.host'
db.add_column('track_trackinglog', 'host',
self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
keep_default=False)
# Changing field 'TrackingLog.event_type'
db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=512))
# Changing field 'TrackingLog.page'
db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=512, null=True))
def backwards(self, orm):
# Deleting field 'TrackingLog.host'
db.delete_column('track_trackinglog', 'host')
# Changing field 'TrackingLog.event_type'
db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'TrackingLog.page'
db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True))
models = {
'track.trackinglog': {
'Meta': {'object_name': 'TrackingLog'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['track']
|
vvuk/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/executors/executorservo.py
|
35
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import base64
import hashlib
import httplib
import json
import os
import subprocess
import tempfile
import threading
import traceback
import urlparse
import uuid
from collections import defaultdict
from mozprocess import ProcessHandler
from .base import (ExecutorException,
Protocol,
RefTestImplementation,
testharness_result_converter,
reftest_result_converter,
WdspecExecutor)
from .process import ProcessTestExecutor
from ..browsers.base import browser_command
from ..wpttest import WdspecResult, WdspecSubtestResult
from ..webdriver_server import ServoDriverServer
from .executormarionette import WdspecRun
pytestrunner = None
render_arg = None
webdriver = None
extra_timeout = 5 # seconds
def do_delayed_imports():
global render_arg
from ..browsers.servo import render_arg
hosts_text = """127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
"""
def make_hosts_file():
hosts_fd, hosts_path = tempfile.mkstemp()
with os.fdopen(hosts_fd, "w") as f:
f.write(hosts_text)
return hosts_path
class ServoTestharnessExecutor(ProcessTestExecutor):
convert_result = testharness_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
pause_after_test=False):
do_delayed_imports()
ProcessTestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.pause_after_test = pause_after_test
self.result_data = None
self.result_flag = None
self.protocol = Protocol(self, browser)
self.hosts_path = make_hosts_file()
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
ProcessTestExecutor.teardown(self)
def do_test(self, test):
self.result_data = None
self.result_flag = threading.Event()
args = [render_arg(self.browser.render_backend), "--hard-fail", "-u", "Servo/wptrunner",
"-Z", "replace-surrogates", "-z", self.test_url(test)]
for stylesheet in self.browser.user_stylesheets:
args += ["--user-stylesheet", stylesheet]
for pref, value in test.environment.get('prefs', {}).iteritems():
args += ["--pref", "%s=%s" % (pref, value)]
args += self.browser.binary_args
debug_args, command = browser_command(self.binary, args, self.debug_info)
self.command = command
if self.pause_after_test:
self.command.remove("-z")
self.command = debug_args + self.command
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
if not self.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
onFinish=self.on_finish,
env=env,
storeOutput=False)
self.proc.run()
else:
self.proc = subprocess.Popen(self.command, env=env)
try:
timeout = test.timeout * self.timeout_multiplier
# Now wait to get the output we expect, or until we reach the timeout
if not self.interactive and not self.pause_after_test:
wait_timeout = timeout + 5
self.result_flag.wait(wait_timeout)
else:
wait_timeout = None
self.proc.wait()
proc_is_running = True
if self.result_flag.is_set():
if self.result_data is not None:
result = self.convert_result(test, self.result_data)
else:
self.proc.wait()
result = (test.result_cls("CRASH", None), [])
proc_is_running = False
else:
result = (test.result_cls("TIMEOUT", None), [])
if proc_is_running:
if self.pause_after_test:
self.logger.info("Pausing until the browser exits")
self.proc.wait()
else:
self.proc.kill()
except KeyboardInterrupt:
self.proc.kill()
raise
return result
def on_output(self, line):
prefix = "ALERT: RESULT: "
line = line.decode("utf8", "replace")
if line.startswith(prefix):
self.result_data = json.loads(line[len(prefix):])
self.result_flag.set()
else:
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
def on_finish(self):
self.result_flag.set()
class TempFilename(object):
def __init__(self, directory):
self.directory = directory
self.path = None
def __enter__(self):
self.path = os.path.join(self.directory, str(uuid.uuid4()))
return self.path
def __exit__(self, *args, **kwargs):
try:
os.unlink(self.path)
except OSError:
pass
class ServoRefTestExecutor(ProcessTestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
screenshot_cache=None, debug_info=None, pause_after_test=False):
do_delayed_imports()
ProcessTestExecutor.__init__(self,
browser,
server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = Protocol(self, browser)
self.screenshot_cache = screenshot_cache
self.implementation = RefTestImplementation(self)
self.tempdir = tempfile.mkdtemp()
self.hosts_path = make_hosts_file()
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
os.rmdir(self.tempdir)
ProcessTestExecutor.teardown(self)
def screenshot(self, test, viewport_size, dpi):
full_url = self.test_url(test)
with TempFilename(self.tempdir) as output_path:
debug_args, command = browser_command(
self.binary,
[render_arg(self.browser.render_backend), "--hard-fail", "--exit",
"-u", "Servo/wptrunner", "-Z", "disable-text-aa,load-webfonts-synchronously,replace-surrogates",
"--output=%s" % output_path, full_url] + self.browser.binary_args,
self.debug_info)
for stylesheet in self.browser.user_stylesheets:
command += ["--user-stylesheet", stylesheet]
for pref, value in test.environment.get('prefs', {}).iteritems():
command += ["--pref", "%s=%s" % (pref, value)]
command += ["--resolution", viewport_size or "800x600"]
if dpi:
command += ["--device-pixel-ratio", dpi]
# Run ref tests in headless mode
command += ["-z"]
self.command = debug_args + command
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
if not self.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
env=env)
try:
self.proc.run()
timeout = test.timeout * self.timeout_multiplier + 5
rv = self.proc.wait(timeout=timeout)
except KeyboardInterrupt:
self.proc.kill()
raise
else:
self.proc = subprocess.Popen(self.command,
env=env)
try:
rv = self.proc.wait()
except KeyboardInterrupt:
self.proc.kill()
raise
if rv is None:
self.proc.kill()
return False, ("EXTERNAL-TIMEOUT", None)
if rv != 0 or not os.path.exists(output_path):
return False, ("CRASH", None)
with open(output_path) as f:
# Might need to strip variable headers or something here
data = f.read()
return True, base64.b64encode(data)
def do_test(self, test):
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def on_output(self, line):
line = line.decode("utf8", "replace")
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
class ServoWdspecProtocol(Protocol):
def __init__(self, executor, browser):
self.do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.session = None
self.server = None
def setup(self, runner):
try:
self.server = ServoDriverServer(self.logger, binary=self.browser.binary, binary_args=self.browser.binary_args, render_backend=self.browser.render_backend)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.logger.info(
"Establishing new WebDriver session with %s" % self.server.url)
self.session = webdriver.Session(
self.server.host, self.server.port, self.server.base_path)
except Exception:
self.logger.error(traceback.format_exc())
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
if self.server is not None:
try:
if self.session.session_id is not None:
self.session.end()
except Exception:
pass
if self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
conn = httplib.HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
def do_delayed_imports(self):
global pytestrunner, webdriver
from . import pytestrunner
import webdriver
class ServoWdspecExecutor(WdspecExecutor):
def __init__(self, browser, server_config,
timeout_multiplier=1, close_after_done=True, debug_info=None,
**kwargs):
WdspecExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = ServoWdspecProtocol(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session,
test.path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session, path, timeout):
harness_result = ("OK", None)
subtest_results = pytestrunner.run(path, session, timeout=timeout)
return (harness_result, subtest_results)
|
ktsitsikas/odemis
|
refs/heads/master
|
src/odemis/gui/comp/radio.py
|
2
|
# -*- coding: utf-8 -*-
"""
@author: Rinze de Laat
Copyright © 2012 Rinze de Laat, Delmic
Custom (graphical) radio button control.
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
import logging
from odemis import gui
from odemis.gui.comp.buttons import GraphicRadioButton
import wx
class GraphicalRadioButtonControl(wx.Panel):
def __init__(self, *args, **kwargs):
#self.bnt_width = kwargs.pop("bnt_width", 32)
self.choices = kwargs.pop("choices", [])
self.buttons = []
self.labels = kwargs.pop("labels", [])
self.units = kwargs.pop("units", None)
wx.Panel.__init__(self, *args, **kwargs)
self.SetBackgroundColour(self.Parent.GetBackgroundColour())
sizer = wx.BoxSizer(wx.HORIZONTAL)
for choice, label in zip(self.choices, self.labels):
btn = GraphicRadioButton(self, value=choice, style=wx.ALIGN_CENTER, label=label,
height=16)
btn.SetForegroundColour("#111111")
self.buttons.append(btn)
sizer.Add(btn, flag=wx.RIGHT, border=5)
btn.Bind(wx.EVT_BUTTON, self.OnClick)
btn.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
if self.units:
lbl = wx.StaticText(self, -1, self.units)
lbl.SetForegroundColour(gui.FG_COLOUR_MAIN)
sizer.Add(lbl, flag=wx.RIGHT, border=5)
self.SetSizer(sizer)
def _reset_buttons(self, btn=None):
for button in self.buttons:
if button != btn:
button.SetToggle(False)
def SetValue(self, value):
logging.debug("Set radio button control to %s", value)
for btn in self.buttons:
btn.SetToggle(btn.value == value)
def GetValue(self):
for btn in self.buttons:
if btn.GetToggle():
return btn.value
def OnKeyUp(self, evt):
btn = evt.GetEventObject()
if btn.hasFocus and evt.GetKeyCode() == ord(" "):
self._reset_buttons(btn)
btn.up = False
btn.Notify()
btn.Refresh()
def OnClick(self, evt):
btn = evt.GetEventObject()
self._reset_buttons(btn)
#if not btn.GetToggle():
evt.Skip()
|
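A hypothetical usage sketch for `GraphicalRadioButtonControl` (assumes a running `wx.App`, an existing parent window, and an installed Odemis GUI environment; the choices, labels and units are illustrative):

```python
import wx
from odemis.gui.comp.radio import GraphicalRadioButtonControl

app = wx.App()
frame = wx.Frame(None)
ctrl = GraphicalRadioButtonControl(
    frame, choices=[1, 2, 5], labels=["1x", "2x", "5x"], units="s")
ctrl.SetValue(2)            # toggles the button whose value is 2
assert ctrl.GetValue() == 2
frame.Show()
app.MainLoop()
```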
avoinsystems/account-financial-tools
|
refs/heads/12.0
|
account_chart_update/__manifest__.py
|
1
|
# Copyright 2016 Jairo Llopis <jairo.llopis@tecnativa.com>
# Copyright 2016 Jacques-Etienne Baudoux <je@bcim.be>
# Copyright 2016 Sylvain Van Hoof <sylvain@okia.be>
# Copyright 2015-2018 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Detect changes and update the Account Chart from a template",
"summary": "Wizard to update a company's account chart from a template",
"version": "12.0.1.0.0",
"author": "Tecnativa, "
"BCIM, "
"Okia, "
"Odoo Community Association (OCA)",
"website": "http://github.com/OCA/account-financial-tools",
"depends": ["account"],
"category": "Accounting",
"license": "AGPL-3",
"data": [
"wizard/wizard_chart_update_view.xml",
"views/account_config_settings_view.xml",
],
"installable": True,
}
|
PeterFaiman/ruby-grpc-minimal
|
refs/heads/v1.8.x-minimal
|
src/python/grpcio_tests/tests/interop/_intraop_test_case.py
|
16
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for unit tests of the interoperability test code."""
from tests.interop import methods
class IntraopTestCase(object):
"""Unit test methods.
This class must be mixed in with unittest.TestCase and a class that defines
setUp and tearDown methods that manage a stub attribute.
"""
def testEmptyUnary(self):
methods.TestCase.EMPTY_UNARY.test_interoperability(self.stub, None)
def testLargeUnary(self):
methods.TestCase.LARGE_UNARY.test_interoperability(self.stub, None)
def testServerStreaming(self):
methods.TestCase.SERVER_STREAMING.test_interoperability(self.stub, None)
def testClientStreaming(self):
methods.TestCase.CLIENT_STREAMING.test_interoperability(self.stub, None)
def testPingPong(self):
methods.TestCase.PING_PONG.test_interoperability(self.stub, None)
def testCancelAfterBegin(self):
methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(self.stub,
None)
def testCancelAfterFirstResponse(self):
methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE.test_interoperability(
self.stub, None)
def testTimeoutOnSleepingServer(self):
methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER.test_interoperability(
self.stub, None)
|
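A hedged sketch of how `IntraopTestCase` is meant to be mixed into `unittest.TestCase`; the channel address and generated-stub import path are assumptions standing in for whatever a concrete interop test sets up:

```python
import unittest

import grpc
from src.proto.grpc.testing import test_pb2_grpc  # assumed generated-stub location


class LocalIntraopTest(IntraopTestCase, unittest.TestCase):

    def setUp(self):
        # Assumes an interop test server is already listening locally.
        self.channel = grpc.insecure_channel('localhost:50051')
        self.stub = test_pb2_grpc.TestServiceStub(self.channel)

    def tearDown(self):
        self.stub = None
```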
SUSE/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-compute/azure/mgmt/compute/compute/v2016_04_30_preview/models/image_data_disk.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageDataDisk(Model):
"""Describes a data disk.
:param lun: The logical unit number.
:type lun: int
:param snapshot: The snapshot.
:type snapshot: :class:`SubResource
<azure.mgmt.compute.compute.v2016_04_30_preview.models.SubResource>`
:param managed_disk: The managedDisk.
:type managed_disk: :class:`SubResource
<azure.mgmt.compute.compute.v2016_04_30_preview.models.SubResource>`
:param blob_uri: The Virtual Hard Disk.
:type blob_uri: str
:param caching: The caching type. Possible values include: 'None',
'ReadOnly', 'ReadWrite'
:type caching: str or :class:`CachingTypes
<azure.mgmt.compute.compute.v2016_04_30_preview.models.CachingTypes>`
:param disk_size_gb: The initial disk size in GB for blank data disks, and
the new desired size for existing OS and Data disks.
:type disk_size_gb: int
"""
_validation = {
'lun': {'required': True},
}
_attribute_map = {
'lun': {'key': 'lun', 'type': 'int'},
'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
'blob_uri': {'key': 'blobUri', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
}
def __init__(self, lun, snapshot=None, managed_disk=None, blob_uri=None, caching=None, disk_size_gb=None):
self.lun = lun
self.snapshot = snapshot
self.managed_disk = managed_disk
self.blob_uri = blob_uri
self.caching = caching
self.disk_size_gb = disk_size_gb
|
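A hypothetical construction sketch for the generated model above; the field values are illustrative and `SubResource` comes from the same models package:

```python
from azure.mgmt.compute.compute.v2016_04_30_preview.models import (
    ImageDataDisk,
    SubResource,
)

data_disk = ImageDataDisk(
    lun=0,                                   # required (see _validation)
    snapshot=SubResource(id='/subscriptions/<sub>/.../snapshots/<snapshot-name>'),
    caching='ReadOnly',
    disk_size_gb=128,
)
```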
kailIII/geraldo
|
refs/heads/master
|
site/newsite/django_1_0/django/core/management/commands/runfcgi.py
|
674
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Runs this project as a FastCGI application. Requires flup."
args = '[various KEY=val options, use `runfcgi help` for help]'
def handle(self, *args, **options):
from django.conf import settings
from django.utils import translation
# Activate the current language, because it won't get activated later.
try:
translation.activate(settings.LANGUAGE_CODE)
except AttributeError:
pass
from django.core.servers.fastcgi import runfastcgi
runfastcgi(args)
def usage(self, subcommand):
from django.core.servers.fastcgi import FASTCGI_HELP
return FASTCGI_HELP
|
keisuke-umezawa/chainer
|
refs/heads/master
|
chainer/functions/array/get_item.py
|
8
|
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
from chainer import variable
import chainerx
_numpy_supports_0d_bool_index = \
numpy.lib.NumpyVersion(numpy.__version__) >= '1.13.0'
class GetItem(function_node.FunctionNode):
"""Function that slices array and extract elements."""
def __init__(self, slices):
if isinstance(slices, list):
if all([isinstance(s, int) for s in slices]):
slices = slices,
slices = tuple(slices)
elif not isinstance(slices, tuple):
slices = slices,
if chainer.is_debug():
n_ellipses = 0
for s in slices:
if s is Ellipsis:
n_ellipses += 1
if n_ellipses > 1:
raise ValueError('Only one Ellipsis is allowed')
self.slices = slices
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward(self, xs):
slices = tuple([
backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
for s in self.slices])
return utils.force_array(xs[0][slices]),
def backward(self, indexes, gy):
return GetItemGrad(
self.slices, self.inputs[0].shape).apply(gy)
class GetItemGrad(function_node.FunctionNode):
def __init__(self, slices, in_shape):
self.slices = slices
self._in_shape = in_shape
def forward(self, inputs):
slices = tuple([
backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
for s in self.slices])
gy, = inputs
xp = backend.get_array_module(*inputs)
gx = xp.zeros(self._in_shape, gy.dtype)
if xp is numpy:
try:
numpy.add.at(gx, slices, gy)
except IndexError:
done = False
# In numpy<1.13, 0-dim boolean index is not supported in
# numpy.add.at and it's supported for 0-dim arr in
# arr.__getitem__.
if not _numpy_supports_0d_bool_index and len(slices) == 1:
idx = numpy.asanyarray(slices[0])
if idx.dtype == numpy.dtype(bool):
# Convert the array and the mask to 1-dim.
# numpy.add.at with them is supported in older numpy.
numpy.add.at(gx[None], idx[None], gy)
done = True
if not done:
msg = '''
GetItem does not support backward for these slices. The slices argument is not
supported by numpy.add.at, while it is supported by numpy.ndarray.__getitem__.
Please report this error to the issue tracker with the stack trace,
the information of your environment, and your script:
https://github.com/chainer/chainer/issues/new.
'''
raise IndexError(msg)
else:
gx.scatter_add(slices, inputs[0])
return gx,
def backward(self, indexes, ggx):
return GetItem(self.slices).apply(ggx)
def get_item(x, slices):
"""Extract elements from array with specified shape, axes and offsets.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable to be sliced.
slices (int, slice, Ellipsis, None, integer array-like, boolean\
array-like or tuple of them):
An object to specify the selection of elements.
Returns:
A :class:`~chainer.Variable` object which contains sliced array of
``x``.
.. note::
It only supports types that are supported by CUDA's atomicAdd when
an integer array is included in ``slices``.
The supported types are ``numpy.float32``, ``numpy.int32``,
``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``.
.. note::
It does not support ``slices`` that contains multiple boolean arrays.
.. note::
See NumPy documentation for details of `indexing
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.
.. admonition:: Example
>>> x = np.arange(12).reshape((2, 2, 3))
>>> x
array([[[ 0, 1, 2],
[ 3, 4, 5]],
<BLANKLINE>
[[ 6, 7, 8],
[ 9, 10, 11]]])
>>> F.get_item(x, 0)
variable([[0, 1, 2],
[3, 4, 5]])
>>> F.get_item(x, (0, 0, slice(0, 2, 1))) # equals x[0, 0, 0:2:1]
variable([0, 1])
>>> F.get_item(x, (Ellipsis, 2)) # equals x[..., 2]
variable([[ 2, 5],
[ 8, 11]])
>>> F.get_item(x, (1, np.newaxis, 1, 0)) # equals x[1, None, 1, 0]
variable([9])
"""
return GetItem(slices).apply((x,))[0]
def install_variable_get_item():
variable.Variable.__getitem__ = get_item
|
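A short sketch of the behaviour documented above, including the gradient scatter performed by `GetItemGrad` (assumes Chainer and NumPy are installed; mirrors the docstring example):

```python
import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.arange(12, dtype=np.float32).reshape(2, 2, 3))
y = F.get_item(x, (0, Ellipsis))        # same as x[0, ...]
y.grad = np.ones_like(y.array)
y.backward()
# The gradient is scattered back only into the selected slice of x.
print(x.grad[0])  # all ones
print(x.grad[1])  # all zeros
```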
SurfasJones/icecream-info
|
refs/heads/master
|
icecream/lib/python2.7/site-packages/djangocms_file/__init__.py
|
279
|
__version__ = '0.0.1'
|
paulsmith/geodjango
|
refs/heads/master
|
django/contrib/gis/gdal/prototypes/errcheck.py
|
7
|
"""
This module houses the error-checking routines used by the GDAL
ctypes prototypes.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.gdal.error import check_err, OGRException, SRSException
from django.contrib.gis.gdal.libgdal import lgdal
# Helper routines for retrieving pointers and/or values from
# arguments passed in by reference.
def arg_byref(args, offset=-1):
"Returns the pointer argument's by-refernece value."
return args[offset]._obj.value
def ptr_byref(args, offset=-1):
"Returns the pointer argument passed in by-reference."
return args[offset]._obj
def check_bool(result, func, cargs):
"Returns the boolean evaluation of the value."
if bool(result): return True
else: return False
### String checking Routines ###
def check_const_string(result, func, cargs, offset=None):
"""
Similar functionality to `check_string`, but does not free the pointer.
"""
if offset:
check_err(result)
ptr = ptr_byref(cargs, offset)
return ptr.value
else:
return result
def check_string(result, func, cargs, offset=-1, str_result=False):
"""
Checks the string output returned from the given function, and frees
the string pointer allocated by OGR. The `str_result` keyword
may be used when the result is the string pointer, otherwise
the OGR error code is assumed. The `offset` keyword may be used
to extract the string pointer passed in by-reference at the given
slice offset in the function arguments.
"""
if str_result:
# For routines that return a string.
ptr = result
if not ptr: s = None
else: s = string_at(result)
else:
# Error-code return specified.
check_err(result)
ptr = ptr_byref(cargs, offset)
# Getting the string value
s = ptr.value
# Correctly freeing the allocated memory behind the GDAL pointer
# w/the VSIFree routine.
if ptr: lgdal.VSIFree(ptr)
return s
### DataSource, Layer error-checking ###
### Envelope checking ###
def check_envelope(result, func, cargs, offset=-1):
"Checks a function that returns an OGR Envelope by reference."
env = ptr_byref(cargs, offset)
return env
### Geometry error-checking routines ###
def check_geom(result, func, cargs):
"Checks a function that returns a geometry."
# OGR_G_Clone may return an integer, even though the
# restype is set to c_void_p
if isinstance(result, int):
result = c_void_p(result)
if not result:
raise OGRException('Invalid geometry pointer returned from "%s".' % func.__name__)
return result
def check_geom_offset(result, func, cargs, offset=-1):
"Chcks the geometry at the given offset in the C parameter list."
check_err(result)
geom = ptr_byref(cargs, offset=offset)
return check_geom(geom, func, cargs)
### Spatial Reference error-checking routines ###
def check_srs(result, func, cargs):
if isinstance(result, int):
result = c_void_p(result)
if not result:
raise SRSException('Invalid spatial reference pointer returned from "%s".' % func.__name__)
return result
### Other error-checking routines ###
def check_arg_errcode(result, func, cargs):
"""
The error code is returned in the last argument, by reference.
Check its value with `check_err` before returning the result.
"""
check_err(arg_byref(cargs))
return result
def check_errcode(result, func, cargs):
"""
Check the error code returned (c_int).
"""
check_err(result)
return
def check_pointer(result, func, cargs):
"Makes sure the result pointer is valid."
if bool(result):
return result
else:
raise OGRException('Invalid pointer returned from "%s"' % func.__name__)
def check_str_arg(result, func, cargs):
"""
This is for the OSRGet[Angular|Linear]Units functions, which
require that the returned string pointer not be freed. This
returns both the double and string values.
"""
dbl = result
ptr = cargs[-1]._obj
return dbl, ptr.value
|
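A hedged sketch of how a routine like `check_geom` is attached to a ctypes prototype; the real wiring lives in the sibling `prototypes` modules, so treat the specific GDAL function chosen here as illustrative:

```python
from ctypes import c_void_p

from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.errcheck import check_geom

# Declare the return type and let ctypes invoke check_geom on every call, so a
# NULL geometry pointer raises OGRException instead of being handed back.
lgdal.OGR_G_Clone.restype = c_void_p
lgdal.OGR_G_Clone.errcheck = check_geom
```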
cemoody/chainer
|
refs/heads/master
|
cupy/linalg/solve.py
|
20
|
# flake8: NOQA
# "flake8: NOQA" to suppress warning "H104 File contains nothing but comments"
# TODO(okuta): Implement solve
# TODO(okuta): Implement tensorsolve
# TODO(okuta): Implement lstsq
# TODO(okuta): Implement inv
# TODO(okuta): Implement pinv
# TODO(okuta): Implement tensorinv
|
QuantConnect/Lean
|
refs/heads/master
|
Algorithm.Python/InsightWeightingFrameworkAlgorithm.py
|
3
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Test algorithm using 'InsightWeightingPortfolioConstructionModel' and 'ConstantAlphaModel'
### generating a constant 'Insight' with a 0.25 weight
### </summary>
class InsightWeightingFrameworkAlgorithm(QCAlgorithm):
def Initialize(self):
''' Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
# Set requested data resolution
self.UniverseSettings.Resolution = Resolution.Minute
self.SetStartDate(2013,10,7) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
symbols = [ Symbol.Create("SPY", SecurityType.Equity, Market.USA) ]
# set algorithm framework models
self.SetUniverseSelection(ManualUniverseSelectionModel(symbols))
self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(minutes = 20), 0.025, None, 0.25))
self.SetPortfolioConstruction(InsightWeightingPortfolioConstructionModel())
self.SetExecution(ImmediateExecutionModel())
def OnEndOfAlgorithm(self):
# holdings value should be 0.25 - to avoid price fluctuation issue we compare with 0.28 and 0.23
if (self.Portfolio.TotalHoldingsValue > self.Portfolio.TotalPortfolioValue * 0.28
or self.Portfolio.TotalHoldingsValue < self.Portfolio.TotalPortfolioValue * 0.23):
raise ValueError("Unexpected Total Holdings Value: " + str(self.Portfolio.TotalHoldingsValue))
|
jaggu303619/asylum-v2.0
|
refs/heads/master
|
openerp/addons/google_base_account/wizard/google_login.py
|
53
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
try:
import gdata.contacts.service
import gdata.contacts.client
import gdata.calendar.service
except ImportError:
raise osv.except_osv(_('Google Contacts Import Error!'), _('Please install gdata-python-client from http://code.google.com/p/gdata-python-client/downloads/list'))
class google_login(osv.osv_memory):
_description ='Google Contact'
_name = 'google.login'
_columns = {
'user': fields.char('Google Username', size=64, required=True),
'password': fields.char('Google Password', size=64),
}
def google_login(self, user, password, type='', context=None):
if type == 'group':
gd_client = gdata.contacts.service.ContactsService()
elif type == 'contact':
gd_client = gdata.contacts.service.ContactsService()
elif type == 'calendar':
gd_client = gdata.calendar.service.CalendarService()
elif type =='docs_client':
gd_client = gdata.docs.client.DocsClient()
else:
gd_client = gdata.contacts.service.ContactsService()
try:
gd_client.ClientLogin(user, password, gd_client.source)
except Exception:
return False
return gd_client
def default_get(self, cr, uid, fields, context=None):
res = super(google_login, self).default_get(cr, uid, fields, context=context)
user_obj = self.pool.get('res.users').browse(cr, uid, uid)
if 'user' in fields:
res.update({'user': user_obj.gmail_user})
if 'password' in fields:
res.update({'password': user_obj.gmail_password})
return res
def login(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids)[0]
user = data['user']
password = data['password']
if self.google_login(user, password):
res = {
'gmail_user': user,
'gmail_password': password
}
self.pool.get('res.users').write(cr, uid, uid, res, context=context)
else:
raise osv.except_osv(_('Error!'), _("Authentication failed. Check the user and password."))
return self._get_next_action(cr, uid, context=context)
def _get_next_action(self, cr, uid, context=None):
return {'type': 'ir.actions.act_window_close'}
google_login()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tersmitten/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/aws_direct_connect_connection.py
|
39
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_direct_connect_connection
short_description: Creates, deletes, modifies a DirectConnect connection
description:
- Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location.
Upon creation the connection may be added to a link aggregation group or established as a standalone connection.
The connection may later be associated or disassociated with a link aggregation group.
version_added: "2.4"
author: "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
options:
state:
description:
- The state of the Direct Connect connection.
choices:
- present
- absent
name:
description:
- The name of the Direct Connect connection. This is required to create a
new connection. To recreate or delete a connection I(name) or I(connection_id)
is required.
connection_id:
description:
- The ID of the Direct Connect connection. I(name) or I(connection_id) is
required to recreate or delete a connection. Modifying attributes of a
connection with I(forced_update) will result in a new Direct Connect connection ID.
location:
description:
- Where the Direct Connect connection is located. Required when I(state=present).
bandwidth:
description:
- The bandwidth of the Direct Connect connection. Required when I(state=present).
choices:
- 1Gbps
- 10Gbps
link_aggregation_group:
description:
- The ID of the link aggregation group you want to associate with the connection.
This is optional in case a stand-alone connection is desired.
forced_update:
description:
- To modify bandwidth or location the connection will need to be deleted and recreated.
By default this will not happen - this option must be set to True.
type: bool
"""
EXAMPLES = """
# create a Direct Connect connection
- aws_direct_connect_connection:
name: ansible-test-connection
state: present
location: EqDC2
link_aggregation_group: dxlag-xxxxxxxx
bandwidth: 1Gbps
register: dc
# disassociate the LAG from the connection
- aws_direct_connect_connection:
state: present
connection_id: dc.connection.connection_id
location: EqDC2
bandwidth: 1Gbps
# replace the connection with one with more bandwidth
- aws_direct_connect_connection:
state: present
name: ansible-test-connection
location: EqDC2
bandwidth: 10Gbps
forced_update: True
# delete the connection
- aws_direct_connect_connection:
state: absent
name: ansible-test-connection
"""
RETURN = """
connection:
description: The attributes of the direct connect connection.
type: complex
returned: I(state=present)
contains:
aws_device:
description: The endpoint which the physical connection terminates on.
returned: when the requested state is no longer 'requested'
type: str
sample: EqDC2-12pmo7hemtz1z
bandwidth:
description: The bandwidth of the connection.
returned: always
type: str
sample: 1Gbps
connection_id:
description: The ID of the connection.
returned: always
type: str
sample: dxcon-ffy9ywed
connection_name:
description: The name of the connection.
returned: always
type: str
sample: ansible-test-connection
connection_state:
description: The state of the connection.
returned: always
type: str
sample: pending
loa_issue_time:
description: The issue time of the connection's Letter of Authorization - Connecting Facility Assignment.
returned: when the LOA-CFA has been issued (the connection state will no longer be 'requested')
type: str
sample: '2018-03-20T17:36:26-04:00'
location:
description: The location of the connection.
returned: always
type: str
sample: EqDC2
owner_account:
description: The account that owns the direct connect connection.
returned: always
type: str
sample: '123456789012'
region:
description: The region in which the connection exists.
returned: always
type: str
sample: us-east-1
"""
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry)
from ansible.module_utils.aws.direct_connect import (DirectConnectError, delete_connection,
associate_connection_and_lag, disassociate_connection_and_lag)
try:
from botocore.exceptions import BotoCoreError, ClientError
except Exception:
pass
# handled by imported AnsibleAWSModule
retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
def connection_status(client, connection_id):
return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False)
def connection_exists(client, connection_id=None, connection_name=None, verify=True):
params = {}
if connection_id:
params['connectionId'] = connection_id
try:
response = AWSRetry.backoff(**retry_params)(client.describe_connections)(**params)
except (BotoCoreError, ClientError) as e:
if connection_id:
msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
else:
msg = "Failed to describe DirectConnect connections"
raise DirectConnectError(msg=msg,
last_traceback=traceback.format_exc(),
exception=e)
match = []
connection = []
# look for matching connections
if len(response.get('connections', [])) == 1 and connection_id:
if response['connections'][0]['connectionState'] != 'deleted':
match.append(response['connections'][0]['connectionId'])
connection.extend(response['connections'])
for conn in response.get('connections', []):
if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
match.append(conn['connectionId'])
connection.append(conn)
# verifying if the connection exists; if true, return the connection identifier, otherwise return False
if verify and len(match) == 1:
return match[0]
elif verify:
return False
# not verifying if the connection exists; just return current connection info
elif len(connection) == 1:
return {'connection': connection[0]}
return {'connection': {}}
def create_connection(client, location, bandwidth, name, lag_id):
if not name:
raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
params = {
'location': location,
'bandwidth': bandwidth,
'connectionName': name,
}
if lag_id:
params['lagId'] = lag_id
try:
connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params)
except (BotoCoreError, ClientError) as e:
raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
last_traceback=traceback.format_exc(),
exception=e)
return connection['connectionId']
def changed_properties(current_status, location, bandwidth):
current_bandwidth = current_status['bandwidth']
current_location = current_status['location']
return current_bandwidth != bandwidth or current_location != location
@AWSRetry.backoff(**retry_params)
def update_associations(client, latest_state, connection_id, lag_id):
changed = False
if 'lagId' in latest_state and lag_id != latest_state['lagId']:
disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
changed = True
if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
associate_connection_and_lag(client, connection_id, lag_id)
changed = True
return changed
def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
# the connection is found; get the latest state and see if it needs to be updated
if connection_id:
latest_state = connection_status(client, connection_id=connection_id)['connection']
if changed_properties(latest_state, location, bandwidth) and forced_update:
ensure_absent(client, connection_id)
return ensure_present(client=client,
connection_id=None,
connection_name=connection_name,
location=location,
bandwidth=bandwidth,
lag_id=lag_id,
forced_update=forced_update)
elif update_associations(client, latest_state, connection_id, lag_id):
return True, connection_id
# no connection found; create a new one
else:
return True, create_connection(client, location, bandwidth, connection_name, lag_id)
return False, connection_id
@AWSRetry.backoff(**retry_params)
def ensure_absent(client, connection_id):
changed = False
if connection_id:
delete_connection(client, connection_id)
changed = True
return changed
def main():
argument_spec = dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(),
location=dict(),
bandwidth=dict(choices=['1Gbps', '10Gbps']),
link_aggregation_group=dict(),
connection_id=dict(),
forced_update=dict(type='bool', default=False)
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
required_one_of=[('connection_id', 'name')],
required_if=[('state', 'present', ('location', 'bandwidth'))]
)
connection = module.client('directconnect')
state = module.params.get('state')
try:
connection_id = connection_exists(
connection,
connection_id=module.params.get('connection_id'),
connection_name=module.params.get('name')
)
if not connection_id and module.params.get('connection_id'):
module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
if state == 'present':
changed, connection_id = ensure_present(connection,
connection_id=connection_id,
connection_name=module.params.get('name'),
location=module.params.get('location'),
bandwidth=module.params.get('bandwidth'),
lag_id=module.params.get('link_aggregation_group'),
forced_update=module.params.get('forced_update'))
response = connection_status(connection, connection_id)
elif state == 'absent':
changed = ensure_absent(connection, connection_id)
response = {}
except DirectConnectError as e:
if e.last_traceback:
module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
else:
module.fail_json(msg=e.msg)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
if __name__ == '__main__':
main()
|
stopstalk/stopstalk-deployment
|
refs/heads/master
|
aws_lambda/spoj_aws_lambda_function/lambda_code/lxml/html/diff.py
|
71
|
import difflib
from lxml import etree
from lxml.html import fragment_fromstring
import re
__all__ = ['html_annotate', 'htmldiff']
try:
from html import escape as html_escape
except ImportError:
from cgi import escape as html_escape
try:
_unicode = unicode
except NameError:
# Python 3
_unicode = str
try:
basestring
except NameError:
# Python 3
basestring = str
############################################################
## Annotation
############################################################
def default_markup(text, version):
return '<span title="%s">%s</span>' % (
html_escape(_unicode(version), 1), text)
def html_annotate(doclist, markup=default_markup):
"""
doclist should be ordered from oldest to newest, like::
>>> version1 = 'Hello World'
>>> version2 = 'Goodbye World'
>>> print(html_annotate([(version1, 'version 1'),
... (version2, 'version 2')]))
<span title="version 2">Goodbye</span> <span title="version 1">World</span>
The documents must be *fragments* (str/UTF8 or unicode), not
complete documents
The markup argument is a function to markup the spans of words.
This function is called like markup('Hello', 'version 2'), and
returns HTML. The first argument is text and never includes any
markup. The default uses a span with a title:
>>> print(default_markup('Some Text', 'by Joe'))
<span title="by Joe">Some Text</span>
"""
# The basic strategy we have is to split the documents up into
# logical tokens (which are words with attached markup). We then
# do diffs of each of the versions to track when a token first
# appeared in the document; the annotation attached to the token
# is the version where it first appeared.
tokenlist = [tokenize_annotated(doc, version)
for doc, version in doclist]
cur_tokens = tokenlist[0]
for tokens in tokenlist[1:]:
html_annotate_merge_annotations(cur_tokens, tokens)
cur_tokens = tokens
# After we've tracked all the tokens, we can combine spans of text
# that are adjacent and have the same annotation
cur_tokens = compress_tokens(cur_tokens)
# And finally add markup
result = markup_serialize_tokens(cur_tokens, markup)
return ''.join(result).strip()
def tokenize_annotated(doc, annotation):
"""Tokenize a document and add an annotation attribute to each token
"""
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens
def html_annotate_merge_annotations(tokens_old, tokens_new):
"""Merge the annotations from tokens_old into tokens_new, when the
tokens in the new document already existed in the old document.
"""
s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
commands = s.get_opcodes()
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
eq_old = tokens_old[i1:i2]
eq_new = tokens_new[j1:j2]
copy_annotations(eq_old, eq_new)
def copy_annotations(src, dest):
"""
Copy annotations from the tokens listed in src to the tokens in dest
"""
assert len(src) == len(dest)
for src_tok, dest_tok in zip(src, dest):
dest_tok.annotation = src_tok.annotation
def compress_tokens(tokens):
"""
Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation
"""
result = [tokens[0]]
for tok in tokens[1:]:
if (not result[-1].post_tags and
not tok.pre_tags and
result[-1].annotation == tok.annotation):
compress_merge_back(result, tok)
else:
result.append(tok)
return result
def compress_merge_back(tokens, tok):
""" Merge tok into the last element of tokens (modifying the list of
tokens in-place). """
last = tokens[-1]
if type(last) is not token or type(tok) is not token:
tokens.append(tok)
else:
text = _unicode(last)
if last.trailing_whitespace:
text += last.trailing_whitespace
text += tok
merged = token(text,
pre_tags=last.pre_tags,
post_tags=tok.post_tags,
trailing_whitespace=tok.trailing_whitespace)
merged.annotation = last.annotation
tokens[-1] = merged
def markup_serialize_tokens(tokens, markup_func):
"""
Serialize the list of tokens into a list of text chunks, calling
markup_func around text to add annotations.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
html = token.html()
html = markup_func(html, token.annotation)
if token.trailing_whitespace:
html += token.trailing_whitespace
yield html
for post in token.post_tags:
yield post
############################################################
## HTML Diffs
############################################################
def htmldiff(old_html, new_html):
## FIXME: this should take parsed documents too, and use their body
## or other content.
""" Do a diff of the old and new document. The documents are HTML
*fragments* (str/UTF8 or unicode), they are not complete documents
(i.e., no <html> tag).
Returns HTML with <ins> and <del> tags added around the
appropriate text.
Markup is generally ignored, with the markup from new_html
preserved, and possibly some markup from old_html (though it is
considered acceptable to lose some of the old markup). Only the
words in the HTML are diffed. The exception is <img> tags, which
are treated like words, and the href attribute of <a> tags, which
are noted inside the tag itself when there are changes.
"""
old_html_tokens = tokenize(old_html)
new_html_tokens = tokenize(new_html)
result = htmldiff_tokens(old_html_tokens, new_html_tokens)
result = ''.join(result).strip()
return fixup_ins_del_tags(result)
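# A hedged usage sketch for htmldiff (output shown approximately; exact
# whitespace and tag placement can differ slightly between versions):
#   >>> htmldiff('<p>Hello World</p>', '<p>Goodbye World</p>')
#   '<p><ins>Goodbye</ins> <del>Hello</del> World</p>'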
def htmldiff_tokens(html1_tokens, html2_tokens):
""" Does a diff on the tokens themselves, returning a list of text
chunks (not tokens).
"""
# There are several passes as we do the differences. The tokens
# isolate the portion of the content we care to diff; difflib does
# all the actual hard work at that point.
#
# Then we must create a valid document from pieces of both the old
# document and the new document. We generally prefer to take
# markup from the new document, and only do a best effort attempt
# to keep markup from the old document; anything that we can't
# resolve we throw away. Also we try to put the deletes as close
# to the location where we think they would have been -- because
# we are only keeping the markup from the new document, it can be
# fuzzy where in the new document the old text would have gone.
# Again we just do a best effort attempt.
s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
commands = s.get_opcodes()
result = []
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
continue
if command == 'insert' or command == 'replace':
ins_tokens = expand_tokens(html2_tokens[j1:j2])
merge_insert(ins_tokens, result)
if command == 'delete' or command == 'replace':
del_tokens = expand_tokens(html1_tokens[i1:i2])
merge_delete(del_tokens, result)
# If deletes were inserted directly as <del> then we'd have an
# invalid document at this point. Instead we put in special
# markers, and when the complete diffed document has been created
# we try to move the deletes around and resolve any problems.
result = cleanup_delete(result)
return result
def expand_tokens(tokens, equal=False):
"""Given a list of tokens, return a generator of the chunks of
text for the data in the tokens.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
if not equal or not token.hide_when_equal:
if token.trailing_whitespace:
yield token.html() + token.trailing_whitespace
else:
yield token.html()
for post in token.post_tags:
yield post
def merge_insert(ins_chunks, doc):
""" doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that. """
# Though we don't throw away unbalanced_start or unbalanced_end
# (we assume there is accompanying markup later or earlier in the
# document), we only put <ins> around the balanced portion.
unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
doc.extend(unbalanced_start)
if doc and not doc[-1].endswith(' '):
# Fix up the case where the word before the insert didn't end with
# a space
doc[-1] += ' '
doc.append('<ins>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </ins>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</ins> ')
doc.extend(unbalanced_end)
# These are sentinels to represent the start and end of a <del>
# segment, until we do the cleanup phase to turn them into proper
# markup:
class DEL_START:
pass
class DEL_END:
pass
class NoDeletes(Exception):
""" Raised when the document no longer contains any pending deletes
(DEL_START/DEL_END) """
def merge_delete(del_chunks, doc):
""" Adds the text chunks in del_chunks to the document doc (another
list of text chunks) with marker to show it is a delete.
cleanup_delete later resolves these markers into <del> tags."""
doc.append(DEL_START)
doc.extend(del_chunks)
doc.append(DEL_END)
def cleanup_delete(chunks):
""" Cleans up any DEL_START/DEL_END markers in the document, replacing
them with <del></del>. To do this while keeping the document
valid, it may need to drop some tags (either start or end tags).
It may also move the del into adjacent tags to try to move it to a
similar location where it was originally located (e.g., moving a
delete into preceding <div> tag, if the del looks like (DEL_START,
'Text</div>', DEL_END)"""
while 1:
# Find a pending DEL_START/DEL_END, splitting the document
# into stuff-preceding-DEL_START, stuff-inside, and
# stuff-following-DEL_END
try:
pre_delete, delete, post_delete = split_delete(chunks)
except NoDeletes:
# Nothing found, we've cleaned up the entire doc
break
# The stuff-inside-DEL_START/END may not be well balanced
# markup. First we figure out what unbalanced portions there are:
unbalanced_start, balanced, unbalanced_end = split_unbalanced(delete)
# Then we move the span forward and/or backward based on these
# unbalanced portions:
locate_unbalanced_start(unbalanced_start, pre_delete, post_delete)
locate_unbalanced_end(unbalanced_end, pre_delete, post_delete)
doc = pre_delete
if doc and not doc[-1].endswith(' '):
# Fix up case where the word before us didn't have a trailing space
doc[-1] += ' '
doc.append('<del>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </del>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</del> ')
doc.extend(post_delete)
chunks = doc
return chunks
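# Editor's illustrative sketch (not part of the original module): a minimal
# demonstration of how cleanup_delete() resolves the DEL_START/DEL_END
# sentinels into real <del> markup. The chunk values are hypothetical.
def _demo_cleanup_delete():
    chunks = ['<p>', 'Hello ', DEL_START, 'cruel ', DEL_END, 'world ', '</p>']
    # The sentinels become <del>...</del>, and the trailing space of the last
    # deleted word is moved outside the closing tag:
    # ['<p>', 'Hello ', '<del>', 'cruel', '</del> ', 'world ', '</p>']
    return cleanup_delete(chunks)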
def split_unbalanced(chunks):
"""Return (unbalanced_start, balanced, unbalanced_end), where each is
a list of text and tag chunks.
unbalanced_start is a list of all the tags that are opened, but
not closed in this span. Similarly, unbalanced_end is a list of
tags that are closed but were not opened. Extracting these might
mean some reordering of the chunks."""
start = []
end = []
tag_stack = []
balanced = []
for chunk in chunks:
if not chunk.startswith('<'):
balanced.append(chunk)
continue
endtag = chunk[1] == '/'
name = chunk.split()[0].strip('<>/')
if name in empty_tags:
balanced.append(chunk)
continue
if endtag:
if tag_stack and tag_stack[-1][0] == name:
balanced.append(chunk)
name, pos, tag = tag_stack.pop()
balanced[pos] = tag
elif tag_stack:
start.extend([tag for name, pos, tag in tag_stack])
tag_stack = []
end.append(chunk)
else:
end.append(chunk)
else:
tag_stack.append((name, len(balanced), chunk))
balanced.append(None)
start.extend(
[chunk for name, pos, chunk in tag_stack])
balanced = [chunk for chunk in balanced if chunk is not None]
return start, balanced, end
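# Editor's illustrative sketch (not part of the original module): '<b>' below is
# opened but never closed, and '</p>' is closed but never opened, so they end up
# in the unbalanced-start and unbalanced-end lists respectively.
def _demo_split_unbalanced():
    return split_unbalanced(['<b>', 'word ', '</p>'])
    # -> (['<b>'], ['word '], ['</p>'])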
def split_delete(chunks):
""" Returns (stuff_before_DEL_START, stuff_inside_DEL_START_END,
stuff_after_DEL_END). Returns the first case found (there may be
more DEL_STARTs in stuff_after_DEL_END). Raises NoDeletes if
there's no DEL_START found. """
try:
pos = chunks.index(DEL_START)
except ValueError:
raise NoDeletes
pos2 = chunks.index(DEL_END)
return chunks[:pos], chunks[pos+1:pos2], chunks[pos2+1:]
def locate_unbalanced_start(unbalanced_start, pre_delete, post_delete):
""" pre_delete and post_delete implicitly point to a place in the
document (where the two were split). This moves that point (by
popping items from one and pushing them onto the other). It moves
the point to try to find a place where unbalanced_start applies.
As an example::
>>> unbalanced_start = ['<div>']
>>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
>>> pre, post = doc[:3], doc[3:]
>>> pre, post
(['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
>>> locate_unbalanced_start(unbalanced_start, pre, post)
>>> pre, post
(['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])
As you can see, we moved the point so that the dangling <div> that
we found will be effectively replaced by the div in the original
document. If this doesn't work out, we just throw away
unbalanced_start without doing anything.
"""
while 1:
if not unbalanced_start:
# We have totally succeeded in finding the position
break
finding = unbalanced_start[0]
finding_name = finding.split()[0].strip('<>')
if not post_delete:
break
next = post_delete[0]
if next is DEL_START or not next.startswith('<'):
# Reached a word, we can't move the delete text forward
break
if next[1] == '/':
# Reached a closing tag, can we go further? Maybe not...
break
name = next.split()[0].strip('<>')
if name == 'ins':
# Can't move into an insert
break
assert name != 'del', (
"Unexpected delete tag: %r" % next)
if name == finding_name:
unbalanced_start.pop(0)
pre_delete.append(post_delete.pop(0))
else:
# Found a tag that doesn't match
break
def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
""" like locate_unbalanced_start, except handling end tags and
possibly moving the point earlier in the document. """
while 1:
if not unbalanced_end:
# Success
break
finding = unbalanced_end[-1]
finding_name = finding.split()[0].strip('<>/')
if not pre_delete:
break
next = pre_delete[-1]
if next is DEL_END or not next.startswith('</'):
# A word or a start tag
break
name = next.split()[0].strip('<>/')
if name == 'ins' or name == 'del':
# Can't move into an insert or delete
break
if name == finding_name:
unbalanced_end.pop()
post_delete.insert(0, pre_delete.pop())
else:
# Found a tag that doesn't match
break
class token(_unicode):
""" Represents a diffable token, generally a word that is displayed to
the user. Opening tags that are adjacent to the word are attached as
pre_tags, and closing tags that follow the word as post_tags. Some
exceptions occur when there are empty tags
adjacent to a word, so there may be close tags in pre_tags, or
open tags in post_tags.
We also keep track of whether the word was originally followed by
whitespace, even though we do not want to treat the word as
equivalent to a similar word that does not have a trailing
space."""
# When this is true, the token will be eliminated from the
# displayed diff if no change has occurred:
hide_when_equal = False
def __new__(cls, text, pre_tags=None, post_tags=None, trailing_whitespace=""):
obj = _unicode.__new__(cls, text)
if pre_tags is not None:
obj.pre_tags = pre_tags
else:
obj.pre_tags = []
if post_tags is not None:
obj.post_tags = post_tags
else:
obj.post_tags = []
obj.trailing_whitespace = trailing_whitespace
return obj
def __repr__(self):
return 'token(%s, %r, %r, %r)' % (_unicode.__repr__(self), self.pre_tags,
self.post_tags, self.trailing_whitespace)
def html(self):
return _unicode(self)
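# Editor's illustrative sketch (not part of the original module): constructing a
# token by hand to show how adjacent markup is carried in pre_tags/post_tags
# while the token itself behaves like the plain word.
def _demo_token():
    word = token('Hello', pre_tags=['<b>'], post_tags=['</b>'], trailing_whitespace=' ')
    return word.html(), word.pre_tags, word.post_tags
    # -> ('Hello', ['<b>'], ['</b>'])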
class tag_token(token):
""" Represents a token that is actually a tag. Currently this is just
the <img> tag, which takes up visible space just like a word but
is only represented in a document by a tag. """
def __new__(cls, tag, data, html_repr, pre_tags=None,
post_tags=None, trailing_whitespace=""):
obj = token.__new__(cls, "%s: %s" % (tag, data),
pre_tags=pre_tags,
post_tags=post_tags,
trailing_whitespace=trailing_whitespace)
obj.tag = tag
obj.data = data
obj.html_repr = html_repr
return obj
def __repr__(self):
return 'tag_token(%s, %s, html_repr=%s, pre_tags=%r, post_tags=%r, trailing_whitespace=%r)' % (
self.tag,
self.data,
self.html_repr,
self.pre_tags,
self.post_tags,
self.trailing_whitespace)
def html(self):
return self.html_repr
class href_token(token):
""" Represents the href in an anchor tag. Unlike other words, we only
show the href when it changes. """
hide_when_equal = True
def html(self):
return ' Link: %s' % self
def tokenize(html, include_hrefs=True):
"""
Parse the given HTML and return token objects (words with attached tags).
This parses only the content of a page; anything in the head is
ignored, and the <head> and <body> elements are themselves
optional. The content is then parsed by lxml, which ensures the
validity of the resulting parsed document (though lxml may make
incorrect guesses when the markup is particularly bad).
<ins> and <del> tags are also eliminated from the document, as
that gets confusing.
If include_hrefs is true, then the href attribute of <a> tags is
included as a special kind of diffable token."""
if etree.iselement(html):
body_el = html
else:
body_el = parse_html(html, cleanup=True)
# Then we split the document into text chunks for each tag, word, and end tag:
chunks = flatten_el(body_el, skip_tag=True, include_hrefs=include_hrefs)
# Finally re-joining them into token objects:
return fixup_chunks(chunks)
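# Editor's illustrative sketch (not part of the original module): tokenizing a
# small fragment. Roughly, 'Hello' comes back with '<p>' in its pre_tags and
# 'world' with '<b>' in pre_tags and '</b>', '</p>' in post_tags; the exact
# grouping is determined by fixup_chunks() above.
def _demo_tokenize():
    return [(t, t.pre_tags, t.post_tags) for t in tokenize('<p>Hello <b>world</b></p>')]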
def parse_html(html, cleanup=True):
"""
Parses an HTML fragment, returning an lxml element. Note that the HTML will be
wrapped in a <div> tag that was not in the original document.
If cleanup is true, make sure there's no <head> or <body>, and get
rid of any <ins> and <del> tags.
"""
if cleanup:
# This removes any extra markup or structure like <head>:
html = cleanup_html(html)
return fragment_fromstring(html, create_parent=True)
_body_re = re.compile(r'<body.*?>', re.I|re.S)
_end_body_re = re.compile(r'</body.*?>', re.I|re.S)
_ins_del_re = re.compile(r'</?(ins|del).*?>', re.I|re.S)
def cleanup_html(html):
""" This 'cleans' the HTML, meaning that any page structure is removed
(only the contents of <body> are used, if there is any <body>).
Also <ins> and <del> tags are removed. """
match = _body_re.search(html)
if match:
html = html[match.end():]
match = _end_body_re.search(html)
if match:
html = html[:match.start()]
html = _ins_del_re.sub('', html)
return html
end_whitespace_re = re.compile(r'[ \t\n\r]$')
def split_trailing_whitespace(word):
"""
This function takes a word, such as 'test\n\n' and returns ('test','\n\n')
"""
stripped_length = len(word.rstrip())
return word[0:stripped_length], word[stripped_length:]
def fixup_chunks(chunks):
"""
This function takes a list of chunks and produces a list of tokens.
"""
tag_accum = []
cur_word = None
result = []
for chunk in chunks:
if isinstance(chunk, tuple):
if chunk[0] == 'img':
src = chunk[1]
tag, trailing_whitespace = split_trailing_whitespace(chunk[2])
cur_word = tag_token('img', src, html_repr=tag,
pre_tags=tag_accum,
trailing_whitespace=trailing_whitespace)
tag_accum = []
result.append(cur_word)
elif chunk[0] == 'href':
href = chunk[1]
cur_word = href_token(href, pre_tags=tag_accum, trailing_whitespace=" ")
tag_accum = []
result.append(cur_word)
continue
if is_word(chunk):
chunk, trailing_whitespace = split_trailing_whitespace(chunk)
cur_word = token(chunk, pre_tags=tag_accum, trailing_whitespace=trailing_whitespace)
tag_accum = []
result.append(cur_word)
elif is_start_tag(chunk):
tag_accum.append(chunk)
elif is_end_tag(chunk):
if tag_accum:
tag_accum.append(chunk)
else:
assert cur_word, (
"Weird state, cur_word=%r, result=%r, chunks=%r of %r"
% (cur_word, result, chunk, chunks))
cur_word.post_tags.append(chunk)
else:
assert(0)
if not result:
return [token('', pre_tags=tag_accum)]
else:
result[-1].post_tags.extend(tag_accum)
return result
# All the tags in HTML that don't require end tags:
empty_tags = (
'param', 'img', 'area', 'br', 'basefont', 'input',
'base', 'meta', 'link', 'col')
block_level_tags = (
'address',
'blockquote',
'center',
'dir',
'div',
'dl',
'fieldset',
'form',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'hr',
'isindex',
'menu',
'noframes',
'noscript',
'ol',
'p',
'pre',
'table',
'ul',
)
block_level_container_tags = (
'dd',
'dt',
'frameset',
'li',
'tbody',
'td',
'tfoot',
'th',
'thead',
'tr',
)
def flatten_el(el, include_hrefs, skip_tag=False):
""" Takes an lxml element el, and generates all the text chunks for
that tag. Each start tag is a chunk, each word is a chunk, and each
end tag is a chunk.
If skip_tag is true, then the outermost container tag is
not returned (just its contents)."""
if not skip_tag:
if el.tag == 'img':
yield ('img', el.get('src'), start_tag(el))
else:
yield start_tag(el)
if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
return
start_words = split_words(el.text)
for word in start_words:
yield html_escape(word)
for child in el:
for item in flatten_el(child, include_hrefs=include_hrefs):
yield item
if el.tag == 'a' and el.get('href') and include_hrefs:
yield ('href', el.get('href'))
if not skip_tag:
yield end_tag(el)
end_words = split_words(el.tail)
for word in end_words:
yield html_escape(word)
split_words_re = re.compile(r'\S+(?:\s+|$)', re.U)
def split_words(text):
""" Splits some text into words. Includes trailing whitespace
on each word when appropriate. """
if not text or not text.strip():
return []
words = split_words_re.findall(text)
return words
start_whitespace_re = re.compile(r'^[ \t\n\r]')
def start_tag(el):
"""
The text representation of an element's start tag.
"""
return '<%s%s>' % (
el.tag, ''.join([' %s="%s"' % (name, html_escape(value, True))
for name, value in el.attrib.items()]))
def end_tag(el):
""" The text representation of an end tag for a tag. Includes
trailing whitespace when appropriate. """
if el.tail and start_whitespace_re.search(el.tail):
extra = ' '
else:
extra = ''
return '</%s>%s' % (el.tag, extra)
def is_word(tok):
return not tok.startswith('<')
def is_end_tag(tok):
return tok.startswith('</')
def is_start_tag(tok):
return tok.startswith('<') and not tok.startswith('</')
def fixup_ins_del_tags(html):
""" Given an html string, move any <ins> or <del> tags inside of any
block-level elements, e.g. transform <ins><p>word</p></ins> to
<p><ins>word</ins></p> """
doc = parse_html(html, cleanup=False)
_fixup_ins_del_tags(doc)
html = serialize_html_fragment(doc, skip_outer=True)
return html
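# Editor's illustrative sketch (not part of the original module): the exact
# transformation described in the docstring above.
def _demo_fixup_ins_del_tags():
    return fixup_ins_del_tags('<ins><p>word</p></ins>')
    # -> '<p><ins>word</ins></p>'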
def serialize_html_fragment(el, skip_outer=False):
""" Serialize a single lxml element as HTML. The serialized form
includes the element's tail.
If skip_outer is true, then don't serialize the outermost tag
"""
assert not isinstance(el, basestring), (
"You should pass in an element, not a string like %r" % el)
html = etree.tostring(el, method="html", encoding=_unicode)
if skip_outer:
# Get rid of the extra starting tag:
html = html[html.find('>')+1:]
# Get rid of the extra end tag:
html = html[:html.rfind('<')]
return html.strip()
else:
return html
def _fixup_ins_del_tags(doc):
"""fixup_ins_del_tags that works on an lxml document in-place
"""
for tag in ['ins', 'del']:
for el in doc.xpath('descendant-or-self::%s' % tag):
if not _contains_block_level_tag(el):
continue
_move_el_inside_block(el, tag=tag)
el.drop_tag()
#_merge_element_contents(el)
def _contains_block_level_tag(el):
"""True if the element contains any block-level elements, like <p>, <td>, etc.
"""
if el.tag in block_level_tags or el.tag in block_level_container_tags:
return True
for child in el:
if _contains_block_level_tag(child):
return True
return False
def _move_el_inside_block(el, tag):
""" helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
and moves them inside any block-level tags. """
for child in el:
if _contains_block_level_tag(child):
break
else:
# No block-level tags in any child
children_tag = etree.Element(tag)
children_tag.text = el.text
el.text = None
children_tag.extend(list(el))
el[:] = [children_tag]
return
for child in list(el):
if _contains_block_level_tag(child):
_move_el_inside_block(child, tag)
if child.tail:
tail_tag = etree.Element(tag)
tail_tag.text = child.tail
child.tail = None
el.insert(el.index(child)+1, tail_tag)
else:
child_tag = etree.Element(tag)
el.replace(child, child_tag)
child_tag.append(child)
if el.text:
text_tag = etree.Element(tag)
text_tag.text = el.text
el.text = None
el.insert(0, text_tag)
def _merge_element_contents(el):
"""
Removes an element, but merges its contents into its place, e.g.,
given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
<p>Hi there!</p>
"""
parent = el.getparent()
text = el.text or ''
if el.tail:
if not len(el):
text += el.tail
else:
if el[-1].tail:
el[-1].tail += el.tail
else:
el[-1].tail = el.tail
index = parent.index(el)
if text:
if index == 0:
previous = None
else:
previous = parent[index-1]
if previous is None:
if parent.text:
parent.text += text
else:
parent.text = text
else:
if previous.tail:
previous.tail += text
else:
previous.tail = text
parent[index:index+1] = el.getchildren()
class InsensitiveSequenceMatcher(difflib.SequenceMatcher):
"""
Acts like SequenceMatcher, but tries not to find very small equal
blocks amidst large spans of changes
"""
threshold = 2
def get_matching_blocks(self):
size = min(len(self.a), len(self.b))
threshold = min(self.threshold, size / 4)
actual = difflib.SequenceMatcher.get_matching_blocks(self)
return [item for item in actual
if item[2] > threshold
or not item[2]]
if __name__ == '__main__':
from lxml.html import _diffcommand
_diffcommand.main()
|
teodoc/home-assistant
|
refs/heads/master
|
homeassistant/components/sensor/systemmonitor.py
|
8
|
"""
homeassistant.components.sensor.systemmonitor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Shows system monitor values such as disk, memory, and processor use.
Configuration:
To use the System monitor sensor, you will need to add something like the
following to your config/configuration.yaml:
sensor:
platform: systemmonitor
resources:
- type: 'disk_use_percent'
arg: '/'
- type: 'disk_use'
arg: '/home'
- type: 'disk_free'
arg: '/'
- type: 'memory_use_percent'
- type: 'memory_use'
- type: 'memory_free'
- type: 'swap_use_percent'
- type: 'swap_use'
- type: 'swap_free'
- type: 'network_in'
arg: 'eth0'
- type: 'network_out'
arg: 'eth0'
- type: 'packets_in'
arg: 'eth0'
- type: 'packets_out'
arg: 'eth0'
- type: 'ipv4_address'
arg: 'eth0'
- type: 'ipv6_address'
arg: 'eth0'
- type: 'processor_use'
- type: 'process'
arg: 'octave-cli'
- type: 'last_boot'
- type: 'since_last_boot'
Variables:
resources
*Required
An array specifying the variables to monitor.
These are the variables for the resources array:
type
*Required
The variable you wish to monitor; see the configuration example above for a
sample list of variables.
arg
*Optional
Additional details for the type, e.g. path, binary name, etc.
"""
import logging
import psutil
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
from homeassistant.const import STATE_ON, STATE_OFF
REQUIREMENTS = ['psutil>=3.0.0']
SENSOR_TYPES = {
'disk_use_percent': ['Disk Use', '%'],
'disk_use': ['Disk Use', 'GiB'],
'disk_free': ['Disk Free', 'GiB'],
'memory_use_percent': ['RAM Use', '%'],
'memory_use': ['RAM Use', 'MiB'],
'memory_free': ['RAM Free', 'MiB'],
'processor_use': ['CPU Use', '%'],
'process': ['Process', ''],
'swap_use_percent': ['Swap Use', '%'],
'swap_use': ['Swap Use', 'GiB'],
'swap_free': ['Swap Free', 'GiB'],
'network_out': ['Sent', 'MiB'],
'network_in': ['Received', 'MiB'],
'packets_out': ['Packets sent', ''],
'packets_in': ['Packets received', ''],
'ipv4_address': ['IPv4 address', ''],
'ipv6_address': ['IPv6 address', ''],
'last_boot': ['Last Boot', ''],
'since_last_boot': ['Since Last Boot', '']
}
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the sensors. """
dev = []
for resource in config['resources']:
if 'arg' not in resource:
resource['arg'] = ''
if resource['type'] not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', resource['type'])
else:
dev.append(SystemMonitorSensor(resource['type'], resource['arg']))
add_devices(dev)
class SystemMonitorSensor(Entity):
""" A system monitor sensor. """
def __init__(self, sensor_type, argument=''):
self._name = SENSOR_TYPES[sensor_type][0] + ' ' + argument
self.argument = argument
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
return self._name
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
return self._unit_of_measurement
# pylint: disable=too-many-branches
def update(self):
if self.type == 'disk_use_percent':
self._state = psutil.disk_usage(self.argument).percent
elif self.type == 'disk_use':
self._state = round(psutil.disk_usage(self.argument).used /
1024**3, 1)
elif self.type == 'disk_free':
self._state = round(psutil.disk_usage(self.argument).free /
1024**3, 1)
elif self.type == 'memory_use_percent':
self._state = psutil.virtual_memory().percent
elif self.type == 'memory_use':
self._state = round((psutil.virtual_memory().total -
psutil.virtual_memory().available) /
1024**2, 1)
elif self.type == 'memory_free':
self._state = round(psutil.virtual_memory().available / 1024**2, 1)
elif self.type == 'swap_use_percent':
self._state = psutil.swap_memory().percent
elif self.type == 'swap_use':
self._state = round(psutil.swap_memory().used / 1024**3, 1)
elif self.type == 'swap_free':
self._state = round(psutil.swap_memory().free / 1024**3, 1)
elif self.type == 'processor_use':
self._state = round(psutil.cpu_percent(interval=None))
elif self.type == 'process':
if any(self.argument in l.name() for l in psutil.process_iter()):
self._state = STATE_ON
else:
self._state = STATE_OFF
elif self.type == 'network_out':
self._state = round(psutil.net_io_counters(pernic=True)
[self.argument][0] / 1024**2, 1)
elif self.type == 'network_in':
self._state = round(psutil.net_io_counters(pernic=True)
[self.argument][1] / 1024**2, 1)
elif self.type == 'packets_out':
self._state = psutil.net_io_counters(pernic=True)[self.argument][2]
elif self.type == 'packets_in':
self._state = psutil.net_io_counters(pernic=True)[self.argument][3]
elif self.type == 'ipv4_address':
self._state = psutil.net_if_addrs()[self.argument][0][1]
elif self.type == 'ipv6_address':
self._state = psutil.net_if_addrs()[self.argument][1][1]
elif self.type == 'last_boot':
self._state = dt_util.datetime_to_date_str(
dt_util.as_local(
dt_util.utc_from_timestamp(psutil.boot_time())))
elif self.type == 'since_last_boot':
self._state = dt_util.utcnow() - dt_util.utc_from_timestamp(
psutil.boot_time())
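# Editor's illustrative sketch (not part of the original platform): constructing
# a sensor directly, outside of Home Assistant, to inspect a single reading.
# 'memory_use_percent' is one of the SENSOR_TYPES keys defined above.
def _demo_sensor():
    sensor = SystemMonitorSensor('memory_use_percent')
    sensor.update()
    return sensor.name, sensor.state, sensor.unit_of_measurement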
|
mumuwoyou/vnpy-master
|
refs/heads/master
|
vnpy/trader/app/spreadTrading/stAlgo.py
|
4
|
# encoding: UTF-8
from math import floor
from vnpy.trader.vtConstant import (EMPTY_INT, EMPTY_FLOAT,
EMPTY_STRING, EMPTY_UNICODE,
DIRECTION_LONG, DIRECTION_SHORT,
STATUS_ALLTRADED, STATUS_CANCELLED, STATUS_REJECTED)
########################################################################
class StAlgoTemplate(object):
"""价差算法交易模板"""
MODE_LONGSHORT = u'双向' # both long and short
MODE_LONGONLY = u'做多' # long only
MODE_SHORTONLY = u'做空' # short only
SPREAD_LONG = 1
SPREAD_SHORT = 2
#----------------------------------------------------------------------
def __init__(self, algoEngine, spread):
"""Constructor"""
self.algoEngine = algoEngine # algo engine
self.spreadName = spread.name # spread name
self.spread = spread # spread object
self.algoName = EMPTY_STRING # algo name
self.active = False # running state
self.mode = self.MODE_LONGSHORT # trading mode
self.buyPrice = EMPTY_FLOAT # open/close trigger prices
self.sellPrice = EMPTY_FLOAT
self.shortPrice = EMPTY_FLOAT
self.coverPrice = EMPTY_FLOAT
self.maxPosSize = EMPTY_INT # maximum position size per side
self.maxOrderSize = EMPTY_INT # maximum size per order
#----------------------------------------------------------------------
def updateSpreadTick(self, spread):
""""""
raise NotImplementedError
#----------------------------------------------------------------------
def updateSpreadPos(self, spread):
""""""
raise NotImplementedError
#----------------------------------------------------------------------
def updateTrade(self, trade):
""""""
raise NotImplementedError
#----------------------------------------------------------------------
def updateOrder(self, order):
""""""
raise NotImplementedError
#----------------------------------------------------------------------
def updateTimer(self):
""""""
raise NotImplementedError
#----------------------------------------------------------------------
def start(self):
""""""
raise NotImplementedError
#----------------------------------------------------------------------
def stop(self):
""""""
raise NotImplementedError
#----------------------------------------------------------------------
def setBuyPrice(self, buyPrice):
"""设置买开的价格"""
self.buyPrice = buyPrice
#----------------------------------------------------------------------
def setSellPrice(self, sellPrice):
"""设置卖平的价格"""
self.sellPrice = sellPrice
#----------------------------------------------------------------------
def setShortPrice(self, shortPrice):
"""设置卖开的价格"""
self.shortPrice = shortPrice
#----------------------------------------------------------------------
def setCoverPrice(self, coverPrice):
"""设置买平的价格"""
self.coverPrice = coverPrice
#----------------------------------------------------------------------
def setMode(self, mode):
"""设置算法交易方向"""
self.mode = mode
#----------------------------------------------------------------------
def setMaxOrderSize(self, maxOrderSize):
"""设置最大单笔委托数量"""
self.maxOrderSize = maxOrderSize
#----------------------------------------------------------------------
def setMaxPosSize(self, maxPosSize):
"""设置最大持仓数量"""
self.maxPosSize = maxPosSize
#----------------------------------------------------------------------
def putEvent(self):
"""发出算法更新事件"""
self.algoEngine.putAlgoEvent(self)
#----------------------------------------------------------------------
def writeLog(self, content):
"""输出算法日志"""
prefix = ' '.join([self.spreadName, self.algoName])
content = ':'.join([prefix, content])
self.algoEngine.writeLog(content)
#----------------------------------------------------------------------
def getAlgoParams(self):
"""获取算法参数"""
d = {
"spreadName": self.spreadName,
"algoName": self.algoName,
"buyPrice": self.buyPrice,
"sellPrice": self.sellPrice,
"shortPrice": self.shortPrice,
"coverPrice": self.coverPrice,
"maxOrderSize": self.maxOrderSize,
"maxPosSize": self.maxPosSize,
"mode": self.mode
}
return d
#----------------------------------------------------------------------
def setAlgoParams(self, d):
"""设置算法参数"""
self.buyPrice = d.get('buyPrice', EMPTY_FLOAT)
self.sellPrice = d.get('sellPrice', EMPTY_FLOAT)
self.shortPrice = d.get('shortPrice', EMPTY_FLOAT)
self.coverPrice = d.get('coverPrice', EMPTY_FLOAT)
self.maxOrderSize = d.get('maxOrderSize', EMPTY_INT)
self.maxPosSize = d.get('maxPosSize', EMPTY_INT)
self.mode = d.get('mode', self.MODE_LONGSHORT)
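#----------------------------------------------------------------------
def _demo_params_roundtrip(self):
    """Editor's illustrative sketch (not part of the original template): the
    dict returned by getAlgoParams() can be fed straight back into
    setAlgoParams(), e.g. when persisting algo settings to JSON and
    restoring them later."""
    d = self.getAlgoParams()
    self.setAlgoParams(d)
    return d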
########################################################################
class SniperAlgo(StAlgoTemplate):
"""狙击算法(市价委托)"""
FINISHED_STATUS = [STATUS_ALLTRADED, STATUS_CANCELLED, STATUS_REJECTED]
#----------------------------------------------------------------------
def __init__(self, algoEngine, spread):
"""Constructor"""
super(SniperAlgo, self).__init__(algoEngine, spread)
self.algoName = u'Sniper'
self.quoteInterval = 2 # seconds to wait before cancelling and re-quoting the active leg
self.quoteCount = 0 # quote timer counter
self.hedgeInterval = 2 # seconds to wait before cancelling and re-sending passive-leg hedges
self.hedgeCount = 0 # hedge timer counter
self.activeVtSymbol = spread.activeLeg.vtSymbol # active leg symbol
self.passiveVtSymbols = [leg.vtSymbol for leg in spread.passiveLegs] # passive leg symbols
# dict caching the leg object of each symbol
self.legDict = {}
self.legDict[spread.activeLeg.vtSymbol] = spread.activeLeg
for leg in spread.passiveLegs:
self.legDict[leg.vtSymbol] = leg
self.hedgingTaskDict = {} # passive-leg volumes still to hedge, vtSymbol: volume
self.legOrderDict = {} # vtSymbol: list of vtOrderID
self.orderTradedDict = {} # vtOrderID: tradedVolume
#----------------------------------------------------------------------
def updateSpreadTick(self, spread):
"""价差行情更新"""
self.spread = spread
# 若算法没有启动则直接返回
if not self.active:
return
# 若当前已有主动腿委托则直接返回
if (self.activeVtSymbol in self.legOrderDict and
self.legOrderDict[self.activeVtSymbol]):
return
# long side allowed
if self.mode == self.MODE_LONGSHORT or self.mode == self.MODE_LONGONLY:
# buy to open
if (spread.netPos >= 0 and
spread.netPos < self.maxPosSize and
spread.askPrice <= self.buyPrice):
self.quoteActiveLeg(self.SPREAD_LONG)
self.writeLog(u'买入开仓')
# sell to close
elif (spread.netPos > 0 and
spread.bidPrice >= self.sellPrice):
self.quoteActiveLeg(self.SPREAD_SHORT)
self.writeLog(u'卖出平仓')
# short side allowed
if self.mode == self.MODE_LONGSHORT or self.mode == self.MODE_SHORTONLY:
# sell to open
if (spread.netPos <= 0 and
spread.netPos > -self.maxPosSize and
spread.bidPrice >= self.shortPrice):
self.quoteActiveLeg(self.SPREAD_SHORT)
self.writeLog(u'卖出开仓')
# buy to cover
elif (spread.netPos < 0 and
spread.askPrice <= self.coverPrice):
self.quoteActiveLeg(self.SPREAD_LONG)
self.writeLog(u'买入平仓')
#----------------------------------------------------------------------
def updateSpreadPos(self, spread):
"""价差持仓更新"""
self.spread = spread
#----------------------------------------------------------------------
def updateTrade(self, trade):
"""成交更新"""
pass
#----------------------------------------------------------------------
def updateOrder(self, order):
"""委托更新"""
if not self.active:
return
vtOrderID = order.vtOrderID
vtSymbol = order.vtSymbol
newTradedVolume = order.tradedVolume
lastTradedVolume = self.orderTradedDict.get(vtOrderID, 0)
# check for newly filled volume
if newTradedVolume > lastTradedVolume:
self.orderTradedDict[vtOrderID] = newTradedVolume # cache the filled volume of this order
volume = newTradedVolume - lastTradedVolume # volume filled this time
if vtSymbol == self.activeVtSymbol:
self.newActiveLegTrade(vtSymbol, order.direction, volume)
else:
self.newPassiveLegTrade(vtSymbol, order.direction, volume)
# handle finished orders
if order.status in self.FINISHED_STATUS:
vtOrderID = order.vtOrderID
vtSymbol = order.vtSymbol
# remove this order from the working-order list
orderList = self.legOrderDict.get(vtSymbol, None)
if orderList and vtOrderID in orderList:
orderList.remove(vtOrderID)
# if this is a passive leg with no working orders left, hedge it
if not orderList and vtSymbol in self.passiveVtSymbols:
self.hedgePassiveLeg(vtSymbol)
#----------------------------------------------------------------------
def updateTimer(self):
"""计时更新"""
if not self.active:
return
self.quoteCount += 1
self.hedgeCount += 1
# once the quote interval is reached, cancel all unfilled active-leg orders
# after the cancel confirmations clear the order list, re-quote on the next spread update
if self.quoteCount > self.quoteInterval:
self.cancelLegOrder(self.activeVtSymbol)
self.quoteCount = 0
# once the hedge interval is reached, cancel all unfilled passive-leg orders
# new hedge orders are sent automatically after the cancel confirmations arrive
if self.hedgeCount > self.hedgeInterval:
self.cancelAllPassiveLegOrders()
self.hedgeCount = 0
#----------------------------------------------------------------------
def checkPrice(self):
"""检查价格"""
# 做多检查
if self.mode != self.MODE_SHORTONLY:
if self.buyPrice >= self.sellPrice:
self.writeLog(u'启动失败,允许多头交易时BuyPrice必须小于SellPrice')
return False
# short-side check
if self.mode != self.MODE_LONGONLY:
if self.shortPrice <= self.coverPrice:
self.writeLog(u'启动失败,允许空头交易时ShortPrice必须大于CoverPrice')
return False
# combined long/short check
if self.mode == self.MODE_LONGSHORT:
if self.buyPrice >= self.coverPrice:
self.writeLog(u'启动失败,允许双向交易时BuyPrice必须小于CoverPrice')
return False
if self.shortPrice <= self.sellPrice:
self.writeLog(u'启动失败,允许双向交易时ShortPrice必须大于SellPrice')
return False
return True
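#----------------------------------------------------------------------
def _demo_valid_prices(self):
    """Editor's illustrative sketch (not part of the original algorithm): one
    set of prices that satisfies every constraint enforced by checkPrice()
    in MODE_LONGSHORT: buyPrice < sellPrice, shortPrice > coverPrice,
    buyPrice < coverPrice and shortPrice > sellPrice."""
    self.setBuyPrice(1.0)
    self.setSellPrice(2.0)
    self.setCoverPrice(3.0)
    self.setShortPrice(4.0)
    return self.checkPrice() # -> True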
#----------------------------------------------------------------------
def start(self):
"""启动"""
# 如果已经运行则直接返回状态
if self.active:
return self.active
# sanity-check the prices
if not self.checkPrice():
return False
# start the algo
self.quoteCount = 0
self.hedgeCount = 0
self.active = True
self.writeLog(u'算法启动')
return self.active
#----------------------------------------------------------------------
def stop(self):
"""停止"""
if self.active:
self.hedgingTaskDict.clear()
self.cancelAllOrders()
self.active = False
self.writeLog(u'算法停止')
return self.active
#----------------------------------------------------------------------
def sendLegOrder(self, leg, legVolume):
"""发送每条腿的委托"""
vtSymbol = leg.vtSymbol
volume = abs(legVolume)
payup = leg.payup
# send the order
if legVolume > 0:
price = leg.askPrice
if leg.shortPos > 0:
orderList = self.algoEngine.cover(vtSymbol, price, volume, payup)
else:
orderList = self.algoEngine.buy(vtSymbol, price, volume, payup)
elif legVolume < 0:
price = leg.bidPrice
if leg.longPos > 0:
orderList = self.algoEngine.sell(vtSymbol, price, volume, payup)
else:
orderList = self.algoEngine.short(vtSymbol, price, volume, payup)
# store the order IDs in the dict
if vtSymbol not in self.legOrderDict:
self.legOrderDict[vtSymbol] = orderList
else:
self.legOrderDict[vtSymbol].extend(orderList)
#----------------------------------------------------------------------
def quoteActiveLeg(self, direction):
"""发出主动腿"""
spread = self.spread
# 首先计算不带正负号的价差委托量
if direction == self.SPREAD_LONG:
spreadVolume = min(spread.askVolume,
self.maxPosSize - spread.netPos,
self.maxOrderSize)
# with an existing short spread position, cap the order at closing that short
if spread.shortPos > 0:
spreadVolume = min(spreadVolume, spread.shortPos)
else:
spreadVolume = min(spread.bidVolume,
self.maxPosSize + spread.netPos,
self.maxOrderSize)
# with an existing long spread position, cap the order at closing that long
if spread.longPos > 0:
spreadVolume = min(spreadVolume, spread.longPos)
if spreadVolume <= 0:
return
# apply the spread direction sign
if direction == self.SPREAD_SHORT:
spreadVolume = -spreadVolume
# compute the active-leg order volume
leg = self.legDict[self.activeVtSymbol]
legVolume = spreadVolume * leg.ratio
self.sendLegOrder(leg, legVolume)
self.writeLog(u'发出新的主动腿%s狙击单' %self.activeVtSymbol)
self.quoteCount = 0 # reset the active-leg re-quote wait counter
#----------------------------------------------------------------------
def hedgePassiveLeg(self, vtSymbol):
"""被动腿对冲"""
if vtSymbol not in self.hedgingTaskDict:
return
orderList = self.legOrderDict.get(vtSymbol, [])
if orderList:
return
legVolume = self.hedgingTaskDict[vtSymbol]
leg = self.legDict[vtSymbol]
self.sendLegOrder(leg, legVolume)
self.writeLog(u'发出新的被动腿%s对冲单' %vtSymbol)
#----------------------------------------------------------------------
def hedgeAllPassiveLegs(self):
"""执行所有被动腿对冲"""
for vtSymbol in self.hedgingTaskDict.keys():
self.hedgePassiveLeg(vtSymbol)
self.hedgeCount = 0 # reset the passive-leg hedge wait counter
#----------------------------------------------------------------------
def newActiveLegTrade(self, vtSymbol, direction, volume):
"""新的主动腿成交"""
# log it
self.writeLog(u'主动腿%s成交,方向%s,数量%s' %(vtSymbol, direction, volume))
# attach the direction sign to the active-leg fill
if direction == DIRECTION_SHORT:
volume = -volume
# compute the spread position implied by the active-leg fill
spread = self.spread
activeRatio = spread.activeLeg.ratio
spreadVolume = round(volume / activeRatio) # round to the number of spread units this fill represents
# compute the passive-leg volumes that must be hedged for the new spread position
for leg in self.spread.passiveLegs:
newHedgingTask = leg.ratio * spreadVolume
if leg.vtSymbol not in self.hedgingTaskDict:
self.hedgingTaskDict[leg.vtSymbol] = newHedgingTask
else:
self.hedgingTaskDict[leg.vtSymbol] += newHedgingTask
# send passive-leg hedge orders
self.hedgeAllPassiveLegs()
#----------------------------------------------------------------------
def newPassiveLegTrade(self, vtSymbol, direction, volume):
"""新的被动腿成交"""
if vtSymbol in self.hedgingTaskDict:
# compute the hedged volume
if direction == DIRECTION_LONG:
hedgedVolume = volume
else:
hedgedVolume = -volume
# compute the remaining unhedged volume
self.hedgingTaskDict[vtSymbol] -= hedgedVolume
# remove the entry once fully hedged
if not self.hedgingTaskDict[vtSymbol]:
del self.hedgingTaskDict[vtSymbol]
# log it
self.writeLog(u'被动腿%s成交,方向%s,数量%s' %(vtSymbol, direction, volume))
#----------------------------------------------------------------------
def cancelLegOrder(self, vtSymbol):
"""撤销某条腿的委托"""
if vtSymbol not in self.legOrderDict:
return
orderList = self.legOrderDict[vtSymbol]
if not orderList:
return
for vtOrderID in orderList:
self.algoEngine.cancelOrder(vtOrderID)
self.writeLog(u'撤单%s的所有委托' %vtSymbol)
#----------------------------------------------------------------------
def cancelAllOrders(self):
"""撤销全部委托"""
for orderList in self.legOrderDict.values():
for vtOrderID in orderList:
self.algoEngine.cancelOrder(vtOrderID)
self.writeLog(u'全部撤单')
#----------------------------------------------------------------------
def cancelAllPassiveLegOrders(self):
"""撤销全部被动腿委托"""
cancelPassive = False
for vtSymbol in self.passiveVtSymbols:
if vtSymbol in self.legOrderDict and self.legOrderDict[vtSymbol]:
self.cancelLegOrder(vtSymbol)
cancelPassive = True
# only log when cancel requests were actually sent
if cancelPassive:
self.writeLog(u'被动腿全撤')
|
caterinaurban/Typpete
|
refs/heads/master
|
typpete/src/stubs/functions.py
|
1
|
"""Stub file for built in functions"""
from typing import TypeVar, List, Tuple, Dict, Set, Union, Type, Callable, Generic
from sys import IO
Tf = TypeVar("Tf")
Uf = TypeVar("Uf")
Num = TypeVar("Num", bound=complex)
IntOrFloat = TypeVar("IntOrFloat", int, float)
Str = TypeVar("Str", str, bytes)
Seq = TypeVar("Seq", Str, List[Tf])
NumOrStr = TypeVar("NumOrStr", Num, Str)
NumOrStrNoComplex = TypeVar("NumOrStrNoComplex", bool, int, float, Str)
class Exception():
pass
Titer = TypeVar("Titer")
class Iterator(Generic[Titer]):
pass
def abs(x: Num) -> Num:
"""Return the absolute value of the argument. """
pass
def all(_: Seq) -> bool:
"""
Return True if bool(x) is True for all values x in the iterable.
If the iterable is empty, return True.
"""
pass
def any(_: Seq) -> bool:
"""
Return True if bool(x) is True for any x in the iterable.
If the iterable is empty, return False.
"""
pass
def ascii(_: object) -> str:
"""
Return an ASCII-only representation of an object.
As repr(), return a string containing a printable representation of an
object, but escape the non-ASCII characters in the string returned by
repr() using \\x, \\u or \\U escapes. This generates a string similar
to that returned by repr() in Python 2.
"""
pass
def bin(_: int) -> str:
"""
Return the binary representation of an integer.
>>> bin(2796202)
'0b1010101010101010101010'
"""
pass
def bool(_: object = None) -> bool:
"""Convert a value to a Boolean."""
pass
def bytes(_: object = None) -> bytes:
"""Return a new "bytes" object."""
pass
def callable(_: object) -> bool:
"""
Return whether the object is callable (i.e., some kind of function).
Note that classes are callable, as are instances of classes with a
__call__() method.
"""
pass
def chr(_: int) -> str:
""" Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff. """
pass
def complex(_: NumOrStr = None) -> complex:
"""Create a complex number"""
pass
def dict(_: Dict[Tf, Uf] = None) -> Dict[Tf, Uf]:
"""Create a new dictionary.
Make argument of type Mappable after implementing interfaces
"""
pass
def dir(_: object = None) -> List[str]:
"""Return the list of names in the current local scope."""
pass
def divmod(_: IntOrFloat, __: IntOrFloat) -> Tuple[IntOrFloat, IntOrFloat]:
""" Return the tuple (x//y, x%y). Invariant: div*y + mod == x. """
pass
def enumerate(l: List[Tf]) -> List[Tuple[int, Tf]]:
"""Iterate over a list with key and value"""
pass
def float(_: NumOrStrNoComplex = None) -> float:
"""Convert a string or a number to floating point."""
pass
def format(_: object) -> str:
"""Convert a value to a "formatted" representation."""
pass
def getattr(o: object, name: str) -> object:
"""
Return the value of the named attribute of object.
Name must be a string. If the string is the name of one of the object’s attributes,
the result is the value of that attribute.
For example, getattr(x, 'foobar') is equivalent to x.foobar.
"""
pass
def hasattr(o: object, attr: str) -> bool:
"""Check if an object has an attribute"""
pass
def hash(_: object) -> int:
"""
Return the hash value for the given object.
Two objects that compare equal must also have the same hash value, but the
reverse is not necessarily true."""
pass
def hex(_: int) -> str:
"""
Return the hexadecimal representation of an integer.
>>> hex(12648430)
'0xc0ffee'
"""
pass
def id(_: object) -> int:
"""
Return the identity of an object.
This is guaranteed to be unique among simultaneously existing objects.
(CPython uses the object's memory address.)
"""
pass
def input(_: object = None) -> str:
"""
Read a string from standard input. The trailing newline is stripped.
The prompt string, if given, is printed to standard output without a
trailing newline before reading input.
If the user hits EOF (*nix: Ctrl-D, Windows: Ctrl-Z+Return), raise EOFError.
On *nix systems, readline is used if available.
"""
pass
def int(_: NumOrStrNoComplex = None, base: int = 10) -> int:
"""Convert a number or string to an integer."""
pass
def isinstance(x: object, y: object) -> bool:
"""Check if object x is an instance of type y"""
pass
def iter(l: List[Tf]) -> Iterator[Tf]:
pass
def next(l: Iterator[Tf]) -> Tf:
pass
def len(_: object) -> int:
""" Return the number of items in a container. """
pass
def list(_: Union[List[Tf], Set[Tf], Dict[Tf, Uf], Iterator[Tf]]) -> List[Tf]:
pass
def min(_: List[Tf]) -> Tf:
"""Return the minimum object from the list
TODO:
- Verify that the argument is comparable
- Add support for different iterable objects
"""
pass
def max(_: List[Tf]) -> Tf:
"""Return the maximum object from the list any
TODO:
- Verify that the argument is comparable
- Add support for different iterable objects
"""
pass
def object() -> object:
"""Return a new featureless object."""
pass
def oct(_: int) -> str:
"""
Return the octal representation of an integer.
>>> oct(342391)
'0o1234567'
"""
pass
def open(file: Union[str, bytes, int], mode: str = None,
buffering: int = None, encoding: str = None, errors: str = None, newline: str = None, closefd: bool = None) -> IO:
"""Open a file stream"""
pass
def pow(x, y):
"""Equivalent to x**y"""
return x ** y
def print(_: object) -> None:
"""Print an object"""
pass
def range(x: int, y:int=None, z:int=None) -> List[int]:
"""Return a list of int from 0 (inclusive) to `x` (exclusive)
TODO: make it RangeObject after implementing interfaces
"""
pass
def repr(_: object) -> str:
"""
Return the canonical string representation of the object.
For many object types, including most builtins, eval(repr(obj)) == obj.
"""
pass
def reversed(_: List[Tf]) -> Iterator[Tf]:
"""Return a reversed version of the input list
TODO: make it return reversed object after implementing interfaces
"""
pass
def round(_: float) -> int:
"""
round(number[, ndigits]) -> number
Round a number to a given precision in decimal digits (default 0 digits).
This returns an int when called with one argument, otherwise the
same type as the number. ndigits may be negative.
"""
pass
def set(l: List[Tf] = None) -> Set[Tf]:
"""Create a set of unique elements
If the parameter is provided, use the elements in the list to create the set,
otherwise, create an empty set."""
pass
def setattr(o: object, name: str, val: object) -> None:
"""Assign `val` to the attribute, provided the object allows it.
For example, setattr(x, 'foobar', 123) is equivalent to x.foobar = 123.
"""
pass
def sorted(_: List[Tf], key: Callable[[Tf], float]=lambda x: 1.0, reverse: bool=None) -> List[Tf]:
"""Return the input list in a sorted order
TODO: Add support for different iterable objects
"""
pass
def str(_: object = None) -> str:
"""Return a str version of object."""
pass
def sum(_: List[Num]) -> Num:
"""Return the sum of numbers in a list
TODO: Add support for different iterable objects
"""
pass
def type(o: Tf) -> Type[Tf]:
pass
def zip(x: Union[List[Tf], str], y: List[Uf]) -> List[Tuple[Tf, Uf]]:
"""This function returns a list of tuples,
where the i-th tuple contains the i-th element from each of the argument lists"""
pass
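# Editor's illustrative sketch (not part of the original stubs): the kind of
# code the zip() signature above is meant to type-check -- pairing a str with a
# List[int] yields List[Tuple[str, int]] under these stubs.
def _demo_zip_types(names: str, scores: List[int]) -> List[Tuple[str, int]]:
    return zip(names, scores)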
|
edx/ansible
|
refs/heads/stable-1.9-plus-edx
|
v2/ansible/utils/cli.py
|
8
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import operator
import optparse
import os
import time
import yaml
from ansible import __version__
from ansible import constants as C
# FIXME: documentation for methods here, which have mostly been
# copied directly over from the old utils/__init__.py
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=formatter)
def base_parser(usage="", output_opts=False, runas_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
parser = SortedOptParser(usage, version=version("%prog"))
parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
default=C.DEFAULT_HOST_LIST)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password')
parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
help='ask for su password')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
dest='vault_password_file', help="vault password file")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
default=None)
if subset_opts:
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true",
dest='sudo', help="run operations with sudo (nopasswd)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root)') # Can't default to root because we need to detect when this option was given
parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER,
dest='remote_user', help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
parser.add_option('-S', '--su', default=C.DEFAULT_SU,
action='store_true', help='run operations with su')
parser.add_option('-R', '--su-user', help='run operations with su as this '
'user (default=%s)' % C.DEFAULT_SU_USER)
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur"
)
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
return parser
def version(prog):
result = "{0} {1}".format(prog, __version__)
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
def version_info(gitinfo=False):
if gitinfo:
# expensive call, use with care
ansible_version_string = version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if .git is a file. If it is, we are inside a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# The gitdir referenced by the .git file may be an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = _git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
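# Editor's illustrative sketch (not part of the original module): typical use of
# base_parser() and version() from a CLI entry point. The option values passed
# to parse_args() here are hypothetical.
def _demo_cli():
    parser = base_parser(usage="%prog <options>", runas_opts=True, check_opts=True)
    options, args = parser.parse_args(['-i', 'hosts', '--check'])
    print(version("%prog"))
    return options.inventory, options.check # -> ('hosts', True)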
|
vlegoff/tsunami
|
refs/heads/master
|
src/primaires/objet/types/indefini.py
|
1
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type Indefini."""
from .base import BaseType
class Indefini(BaseType):
"""Type d'objet: indéfini.
"""
nom_type = "indéfini"
|
saguas/frappe
|
refs/heads/develop
|
frappe/geo/doctype/currency/currency.py
|
69
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: See license.txt
from __future__ import unicode_literals
import frappe
from frappe import throw, _
from frappe.model.document import Document
class Currency(Document):
def validate(self):
if not frappe.flags.in_install_app:
frappe.clear_cache()
|
40223114/2015_g4
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py
|
733
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
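# Editor's illustrative sketch (not part of the original module): the three
# lookups getLevelName() can perform against _levelNames, including the
# fallback string for an unregistered numeric level.
def _demo_get_level_name():
    return getLevelName(DEBUG), getLevelName('WARNING'), getLevelName(15)
    # -> ('DEBUG', 30, 'Level 15')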
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
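# Example sketch: a custom level can be registered and then resolved in both
# directions through the mapping above (the name 'TRACE' is illustrative):
#
#   TRACE = 5
#   addLevelName(TRACE, 'TRACE')
#   getLevelName(TRACE)      # -> 'TRACE'
#   _checkLevel('TRACE')     # -> 5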
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
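# Example sketch: a socket-based receiver that has unpickled a dict 'd' (as
# sent by logging.handlers.SocketHandler) can rebuild and dispatch the record:
#
#   record = makeLogRecord(d)
#   getLogger(record.name).handle(record)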
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
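# For illustration, the same layout expressed in each of the three supported
# styles selected by the Formatter 'style' argument below:
#
#   '%' : '%(asctime)s %(levelname)s %(name)s: %(message)s'
#   '{' : '{asctime} {levelname} {name}: {message}'
#   '$' : '${asctime} ${levelname} ${name}: ${message}'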
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
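    # As noted in the docstring above, timestamps can be switched to UTC for
    # every formatter by rebinding the class-level converter, e.g. (sketch):
    #
    #   Formatter.converter = time.gmtime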
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
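# Example sketch of direct Formatter use (normally a Handler calls format()
# on your behalf); 'record' stands for any LogRecord instance:
#
#   fmt = Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
#   text = fmt.format(record)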
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
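# Example sketch, following the class docstring: attached to a handler 'h',
# this filter passes records from loggers "A.B" and "A.B.C" but not "A.BB":
#
#   h.addFilter(Filter('A.B'))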
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
.. versionchanged: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
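# Because plain callables are also accepted (see above), a one-off predicate
# can be attached directly, e.g. to drop DEBUG records from a handler 'h':
#
#   h.addFilter(lambda record: record.levelno > DEBUG)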
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
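# A minimal concrete handler only needs to override emit(); the names
# 'ListHandler' and 'store' below are purely illustrative (sketch):
#
#   class ListHandler(Handler):
#       def __init__(self, store):
#           Handler.__init__(self)
#           self.store = store
#       def emit(self, record):
#           self.store.append(self.format(record))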
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(nm)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
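    # Example sketch of a subclass that injects context from 'extra' into the
    # message itself (the 'connid' key is purely illustrative):
    #
    #   class CtxAdapter(LoggerAdapter):
    #       def process(self, msg, kwargs):
    #           kwargs["extra"] = self.extra
    #           return '[%s] %s' % (self.extra['connid'], msg), kwargs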
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
       ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
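# Example sketches of one-shot configuration via basicConfig() (the file name
# 'app.log' is illustrative):
#
#   basicConfig(level=DEBUG)                                   # stderr, BASIC_FORMAT
#   basicConfig(filename='app.log', filemode='w', level=INFO)  # log to a file
#   basicConfig(format='{levelname}:{name}:{message}', style='{')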
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
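# As described in the docstring above, a library would typically do (sketch,
# with 'mylib' standing for the library's top-level logger name):
#
#   getLogger('mylib').addHandler(NullHandler())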
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
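# Example sketch: after calling the function above with a true value,
# warnings.warn(...) output is logged via the 'py.warnings' logger instead of
# being written to stderr:
#
#   captureWarnings(True)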
|
dine1987/Docker
|
refs/heads/master
|
tests/test_run_gunicorn.py
|
37
|
# -*- coding: utf-8 -*-
import unittest
import docker_registry.run as run
import mock
class TestRunGunicorn(unittest.TestCase):
@mock.patch('argparse.ArgumentParser.parse_args')
@mock.patch('os.execl')
def test_exec_gunicorn(self, mock_execl, mock_parse_args):
run.run_gunicorn()
self.assertEqual(mock_execl.call_count, 1)
# ensure that the executable's path ends with 'gunicorn', so we have
# some confidence that it called the correct executable
self.assertTrue(mock_execl.call_args[0][0].endswith('gunicorn'))
@mock.patch('argparse.ArgumentParser.parse_args')
@mock.patch('os.execl')
def test_parses_args(self, mock_execl, mock_parse_args):
run.run_gunicorn()
# ensure that argument parsing is happening
mock_parse_args.assert_called_once_with()
@mock.patch('sys.exit')
@mock.patch('distutils.spawn.find_executable', autospec=True)
@mock.patch('argparse.ArgumentParser.parse_args')
@mock.patch('os.execl')
def test_gunicorn_not_found(self, mock_execl, mock_parse_args,
mock_find_exec, mock_exit):
mock_find_exec.return_value = None
run.run_gunicorn()
# ensure that sys.exit was called
mock_exit.assert_called_once_with(1)
|
damianrusinek/classes-pas
|
refs/heads/master
|
kik/client.py
|
1
|
import sys
import socket
from kikutils import *
MY_SIGN = 'x'
THEIR_SIGN = 'o'
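# Protocol sketch as used below: fixed-size messages (pad_msg/MSG_SIZE from
# kikutils), fields numbered 0-8, moves exchanged as 'CHECK <n>' and the game
# closed with 'END'.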
if __name__ == "__main__":
if len(sys.argv) != 3:
sys.stderr.write("usage: %s ip port\n" % (sys.argv[0], ))
exit(1)
try:
addr = sys.argv[1]
port = int(sys.argv[2])
assert port > 0
except:
sys.stderr.write("error: invalid port\n")
exit(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((addr, port))
    board = ['?'] * 9  # a mutable list so individual fields can be assigned below
sock.send(pad_msg('START'))
resp = sock.recv(MSG_SIZE).strip()
if resp != 'OK':
print 'Invalid response:', resp
sock.close()
exit(1)
while True:
# Select field
while True:
print_board(board, numbers=True)
try:
fieldnb = int(raw_input('What is your field number? '))
if fieldnb < 0 or fieldnb > 8:
raise ValueError
if board[fieldnb] != '?':
print 'This field is not empty'
else:
break
except ValueError:
print 'Invalid field number'
board[fieldnb] = MY_SIGN
sock.send(pad_msg('CHECK ' + str(fieldnb)))
        resp = sock.recv(MSG_SIZE).strip()
        if resp != 'OK':
            print 'Invalid response:', resp
sock.close()
break
cmd = sock.recv(MSG_SIZE).strip()
if cmd == 'END' or cmd == '':
print_board(board)
winner = check_winner(board)
if winner is None:
print 'Game ended unexpectedly.'
            elif winner == MY_SIGN:
print 'You won!'
else:
print 'You lost!'
sock.close()
break
if cmd.startswith('CHECK '):
try:
fieldnb = int(cmd.split(' ')[1])
if fieldnb < 0 or fieldnb > 8:
raise ValueError
except ValueError:
print 'Invalid command'
sock.send(pad_msg('ERROR'))
sock.close()
break
if board[fieldnb] != '?':
print 'Field taken'
sock.send(pad_msg('ERROR'))
sock.close()
break
board[fieldnb] = THEIR_SIGN
winner = check_winner(board)
if winner in [MY_SIGN, THEIR_SIGN]:
print_board(board)
if winner == MY_SIGN:
print 'You won!'
else:
print 'You lost!'
                sock.send(pad_msg('END'))
                sock.close()
break
print 'Invalid command'
            sock.send(pad_msg('ERROR'))
            sock.close()
break
|
cubaeurokatmic/gui
|
refs/heads/master
|
lib/python/Components/Renderer/VolumeText.py
|
52
|
#######################################################################
#
#
# Volume Text Renderer for Dreambox/Enigma-2
# Coded by Vali (c)2010
# Support: www.dreambox-tools.info
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Components.VariableText import VariableText
from enigma import eLabel, eDVBVolumecontrol, eTimer
from Renderer import Renderer
class VolumeText(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
self.vol_timer = eTimer()
self.vol_timer.callback.append(self.pollme)
GUI_WIDGET = eLabel
def changed(self, what):
if not self.suspended:
self.text = str(eDVBVolumecontrol.getInstance().getVolume())
def pollme(self):
self.changed(None)
def onShow(self):
self.suspended = False
self.vol_timer.start(200)
def onHide(self):
self.suspended = True
self.vol_timer.stop()
|
karllessard/tensorflow
|
refs/heads/master
|
tensorflow/lite/testing/op_tests/identity.py
|
16
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for identity."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import array_ops
@register_make_test_function()
def make_identity_tests(options):
"""Make a set of tests to do identity."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [3, 3]],
"op_to_use": [
"identity", "identity_n", "snapshot", "identity_n_with_2_inputs"
],
}]
def build_graph(parameters):
"""Make a set of tests to do identity."""
input_tensors = []
input_count = (2 if parameters["op_to_use"] == "identity_n_with_2_inputs"
else 1)
input_tensors = [
tf.compat.v1.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
for _ in range(input_count)
]
    # We add the Multiply before Identity just as a workaround to make the test
# pass when input_shape is scalar.
# During graph transformation, TOCO will replace the Identity op with
# Reshape when input has shape. However, currently TOCO can't distinguish
# between missing shape and scalar shape. As a result, when input has scalar
# shape, this conversion still fails.
    # TODO(b/129197312): remove the workaround code once the bug is fixed.
inputs_doubled = [input_tensor * 2.0 for input_tensor in input_tensors]
if parameters["op_to_use"] == "identity":
identity_outputs = [tf.identity(inputs_doubled[0])]
elif parameters["op_to_use"] == "snapshot":
identity_outputs = [array_ops.snapshot(inputs_doubled[0])]
elif parameters["op_to_use"] in ("identity_n", "identity_n_with_2_inputs"):
identity_outputs = tf.identity_n(inputs_doubled)
return input_tensors, identity_outputs
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
for _ in range(len(inputs))
]
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
robovm/robovm-studio
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/sshserver.py
|
93
|
# sshserver.py - ssh protocol server support for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import util, hook, wireproto, changegroup
import os, sys
class sshserver(object):
def __init__(self, ui, repo):
self.ui = ui
self.repo = repo
self.lock = None
self.fin = ui.fin
self.fout = ui.fout
hook.redirect(True)
ui.fout = repo.ui.fout = ui.ferr
# Prevent insertion/deletion of CRs
util.setbinary(self.fin)
util.setbinary(self.fout)
def getargs(self, args):
data = {}
keys = args.split()
for n in xrange(len(keys)):
argline = self.fin.readline()[:-1]
arg, l = argline.split()
if arg not in keys:
raise util.Abort("unexpected parameter %r" % arg)
if arg == '*':
star = {}
for k in xrange(int(l)):
argline = self.fin.readline()[:-1]
arg, l = argline.split()
val = self.fin.read(int(l))
star[arg] = val
data['*'] = star
else:
val = self.fin.read(int(l))
data[arg] = val
return [data[k] for k in keys]
def getarg(self, name):
return self.getargs(name)[0]
def getfile(self, fpout):
self.sendresponse('')
count = int(self.fin.readline())
while count:
fpout.write(self.fin.read(count))
count = int(self.fin.readline())
def redirect(self):
pass
def groupchunks(self, changegroup):
while True:
d = changegroup.read(4096)
if not d:
break
yield d
def sendresponse(self, v):
self.fout.write("%d\n" % len(v))
self.fout.write(v)
self.fout.flush()
def sendstream(self, source):
write = self.fout.write
for chunk in source.gen:
write(chunk)
self.fout.flush()
def sendpushresponse(self, rsp):
self.sendresponse('')
self.sendresponse(str(rsp.res))
def sendpusherror(self, rsp):
self.sendresponse(rsp.res)
def sendooberror(self, rsp):
self.ui.ferr.write('%s\n-\n' % rsp.message)
self.ui.ferr.flush()
self.fout.write('\n')
self.fout.flush()
def serve_forever(self):
try:
while self.serve_one():
pass
finally:
if self.lock is not None:
self.lock.release()
sys.exit(0)
handlers = {
str: sendresponse,
wireproto.streamres: sendstream,
wireproto.pushres: sendpushresponse,
wireproto.pusherr: sendpusherror,
wireproto.ooberror: sendooberror,
}
def serve_one(self):
cmd = self.fin.readline()[:-1]
if cmd and cmd in wireproto.commands:
rsp = wireproto.dispatch(self.repo, self, cmd)
self.handlers[rsp.__class__](self, rsp)
elif cmd:
impl = getattr(self, 'do_' + cmd, None)
if impl:
r = impl()
if r is not None:
self.sendresponse(r)
else: self.sendresponse("")
return cmd != ''
def do_lock(self):
'''DEPRECATED - allowing remote client to lock repo is not safe'''
self.lock = self.repo.lock()
return ""
def do_unlock(self):
'''DEPRECATED'''
if self.lock:
self.lock.release()
self.lock = None
return ""
def do_addchangegroup(self):
'''DEPRECATED'''
if not self.lock:
self.sendresponse("not locked")
return
self.sendresponse("")
cg = changegroup.unbundle10(self.fin, "UN")
r = self.repo.addchangegroup(cg, 'serve', self._client())
self.lock.release()
return str(r)
def _client(self):
client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
return 'remote:ssh:' + client
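# Illustrative sketch, not part of mercurial: the stream handled above is
# length-prefixed. sendresponse() writes "<len>\n" followed by the payload, and
# getargs() expects "<key> <len>\n" header lines followed by exactly <len> bytes
# of value. A hypothetical client-side reader for a single response would look
# roughly like this (any file-like object with readline()/read() works):
def _read_response_sketch(pipe):
    # read the decimal length line, then exactly that many bytes of payload
    length = int(pipe.readline())
    return pipe.read(length)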
|
magicrub/MissionPlanner
|
refs/heads/master
|
Lib/encodings/iso8859_3.py
|
93
|
""" Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
u'\u02d8' # 0xA2 -> BREVE
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe'
u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\ufffe'
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
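# Illustrative sketch, not part of the gencodec output: the tables above drive both
# directions of the codec. For example, u'\u0126' (LATIN CAPITAL LETTER H WITH STROKE)
# encodes to byte 0xA1 and decodes back to the same character.
def _roundtrip_sketch(text):
    # charmap_encode/charmap_decode are the primitives wrapped by the Codec class above
    encoded, _ = codecs.charmap_encode(text, 'strict', encoding_table)
    decoded, _ = codecs.charmap_decode(encoded, 'strict', decoding_table)
    return decoded == text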
|
duke2007/koy
|
refs/heads/master
|
blog/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
nerosketch/djing
|
refs/heads/master
|
gw_app/urls.py
|
2
|
from django.urls import path
from gw_app import views
app_name = 'gw_app'
urlpatterns = [
path('', view=views.NasListView.as_view(), name='home'),
path('add/', view=views.NasCreateView.as_view(), name='add'),
path('<int:nas_id>/del/', views.NasDeleteView.as_view(), name='del'),
path('<int:nas_id>/edit/', views.NasUpdateView.as_view(), name='edit'),
]
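# Illustrative sketch, not used by the app: because app_name is set above, these
# routes are reversed through the 'gw_app' namespace. The URL prefix depends on how
# the project ROOT_URLCONF includes this module, e.g. path('gw/', include('gw_app.urls')),
# which is an assumption here.
def _example_reverse():
    from django.urls import reverse
    # resolves the list view and the edit view for NAS id 1
    return reverse('gw_app:home'), reverse('gw_app:edit', kwargs={'nas_id': 1})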
|
srishanbhattarai/Harbor-CLI
|
refs/heads/dev
|
packages/cli/tests/utils/test_branch.py
|
1
|
''' Test for branch utility. '''
from lib.utils.branch import branch
def test_branch_v0():
''' It works correctly for truthy predicate. '''
truthy_predicate = lambda *args: True
string_1 = 'Hello world'
string_2 = 'Foo bar'
value = branch(truthy_predicate)(string_1, string_2)
assert value == string_1
def test_branch_v1():
''' It works correctly for falsy predicate. '''
falsy_predicate = lambda *args: False
string_1 = 'Hello world'
string_2 = 'Foo bar'
value = branch(falsy_predicate)(string_1, string_2)
assert value == string_2
def test_branch_v2():
''' It works correctly for truthy non-function predicates'''
truthy_predicate = True
string_1 = 'Hello world'
string_2 = 'Foo bar'
value = branch(truthy_predicate)(string_1, string_2)
assert value == string_1
def test_branch_v3():
''' It works correctly for falsy non-function predicates'''
falsy_predicate = False
string_1 = 'Hello world'
string_2 = 'Foo bar'
value = branch(falsy_predicate)(string_1, string_2)
assert value == string_2
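# A minimal sketch of the behaviour pinned down by the tests above (the real
# implementation lives in lib.utils.branch and may differ): branch() accepts either a
# callable or a plain value as the predicate and returns the first argument when the
# predicate is truthy, otherwise the second.
def _branch_sketch(predicate):
    def choose(on_true, on_false):
        result = predicate(on_true, on_false) if callable(predicate) else predicate
        return on_true if result else on_false
    return choose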
|
LeonBondeLarsen/ATS
|
refs/heads/master
|
box1monitor/bin/get_temperature.py
|
2
|
from sense_hat import SenseHat
sense = SenseHat()
print(round(sense.get_temperature(),1))
|
selaux/ck2launcher-gtk
|
refs/heads/master
|
ck2launcher.py
|
1
|
#!/usr/bin/python
# This is an alternative launcher for Crusader Kings 2
# Dependencies: pygobject, appdirs
import os
import subprocess
import json
import glob
import re
import appdirs
from gi.repository import Gtk
from gi.repository import Gdk
APP_NAME = 'ck2-launcher'
DLC_DEFAULT_SELECTION = True
MOD_DEFAULT_SELECTION = False
def get_config_path():
"""
    Gets the config path for our little launcher
"""
return os.path.join(appdirs.user_data_dir('ck2-launcher'), 'config.json')
def default_ck2_game_path():
"""
    Tries to guess the correct path for the CK2 install on each OS
"""
paths = {
'posix': os.path.join(os.path.expanduser('~'), '.steam/steam/SteamApps/common/Crusader Kings II/'),
'mac': os.path.join(os.path.expanduser('~'), 'Library/Application Support/Steam/SteamApps/common/Crusader Kings II/'),
'nt': os.path.join(os.environ.get('ProgramFiles(x86)', ''), 'Steam/SteamApps/common/Crusader Kings II/'),
}
return paths[os.name]
def default_ck2_config_path():
"""
Tries to guess the correct CK2 config path for each os
"""
linux_and_osx = os.path.join(os.path.expanduser('~'), 'Documents/Paradox Interactive/Crusader Kings II/')
paths = {
'posix': linux_and_osx,
'mac': linux_and_osx,
'nt': appdirs.user_data_dir('Crusader Kings II', 'Paradox Interactive'),
}
return paths[os.name]
def get_binary_path(config):
return os.path.join(config['game_dir'], 'ck2')
def all_paths_exist(config):
return os.path.exists(config['game_dir']) \
and os.path.exists(get_binary_path(config)) \
and os.path.exists(config['config_dir'])
def read_name_from_ini(path):
"""
Reads mod/dlc name from provided .dlc or .mod file
"""
f = open(path, 'r')
    regex = r'name\s*=\s*"([^"]+)"'
content = f.read()
return re.match(regex, content).group(1)
def get_extension(dir, type, config):
"""
Reads all mods/dlcs from path
"""
path = os.path.join(dir, type + '/')
glob_pattern = os.path.join(path, '*.' + type)
l = []
for file_path in glob.glob(glob_pattern):
l.append({
'name': read_name_from_ini(file_path),
'file_name': os.path.basename(file_path)
})
return l
def get_dlcs(config):
return sorted(get_extension(config['game_dir'], 'dlc', config), key=lambda m: m['name'])
def get_mods(config):
return sorted(get_extension(config['config_dir'], 'mod', config), key=lambda m: m['name'])
def read_config():
"""
Reads config for our little launcher
"""
config_path = get_config_path()
if not os.path.exists(config_path):
return None
f = open(config_path, 'r')
config = json.loads(f.read())
f.close()
if not all_paths_exist(config):
return None
return config
def write_config(config):
"""
Writes config for our little launcher
"""
config_path = get_config_path()
if not os.path.exists(os.path.dirname(config_path)):
os.makedirs(os.path.dirname(config_path))
f = open(config_path, 'w')
f.write(json.dumps(config))
f.close()
class MainWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Crusader Kings 2 Launcher")
self.set_name('main')
self.config = read_config()
if not self.config:
self.get_initial_config()
self.checkboxes_to_mods = {}
self.checkboxes_to_dlcs = {}
self.set_background()
self.initialize_layout()
self.connect_signals()
def initialize_layout(self):
main_box = Gtk.Box(spacing=20, orientation=Gtk.Orientation.VERTICAL)
image_path = os.path.join(self.config['game_dir'], 'launcher/launcher_bg2.jpg')
if (os.path.exists(image_path)):
image = Gtk.Image()
image.set_from_file(image_path)
main_box.pack_start(image, True, True, 0)
checkboxes_box = Gtk.Box(spacing=20)
mod_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
mod_scroll = Gtk.ScrolledWindow()
mod_scroll.set_size_request(340, 200);
mod_scroll.add(mod_box)
mod_label = Gtk.Label()
mod_label.set_markup('<b>Mods</b>')
mod_label.set_justify(Gtk.Justification.LEFT)
mod_box.pack_start(mod_label, False, False, 0)
mods = get_mods(self.config)
for mod in mods:
cb = Gtk.CheckButton(mod['name'])
cb.set_active(self.config['mods'].get(mod['file_name'], MOD_DEFAULT_SELECTION))
self.checkboxes_to_mods[cb] = mod
mod_box.pack_start(cb, False, False, 0)
checkboxes_box.pack_start(mod_scroll, True, True, 0)
dlc_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
dlc_scroll = Gtk.ScrolledWindow()
dlc_scroll.set_size_request(340, 200);
dlc_scroll.add(dlc_box)
dlc_label = Gtk.Label()
dlc_label.set_markup('<b>DLCs</b>')
dlc_box.pack_start(dlc_label, False, False, 0)
dlcs = get_dlcs(self.config)
for it, dlc in enumerate(dlcs):
cb = Gtk.CheckButton(dlc['name'])
cb.set_active(self.config['dlcs'].get(dlc['file_name'], DLC_DEFAULT_SELECTION))
self.checkboxes_to_dlcs[cb] = dlc
dlc_box.pack_start(cb, False, False, 0)
checkboxes_box.pack_start(dlc_scroll, True, True, 0)
main_box.pack_start(checkboxes_box, True, True, 0)
self.start_button = Gtk.Button('Start Crusader Kings 2')
self.start_button.set_size_request(700, 80);
self.start_button.set_name('start_button')
main_box.pack_start(self.start_button, True, True, 0)
self.add(main_box)
def connect_signals(self):
self.connect("delete-event", self.close)
self.start_button.connect("clicked", self.start_ck2)
for cb in self.checkboxes_to_mods:
cb.connect("toggled", self.mod_checkbox_clicked)
for cb in self.checkboxes_to_dlcs:
cb.connect("toggled", self.dlc_checkbox_clicked)
def mod_checkbox_clicked(self, checkbox, *args):
mod_filename = self.checkboxes_to_mods[checkbox]['file_name']
self.config['mods'][mod_filename] = checkbox.get_active()
write_config(self.config)
def dlc_checkbox_clicked(self, checkbox, *args):
dlc_filename = self.checkboxes_to_dlcs[checkbox]['file_name']
self.config['dlcs'][dlc_filename] = checkbox.get_active()
write_config(self.config)
def set_background(self):
style_provider = Gtk.CssProvider()
background_path = os.path.join(self.config['game_dir'], 'launcher/background.jpg')
if (os.path.exists(background_path)):
css= str.encode("""
#main, #start_button {{
background-image: url('{0:s}');
}}
""".format(background_path))
style_provider.load_from_data(css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
def close(self, *args):
write_config(self.config)
Gtk.main_quit()
def get_initial_config(self):
"""
Shows dialog to perform initial setup
"""
self.config = {}
game_dir_dialog = Gtk.FileChooserDialog(
"Please choose the game directory of Crusader Kings 2",
self,
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK)
)
game_dir_dialog.set_filename(default_ck2_game_path())
game_dir_response = game_dir_dialog.run()
self.config['game_dir'] = game_dir_dialog.get_filename()
game_dir_dialog.destroy()
if game_dir_response != Gtk.ResponseType.OK:
if not self.config:
Gtk.main_quit()
return
config_dir_dialog = Gtk.FileChooserDialog(
"Please choose the configuration directory of Crusader Kings 2",
self,
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK)
)
config_dir_dialog.set_filename(default_ck2_config_path())
config_dir_response = config_dir_dialog.run()
self.config['config_dir'] = config_dir_dialog.get_filename()
config_dir_dialog.destroy()
if config_dir_response != Gtk.ResponseType.OK:
if not self.config:
Gtk.main_quit()
return
self.config['mods'] = {}
self.config['dlcs'] = {}
write_config(self.config)
def start_ck2(self, *args):
"""
Start Crusader Kings
"""
command = [ get_binary_path(self.config), '-skiplauncher' ]
for mod, selected in self.config['mods'].items():
if selected:
command.append('-mod=mod/{0}'.format(mod))
for dlc, selected in self.config['dlcs'].items():
if not selected:
command.append('-exclude_dlc=dlc/{0}'.format(dlc))
print('Starting CK2: {0}'.format(' '.join(command)))
subprocess.Popen(command).pid
self.close()
win = MainWindow()
win.show_all()
Gtk.main()
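# Illustrative sketch (evaluated only after the GTK main loop above exits): an example
# of the config.json structure handled by read_config()/write_config() and how
# start_ck2() turns it into command-line flags. Paths and file names are hypothetical.
_EXAMPLE_CONFIG = {
    'game_dir': '/home/user/.steam/steam/SteamApps/common/Crusader Kings II/',
    'config_dir': '/home/user/Documents/Paradox Interactive/Crusader Kings II/',
    'mods': {'my_mod.mod': True},    # selected mod   -> '-mod=mod/my_mod.mod'
    'dlcs': {'dlc001.dlc': False},   # deselected DLC -> '-exclude_dlc=dlc/dlc001.dlc'
}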
|
erudit/eruditorg
|
refs/heads/master
|
eruditorg/erudit/fedora/cache.py
|
1
|
# -*- coding: utf-8 -*-
import typing
import random
import requests
import structlog
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import get_language
from requests.exceptions import HTTPError, ConnectionError
from sentry_sdk import configure_scope
from erudit.cache import cache_set
logger = structlog.getLogger(__name__)
def cache_fedora_result(method, duration=settings.LONG_TTL):
"""Cache the result of a method called on a FedoraMixin object
Assumes that the method is bound to a FedoraMixin object, or at least that the object has a
``localidentifier`` attribute.
If the value of ``localidentifier`` is ``None``, cache will not be queried and the decorated
method will be called directly.
This decorator assumes that the localidentifier is unique for ALL Fedora objects.
Will cache the result for the value of ``duration``, plus or minus ``duration`` * 0.25. This
is to avoid expiring all the cached resources at the same time.
:param method: the method to decorate
:param duration: expected duration of result cache
:return: the decorated method
"""
def wrapper(self, *args, **kwargs):
if not self.localidentifier:
return method(self, *args, **kwargs)
key = "fedora_result-{lang}-{localidentifier}-{method_name}".format(
lang=get_language(), localidentifier=self.localidentifier, method_name=method.__name__
)
val = cache.get(key)
if not val:
duration_deviation = random.randint(-(duration // 4), duration // 4)
val = method(self, *args, **kwargs)
cache_set(
cache,
key,
val,
duration + duration_deviation,
pids=[self.pid],
)
return val
return wrapper
def get_cached_datastream_content(
pid: str, datastream_name: str, cache_key: typing.Optional[str] = None
) -> typing.Optional[bytes]:
"""
Given an object pid and a datastream name, returns the content of the datastream.
The content may be fetched from the cache, if it was previously cached, or directly from Fedora.
If the content was not already cached, it will now be cached using the optional cache key
    argument, if provided, or with a unique cache key generated from the object pid and the
datastream name.
If there is a client error (4xx HTTPError), this function will return None.
If there is a server error (5xx HTTPError) or if there is a ConnectionError, this function
will raise the exception.
"""
content_key = f"erudit-fedora-file-{pid}-{datastream_name}" if not cache_key else cache_key
content = cache.get(content_key)
# If content is already cached, return it.
if content is not None:
return content
try:
# Otherwise, get the content from Fedora and cache it for future use.
response = requests.get(
settings.FEDORA_ROOT + f"objects/{pid}/datastreams/{datastream_name}/content",
)
response.raise_for_status()
content = response.content
cache_set(
cache,
content_key,
content,
settings.FEDORA_CACHE_TIMEOUT,
pids=[pid],
)
return content
except HTTPError as e:
# If there is a client error, return None.
if 400 <= e.response.status_code < 500:
with configure_scope() as scope:
scope.fingerprint = ["fedora.warning"]
logger.warning("fedora.warning", message=str(e))
return None
        # If there is a server error, raise an HTTPError.
elif 500 <= e.response.status_code < 600:
with configure_scope() as scope:
scope.fingerprint = ["fedora.server-error"]
logger.error("fedora.server-error", message=str(e))
raise
except ConnectionError as e:
# If Fedora is unreachable, raise a ConnectionError.
with configure_scope() as scope:
scope.fingerprint = ["fedora.connection-error"]
logger.error("fedora.connection-error", message=str(e))
raise
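# Illustrative usage sketch with a hypothetical class (not part of this module): the
# cache_fedora_result decorator above expects the wrapped method to be bound to an
# object exposing ``localidentifier`` and ``pid`` attributes, as FedoraMixin objects do.
class _ExampleFedoraObject:
    localidentifier = "example123"        # becomes part of the cache key
    pid = "erudit:erudit.example123"      # used to tag the cached entry

    @cache_fedora_result
    def expensive_lookup(self):
        # stand-in for a costly Fedora call; the result is cached under a key built
        # from the language, the localidentifier and the method name
        return {"title": "example"}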
|
wenhuizhang/neutron
|
refs/heads/master
|
neutron/ipam/requests.py
|
25
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.ipam import exceptions as ipam_exc
@six.add_metaclass(abc.ABCMeta)
class SubnetPool(object):
"""Represents a pool of IPs available inside an address scope."""
@six.add_metaclass(abc.ABCMeta)
class SubnetRequest(object):
"""Carries the data needed to make a subnet request
The data validated and carried by an instance of this class is the data
that is common to any type of request. This class shouldn't be
instantiated on its own. Rather, a subclass of this class should be used.
"""
def __init__(self, tenant_id, subnet_id,
gateway_ip=None, allocation_pools=None):
"""Initialize and validate
:param tenant_id: The tenant id who will own the subnet
:type tenant_id: str uuid
:param subnet_id: Neutron's subnet ID
:type subnet_id: str uuid
:param gateway_ip: An IP to reserve for the subnet gateway.
:type gateway_ip: None or convertible to netaddr.IPAddress
:param allocation_pools: The pool from which IPAM should allocate
addresses. The allocator *may* allow allocating addresses outside
of this range if specifically requested.
:type allocation_pools: A list of netaddr.IPRange. None if not
specified.
"""
self._tenant_id = tenant_id
self._subnet_id = subnet_id
self._gateway_ip = None
self._allocation_pools = None
if gateway_ip is not None:
self._gateway_ip = netaddr.IPAddress(gateway_ip)
if allocation_pools is not None:
allocation_pools = sorted(allocation_pools)
previous = None
for pool in allocation_pools:
if not isinstance(pool, netaddr.ip.IPRange):
raise TypeError("Ranges must be netaddr.IPRange")
if previous and pool.first <= previous.last:
raise ValueError("Ranges must not overlap")
previous = pool
if 1 < len(allocation_pools):
# Checks that all the ranges are in the same IP version.
# IPRange sorts first by ip version so we can get by with just
# checking the first and the last range having sorted them
# above.
first_version = allocation_pools[0].version
last_version = allocation_pools[-1].version
if first_version != last_version:
raise ValueError("Ranges must be in the same IP version")
self._allocation_pools = allocation_pools
if self.gateway_ip and self.allocation_pools:
if self.gateway_ip.version != self.allocation_pools[0].version:
raise ValueError("Gateway IP version inconsistent with "
"allocation pool version")
@property
def tenant_id(self):
return self._tenant_id
@property
def subnet_id(self):
return self._subnet_id
@property
def gateway_ip(self):
return self._gateway_ip
@property
def allocation_pools(self):
return self._allocation_pools
def _validate_with_subnet(self, subnet_cidr):
if self.gateway_ip and cfg.CONF.force_gateway_on_subnet:
gw_ip = netaddr.IPAddress(self.gateway_ip)
if (gw_ip.version == 4 or (gw_ip.version == 6
and not gw_ip.is_link_local())):
if self.gateway_ip not in subnet_cidr:
raise ValueError("gateway_ip is not in the subnet")
if self.allocation_pools:
if subnet_cidr.version != self.allocation_pools[0].version:
raise ValueError("allocation_pools use the wrong ip version")
for pool in self.allocation_pools:
if pool not in subnet_cidr:
raise ValueError("allocation_pools are not in the subnet")
class AnySubnetRequest(SubnetRequest):
"""A template for allocating an unspecified subnet from IPAM
    A driver may not implement this type of request. For example, the initial
reference implementation will not support this. The API has no way of
creating a subnet without a specific address until subnet-allocation is
implemented.
"""
WILDCARDS = {constants.IPv4: '0.0.0.0',
constants.IPv6: '::'}
def __init__(self, tenant_id, subnet_id, version, prefixlen,
gateway_ip=None, allocation_pools=None):
"""
:param version: Either constants.IPv4 or constants.IPv6
:param prefixlen: The prefix len requested. Must be within the min and
max allowed.
:type prefixlen: int
"""
super(AnySubnetRequest, self).__init__(
tenant_id=tenant_id,
subnet_id=subnet_id,
gateway_ip=gateway_ip,
allocation_pools=allocation_pools)
net = netaddr.IPNetwork(self.WILDCARDS[version] + '/' + str(prefixlen))
self._validate_with_subnet(net)
self._prefixlen = prefixlen
@property
def prefixlen(self):
return self._prefixlen
class SpecificSubnetRequest(SubnetRequest):
"""A template for allocating a specified subnet from IPAM
The initial reference implementation will probably just allow any
allocation, even overlapping ones. This can be expanded on by future
blueprints.
"""
def __init__(self, tenant_id, subnet_id, subnet_cidr,
gateway_ip=None, allocation_pools=None):
"""
:param subnet: The subnet requested. Can be IPv4 or IPv6. However,
when IPAM tries to fulfill this request, the IP version must match
the version of the address scope being used.
:type subnet: netaddr.IPNetwork or convertible to one
"""
super(SpecificSubnetRequest, self).__init__(
tenant_id=tenant_id,
subnet_id=subnet_id,
gateway_ip=gateway_ip,
allocation_pools=allocation_pools)
self._subnet_cidr = netaddr.IPNetwork(subnet_cidr)
self._validate_with_subnet(self._subnet_cidr)
@property
def subnet_cidr(self):
return self._subnet_cidr
@property
def prefixlen(self):
return self._subnet_cidr.prefixlen
@six.add_metaclass(abc.ABCMeta)
class AddressRequest(object):
"""Abstract base class for address requests"""
class SpecificAddressRequest(AddressRequest):
"""For requesting a specified address from IPAM"""
def __init__(self, address):
"""
:param address: The address being requested
:type address: A netaddr.IPAddress or convertible to one.
"""
super(SpecificAddressRequest, self).__init__()
self._address = netaddr.IPAddress(address)
@property
def address(self):
return self._address
class AnyAddressRequest(AddressRequest):
"""Used to request any available address from the pool."""
class AutomaticAddressRequest(SpecificAddressRequest):
"""Used to create auto generated addresses, such as EUI64"""
EUI64 = 'eui64'
def _generate_eui64_address(self, **kwargs):
if set(kwargs) != set(['prefix', 'mac']):
raise ipam_exc.AddressCalculationFailure(
address_type='eui-64',
reason='must provide exactly 2 arguments - cidr and MAC')
prefix = kwargs['prefix']
mac_address = kwargs['mac']
return ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac_address)
_address_generators = {EUI64: _generate_eui64_address}
def __init__(self, address_type=EUI64, **kwargs):
"""
        This constructor builds an automatic IP address. Parameters needed for
generating it can be passed as optional keyword arguments.
:param address_type: the type of address to generate.
            It could be an EUI-64 address, a random IPv6 address, or
            an IPv4 link-local address.
For the Kilo release only eui-64 addresses will be supported.
"""
address_generator = self._address_generators.get(address_type)
if not address_generator:
raise ipam_exc.InvalidAddressType(address_type=address_type)
address = address_generator(self, **kwargs)
super(AutomaticAddressRequest, self).__init__(address)
class RouterGatewayAddressRequest(AddressRequest):
"""Used to request allocating the special router gateway address."""
class AddressRequestFactory(object):
"""Builds request using ip info
Additional parameters(port and context) are not used in default
implementation, but planned to be used in sub-classes
provided by specific ipam driver,
"""
@classmethod
def get_request(cls, context, port, ip_dict):
"""
:param context: context (not used here, but can be used in sub-classes)
:param port: port dict (not used here, but can be used in sub-classes)
:param ip_dict: dict that can contain 'ip_address', 'mac' and
            'subnet_cidr' keys. The request to generate is selected depending on
            these ip_dict keys.
:return: returns prepared AddressRequest (specific or any)
"""
if ip_dict.get('ip_address'):
return SpecificAddressRequest(ip_dict['ip_address'])
elif ip_dict.get('eui64_address'):
return AutomaticAddressRequest(prefix=ip_dict['subnet_cidr'],
mac=ip_dict['mac'])
else:
return AnyAddressRequest()
class SubnetRequestFactory(object):
"""Builds request using subnet info"""
@classmethod
def get_request(cls, context, subnet, subnetpool):
cidr = subnet.get('cidr')
subnet_id = subnet.get('id', uuidutils.generate_uuid())
is_any_subnetpool_request = not attributes.is_attr_set(cidr)
if is_any_subnetpool_request:
prefixlen = subnet['prefixlen']
if not attributes.is_attr_set(prefixlen):
prefixlen = int(subnetpool['default_prefixlen'])
return AnySubnetRequest(
subnet['tenant_id'],
subnet_id,
common_utils.ip_version_from_int(subnetpool['ip_version']),
prefixlen)
else:
return SpecificSubnetRequest(subnet['tenant_id'],
subnet_id,
cidr,
subnet.get('gateway_ip'),
subnet.get('allocation_pools'))
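# Illustrative sketch with hypothetical IDs and addresses (not used by any driver):
# how the request objects defined above are typically built.
def _example_requests():
    specific = SpecificSubnetRequest(
        tenant_id='tenant-uuid', subnet_id='subnet-uuid',
        subnet_cidr='192.168.1.0/24', gateway_ip='192.168.1.1')
    wildcard = AnySubnetRequest(
        tenant_id='tenant-uuid', subnet_id='subnet-uuid',
        version=constants.IPv4, prefixlen=24)
    address = AddressRequestFactory.get_request(
        None, None, {'ip_address': '192.168.1.10'})
    return specific, wildcard, address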
|
amasiero/approach_control
|
refs/heads/master
|
approach_control_sm/nodes/sm_test_04.py
|
1
|
#!/usr/bin/env python
import rospy
import roslib; roslib.load_manifest('approach_control_sm')
import smach
import smach_ros
import time
from approach_control_speech import Say, Recognizer
from approach_control_people.skeleton import CheckDistance
from approach_control_navigation import GoToLocation, SetInitialPosition
from approach_control_manipulator import GestureAction
from approach_control_robot_face import PublishFace
from approach_control_people import GetDistance, GetDistanceInvert
from approach_control_head import Tilt
from approach_control_movement.walk import Walk
def setup_sm():
sm = smach.StateMachine(outcomes=['Done'])
with sm:
smach.StateMachine.add('RECOGNIZE', Recognizer.Recognizer(spec = ['Robot'], time_out = 100),
transitions={'Robot' : 'HELLO', 'fail' : 'SORRY'})
smach.StateMachine.add('HELLO', Say.Say("Hello"),
transitions={'spoke' : 'START', 'mute' : 'Done'})
smach.StateMachine.add('SORRY', Say.Say("Sorry, repeat please?"),
transitions={'spoke' : 'RECOGNIZE', 'mute' : 'Done'})
smach.StateMachine.add('START', Recognizer.Recognizer(spec = ['Start'], time_out = 100),
transitions={'Start' : 'OK', 'fail' : 'SORRY'})
smach.StateMachine.add('OK', Say.Say("Okay!"),
transitions={'spoke' : 'SET_INITIAL_POSITION', 'mute' : 'Done'})
smach.StateMachine.add('SET_INITIAL_POSITION', SetInitialPosition.SetInitialPosition(local='jardim'),
transitions={'success':'GO_ARMARIO','fail':'Done'})
smach.StateMachine.add('GO_ARMARIO', GoToLocation.GoToLocation('armario'),
transitions={'success':'HEAD','fail':'Done'})
smach.StateMachine.add('HEAD', GestureAction.GestureAction('head2'),
transitions={'success':'WHERE_IS_BOTTLE','fail':'Done'})
smach.StateMachine.add('WHERE_IS_BOTTLE', Say.Say("I left my bottle here."),
transitions={'spoke' : 'HALL', 'mute' : 'Done'})
smach.StateMachine.add('HALL', Say.Say("Maybe did I let in the hall?"),
transitions={'spoke' : 'TABLE', 'mute' : 'Done'})
smach.StateMachine.add('TABLE', GoToLocation.GoToLocation('mesa'),
transitions={'success':'CON_1','fail':'Done'})
sm_con_1 = smach.Concurrence(outcomes=['success', 'fail'],
default_outcome = 'fail',
outcome_map = {'success' :
{'DAMMED' : 'spoke',
'ANGRY_FACE' : 'success'}})
with sm_con_1:
smach.Concurrence.add('ANGRY_FACE', PublishFace.PublishFace('angry_red'))
smach.Concurrence.add('DAMMED', Say.Say("Dammed! It is not here too!"))
smach.StateMachine.add('CON_1', sm_con_1,
transitions={'success':'SOFA_F', 'fail':'CON_1'})
smach.StateMachine.add('SOFA_F', GoToLocation.GoToLocation('pessoa_longe'),
transitions={'success':'SURPRISE_FACE','fail':'Done'})
smach.StateMachine.add('SURPRISE_FACE', PublishFace.PublishFace('surprise_blured'),
transitions={'success':'HAPPY_FACE','fail':'Done'})
smach.StateMachine.add('HAPPY_FACE', PublishFace.PublishFace('happy'),
transitions={'success':'CON_2','fail':'Done'})
sm_con_2 = smach.Concurrence(outcomes=['success', 'fail'],
default_outcome = 'fail',
outcome_map = {'success' :
{'HI' : 'spoke',
'HELLO_GESTURE' : 'success'}})
with sm_con_2:
smach.Concurrence.add('HELLO_GESTURE', GestureAction.GestureAction('long_hello'))
smach.Concurrence.add('HI', Say.Say("Hi!"))
smach.StateMachine.add('CON_2', sm_con_2,
transitions={'success':'SOFA_C', 'fail':'CON_2'})
smach.StateMachine.add('SOFA_C', GoToLocation.GoToLocation('pessoa_perto'),
transitions={'success':'QUESTION','fail':'Done'})
smach.StateMachine.add('QUESTION', Say.Say("Could you please help me find my bottle?"),
transitions={'spoke' : 'YES', 'mute' : 'Done'})
smach.StateMachine.add('YES', Recognizer.Recognizer(spec = ['Yes'], time_out = 100),
transitions={'Yes' : 'FOLLOW', 'fail' : 'YES'})
smach.StateMachine.add('FOLLOW', Say.Say("Please follow me"),
transitions={'spoke' : 'EXIT', 'mute' : 'Done'})
smach.StateMachine.add('EXIT', GoToLocation.GoToLocation('saida'),
transitions={'success':'THANKS','fail':'Done'})
smach.StateMachine.add('THANKS', Say.Say("Thanks for your cooperation"),
transitions={'spoke' : 'END', 'mute' : 'Done'})
smach.StateMachine.add('END', Say.Say("Lets do it again!"),
transitions={'spoke' : 'BACK_HOME', 'mute' : 'Done'})
smach.StateMachine.add('BACK_HOME', GoToLocation.GoToLocation('jardim'),
transitions={'success':'Done','fail':'Done'})
sis = smach_ros.IntrospectionServer('Judith_StateMachineServer', sm, '/SM_JUDITH')
sis.start()
outcome = sm.execute()
rospy.spin()
sis.stop()
if __name__ == '__main__':
rospy.init_node('test_02_sm')
setup_sm()
|
anthonydillon/horizon
|
refs/heads/master
|
doc/source/conf.py
|
17
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Horizon documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 27 11:38:59 2011.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import django
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'openstack_dashboard.settings')
import horizon.version
django.setup()
def write_autodoc_index():
def find_autodoc_modules(module_name, sourcedir):
"""returns a list of modules in the SOURCE directory."""
modlist = []
os.chdir(os.path.join(sourcedir, module_name))
print("SEARCHING %s" % sourcedir)
for root, dirs, files in os.walk("."):
for filename in files:
if filename == 'tests.py':
continue
if filename.endswith(".py"):
# remove the pieces of the root
elements = root.split(os.path.sep)
# replace the leading "." with the module name
elements[0] = module_name
# and get the base module name
base, extension = os.path.splitext(filename)
if not (base == "__init__"):
elements.append(base)
result = ".".join(elements)
# print result
modlist.append(result)
return modlist
RSTDIR = os.path.abspath(os.path.join(BASE_DIR, "sourcecode"))
SRCS = [('horizon', ROOT),
('openstack_dashboard', ROOT)]
EXCLUDED_MODULES = ('horizon.test',
'openstack_dashboard.enabled',
'openstack_dashboard.test',
'openstack_dashboard.openstack.common',
)
CURRENT_SOURCES = {}
if not(os.path.exists(RSTDIR)):
os.mkdir(RSTDIR)
CURRENT_SOURCES[RSTDIR] = ['autoindex.rst']
INDEXOUT = open(os.path.join(RSTDIR, "autoindex.rst"), "w")
INDEXOUT.write("""
=================
Source Code Index
=================
.. contents::
:depth: 1
:local:
""")
for modulename, path in SRCS:
sys.stdout.write("Generating source documentation for %s\n" %
modulename)
INDEXOUT.write("\n%s\n" % modulename.capitalize())
INDEXOUT.write("%s\n" % ("=" * len(modulename),))
INDEXOUT.write(".. toctree::\n")
INDEXOUT.write(" :maxdepth: 1\n")
INDEXOUT.write("\n")
MOD_DIR = os.path.join(RSTDIR, modulename)
CURRENT_SOURCES[MOD_DIR] = []
if not(os.path.exists(MOD_DIR)):
os.mkdir(MOD_DIR)
for module in find_autodoc_modules(modulename, path):
if any([module.startswith(exclude) for exclude
in EXCLUDED_MODULES]):
print("Excluded module %s." % module)
continue
mod_path = os.path.join(path, *module.split("."))
generated_file = os.path.join(MOD_DIR, "%s.rst" % module)
INDEXOUT.write(" %s/%s\n" % (modulename, module))
# Find the __init__.py module if this is a directory
if os.path.isdir(mod_path):
source_file = ".".join((os.path.join(mod_path, "__init__"),
"py",))
else:
source_file = ".".join((os.path.join(mod_path), "py"))
CURRENT_SOURCES[MOD_DIR].append("%s.rst" % module)
# Only generate a new file if the source has changed or we don't
# have a doc file to begin with.
if not os.access(generated_file, os.F_OK) or (
os.stat(generated_file).st_mtime <
os.stat(source_file).st_mtime):
print("Module %s updated, generating new documentation."
% module)
FILEOUT = open(generated_file, "w")
header = "The :mod:`%s` Module" % module
FILEOUT.write("%s\n" % ("=" * len(header),))
FILEOUT.write("%s\n" % header)
FILEOUT.write("%s\n" % ("=" * len(header),))
FILEOUT.write(".. automodule:: %s\n" % module)
FILEOUT.write(" :members:\n")
FILEOUT.write(" :undoc-members:\n")
FILEOUT.write(" :show-inheritance:\n")
FILEOUT.write(" :noindex:\n")
FILEOUT.close()
INDEXOUT.close()
# Delete auto-generated .rst files for sources which no longer exist
for directory, subdirs, files in list(os.walk(RSTDIR)):
for old_file in files:
if old_file not in CURRENT_SOURCES.get(directory, []):
print("Removing outdated file for %s" % old_file)
os.remove(os.path.join(directory, old_file))
write_autodoc_index()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'oslosphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Horizon'
copyright = u'2012, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = horizon.version.version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = horizon.version.version_info.release_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['horizon.', 'openstack_dashboard.']
primary_domain = 'py'
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": "false"
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Horizondoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Horizon.tex', u'Horizon Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'horizon', u'Horizon Documentation',
[u'OpenStack'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Horizon', u'Horizon Documentation', u'OpenStack',
'Horizon', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Horizon'
epub_author = u'OpenStack'
epub_publisher = u'OpenStack'
epub_copyright = u'2012, OpenStack'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
|
cyrustabatab/mptcp
|
refs/heads/master
|
examples/wireless/mixed-wireless.py
|
48
|
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# */
#
# This ns-3 example demonstrates the use of helper functions to ease
# the construction of simulation scenarios.
#
# The simulation topology consists of a mixed wired and wireless
# scenario in which a hierarchical mobility model is used.
#
# The simulation layout consists of N backbone routers interconnected
# by an ad hoc wifi network.
# Each backbone router also has a local 802.11 network and is connected
# to a local LAN. An additional set of (K-1) nodes are connected to
# this backbone. Finally, a local LAN is connected to each router
# on the backbone, with L-1 additional hosts.
#
# The nodes are populated with TCP/IP stacks, and OLSR unicast routing
# on the backbone. An example UDP transfer is shown. The simulator
# can be configured to output tcpdumps or traces from different nodes.
#
#
# +--------------------------------------------------------+
# | |
# | 802.11 ad hoc, ns-2 mobility |
# | |
# +--------------------------------------------------------+
# | o o o(N backbone routers) |
# +--------+ +--------+
# wired LAN | mobile | wired LAN | mobile |
# -----------| router | -----------| router |
# --------- ---------
# | |
# +----------------+ +----------------+
# | 802.11 | | 802.11 |
# | net | | net |
# | K-1 hosts | | K-1 hosts |
# +----------------+ +----------------+
#
import ns.applications
import ns.core
import ns.csma
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
# #
# # This function will be used below as a trace sink
# #
# static void
# CourseChangeCallback(std.string path, Ptr<const MobilityModel> model)
# {
# Vector position = model.GetPosition();
# std.cout << "CourseChange " << path << " x=" << position.x << ", y=" << position.y << ", z=" << position.z << std.endl;
# }
def main(argv):
#
# First, we declare and initialize a few local variables that control some
# simulation parameters.
#
backboneNodes = 10
infraNodes = 5
lanNodes = 5
stopTime = 10
#
# Simulation defaults are typically set next, before command line
# arguments are parsed.
#
ns.core.Config.SetDefault("ns3::OnOffApplication::PacketSize", ns.core.StringValue("210"))
ns.core.Config.SetDefault("ns3::OnOffApplication::DataRate", ns.core.StringValue("448kb/s"))
#
# For convenience, we add the local variables to the command line argument
# system so that they can be overridden with flags such as
# "--backboneNodes=20"
#
cmd = ns.core.CommandLine()
#cmd.AddValue("backboneNodes", "number of backbone nodes", backboneNodes)
#cmd.AddValue("infraNodes", "number of leaf nodes", infraNodes)
#cmd.AddValue("lanNodes", "number of LAN nodes", lanNodes)
#cmd.AddValue("stopTime", "simulation stop time(seconds)", stopTime)
#
# The system global variables and the local values added to the argument
# system can be overridden by command line arguments by using this call.
#
cmd.Parse(argv)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Construct the backbone #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
#
# Create a container to manage the nodes of the adhoc(backbone) network.
# Later we'll create the rest of the nodes we'll need.
#
backbone = ns.network.NodeContainer()
backbone.Create(backboneNodes)
#
# Create the backbone wifi net devices and install them into the nodes in
# our container
#
wifi = ns.wifi.WifiHelper()
mac = ns.wifi.NqosWifiMacHelper.Default()
mac.SetType("ns3::AdhocWifiMac")
wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager",
"DataMode", ns.core.StringValue("OfdmRate54Mbps"))
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
backboneDevices = wifi.Install(wifiPhy, mac, backbone)
#
# Add the IPv4 protocol stack to the nodes in our container
#
print "Enabling OLSR routing on all backbone nodes"
internet = ns.internet.InternetStackHelper()
olsr = ns.olsr.OlsrHelper()
internet.SetRoutingHelper(olsr); # has effect on the next Install ()
internet.Install(backbone);
# re-initialize for non-olsr routing.
internet.Reset()
#
# Assign IPv4 addresses to the device drivers(actually to the associated
# IPv4 interfaces) we just created.
#
ipAddrs = ns.internet.Ipv4AddressHelper()
ipAddrs.SetBase(ns.network.Ipv4Address("192.168.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
ipAddrs.Assign(backboneDevices)
#
# The ad-hoc network nodes need a mobility model so we aggregate one to
# each of the nodes we just finished building.
#
mobility = ns.mobility.MobilityHelper()
positionAlloc = ns.mobility.ListPositionAllocator()
x = 0.0
for i in range(backboneNodes):
positionAlloc.Add(ns.core.Vector(x, 0.0, 0.0))
x += 5.0
mobility.SetPositionAllocator(positionAlloc)
mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel",
"Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(0, 1000, 0, 1000)),
"Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=2000]"),
"Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.2]"))
mobility.Install(backbone)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Construct the LANs #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# Reset the address base-- all of the CSMA networks will be in
# the "172.16 address space
ipAddrs.SetBase(ns.network.Ipv4Address("172.16.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
for i in range(backboneNodes):
print "Configuring local area network for backbone node ", i
#
# Create a container to manage the nodes of the LAN. We need
# two containers here; one with all of the new nodes, and one
# with all of the nodes including new and existing nodes
#
newLanNodes = ns.network.NodeContainer()
newLanNodes.Create(lanNodes - 1)
# Now, create the container with all nodes on this link
lan = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), newLanNodes)
#
# Create the CSMA net devices and install them into the nodes in our
# collection.
#
csma = ns.csma.CsmaHelper()
csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)))
lanDevices = csma.Install(lan)
#
# Add the IPv4 protocol stack to the new LAN nodes
#
internet.Install(newLanNodes)
#
# Assign IPv4 addresses to the device drivers(actually to the
# associated IPv4 interfaces) we just created.
#
ipAddrs.Assign(lanDevices)
#
# Assign a new network prefix for the next LAN, according to the
# network mask initialized above
#
ipAddrs.NewNetwork()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Construct the mobile networks #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# Reset the address base-- all of the 802.11 networks will be in
# the "10.0" address space
ipAddrs.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
for i in range(backboneNodes):
print "Configuring wireless network for backbone node ", i
#
# Create a container to manage the nodes of the LAN. We need
# two containers here; one with all of the new nodes, and one
# with all of the nodes including new and existing nodes
#
stas = ns.network.NodeContainer()
stas.Create(infraNodes - 1)
# Now, create the container with all nodes on this link
infra = ns.network.NodeContainer(ns.network.NodeContainer(backbone.Get(i)), stas)
#
# Create another ad hoc network and devices
#
ssid = ns.wifi.Ssid('wifi-infra' + str(i))
wifiInfra = ns.wifi.WifiHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
wifiInfra.SetRemoteStationManager('ns3::ArfWifiManager')
macInfra = ns.wifi.NqosWifiMacHelper.Default();
macInfra.SetType("ns3::StaWifiMac",
"Ssid", ns.wifi.SsidValue(ssid),
"ActiveProbing", ns.core.BooleanValue(False))
# setup stas
staDevices = wifiInfra.Install(wifiPhy, macInfra, stas)
# setup ap.
macInfra.SetType("ns3::ApWifiMac",
"Ssid", ns.wifi.SsidValue(ssid),
"BeaconGeneration", ns.core.BooleanValue(True),
"BeaconInterval", ns.core.TimeValue(ns.core.Seconds(2.5)))
apDevices = wifiInfra.Install(wifiPhy, macInfra, backbone.Get(i))
# Collect all of these new devices
infraDevices = ns.network.NetDeviceContainer(apDevices, staDevices)
# Add the IPv4 protocol stack to the nodes in our container
#
internet.Install(stas)
#
# Assign IPv4 addresses to the device drivers(actually to the associated
# IPv4 interfaces) we just created.
#
ipAddrs.Assign(infraDevices)
#
# Assign a new network prefix for each mobile network, according to
# the network mask initialized above
#
ipAddrs.NewNetwork()
#
# The new wireless nodes need a mobility model so we aggregate one
# to each of the nodes we just finished building.
#
subnetAlloc = ns.mobility.ListPositionAllocator()
for j in range(infra.GetN()):
subnetAlloc.Add(ns.core.Vector(0.0, j, 0.0))
mobility.PushReferenceMobilityModel(backbone.Get(i))
mobility.SetPositionAllocator(subnetAlloc)
mobility.SetMobilityModel("ns3::RandomDirection2dMobilityModel",
"Bounds", ns.mobility.RectangleValue(ns.mobility.Rectangle(-25, 25, -25, 25)),
"Speed", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=30]"),
"Pause", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0.4]"))
mobility.Install(infra)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Application configuration #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# Create the OnOff application to send UDP datagrams of size
# 210 bytes at a rate of 448 Kb/s, between two nodes
print "Create Applications."
port = 9 # Discard port(RFC 863)
# Let's make sure that the user does not define too few LAN nodes
# to make this example work. We need lanNodes >= 5
assert(lanNodes >= 5)
appSource = ns.network.NodeList.GetNode(11)
appSink = ns.network.NodeList.GetNode(13)
remoteAddr = ns.network.Ipv4Address("172.16.0.5")
onoff = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(remoteAddr, port)))
onoff.SetConstantRate (ns.network.DataRate ("10kb/s"))
apps = onoff.Install(ns.network.NodeContainer(appSource))
apps.Start(ns.core.Seconds(3.0))
apps.Stop(ns.core.Seconds(20.0))
# Create a packet sink to receive these packets
sink = ns.applications.PacketSinkHelper("ns3::UdpSocketFactory",
ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), port))
apps = sink.Install(ns.network.NodeContainer(appSink))
apps.Start(ns.core.Seconds(3.0))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
# #
# Tracing configuration #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # /
print "Configure Tracing."
#
# Let's set up some ns-2-like ascii traces, using another helper class
#
#std.ofstream ascii
#ascii = ns.core.AsciiTraceHelper();
#stream = ascii.CreateFileStream("mixed-wireless.tr");
#wifiPhy.EnableAsciiAll(stream);
#csma.EnableAsciiAll(stream);
print "(tracing not done for Python)"
# Look at nodes 11, 13 only
# WifiHelper.EnableAscii(ascii, 11, 0);
# WifiHelper.EnableAscii(ascii, 13, 0);
# Let's do a pcap trace on the backbone devices
wifiPhy.EnablePcap("mixed-wireless", backboneDevices)
# Let's additionally trace the application Sink, ifIndex 0
csma = ns.csma.CsmaHelper()
csma.EnablePcapAll("mixed-wireless", False)
# #ifdef ENABLE_FOR_TRACING_EXAMPLE
# Config.Connect("/NodeList/*/$MobilityModel/CourseChange",
# MakeCallback(&CourseChangeCallback))
# #endif
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Run simulation #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
print "Run Simulation."
ns.core.Simulator.Stop(ns.core.Seconds(stopTime))
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
if __name__ == '__main__':
import sys
main(sys.argv)
|
edosedgar/xs-pkg
|
refs/heads/master
|
algoCourse/hw5/solution_opt.py
|
1
|
def calc_wrapping(cost, N):
# Convert the array of line-start positions p into
# the total number of lines used
def get_num_of_lines(p, N):
if (p[N] == 1):
return 1
else:
return get_num_of_lines(p, p[N] - 1) + 1
# Build c[j], the minimal total cost (squared slack) of wrapping words 1..j,
# and p[j], the first word of the last line in that optimal arrangement
c = list([2**32 for x in range(N + 1)])
p = list([0 for x in range(N + 1)])
c[0] = 0
for j in range(1, N + 1):
c[j] = 2**32
for i in range(1, j + 1):
if c[i - 1] + cost[i][j] < c[j]:
c[j], p[j] = c[i - 1] + cost[i][j], i
return c[N], get_num_of_lines(p, N)
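# Illustrative check with a hypothetical input (not taken from input.txt):
# with L = 6 and words = ['aaa', 'bb', 'cc', 'ddddd'] the optimal wrapping is
# 'aaa' / 'bb cc' / 'ddddd', giving a squared-slack sum of 3**2 + 1**2 + 0 = 10
# over 3 lines, so calc_wrapping(cost, 4) should return (10, 3) for the cost
# table built below.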
# Prepare
input_string = open("input.txt", "r")
answer_file = open("output.txt", "w")
L = int(input_string.readline())
words = input_string.readline().split()
N = len(words)
# Generate cost array
cost = list([list([2**32 for x in range(N + 1)]) for y in range(N + 1)])
extra = list([list([2**32 for x in range(N + 1)]) for y in range(N + 1)])
for i in range(1, N + 1):
extra[i][i] = L - len(words[i - 1])
for j in range(i + 1, N + 1):
extra[i][j] = extra[i][j - 1] - len(words[j - 1]) - 1
for i in range(1, N + 1):
for j in range(i, N + 1):
if extra[i][j] < 0:
cost[i][j] = 2**32
elif j == N and extra[i][j] >= 0:
cost[i][j] = 0
else:
cost[i][j] = extra[i][j] ** 2
# Do all work
min_squares, num_of_line = calc_wrapping(cost, N)
# Print results
answer_file.write(str(min_squares) + '\n')
answer_file.write(str(num_of_line))
# Go away
answer_file.close()
input_string.close()
|
jmehnle/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_credential.py
|
31
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_credential
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower credential.
description:
- Create, update, or destroy Ansible Tower credentials. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the credential.
required: True
description:
description:
- The description to use for the credential.
user:
description:
- User that should own this credential.
required: False
default: null
team:
description:
- Team that should own this credential.
required: False
default: null
project:
description:
- Project that should be used for this credential.
required: False
default: null
organization:
description:
- Organization that should own the credential.
required: False
default: null
kind:
description:
- Type of credential being added.
required: True
choices: ["ssh", "net", "scm", "aws", "rax", "vmware", "satellite6", "cloudforms", "gce", "azure", "azure_rm", "openstack"]
host:
description:
- Host for this credential.
required: False
default: null
username:
description:
- Username for this credential. access_key for AWS.
required: False
default: null
password:
description:
- Password for this credential. Use ASK for prompting. secret_key for AWS. api_key for RAX.
required: False
default: null
ssh_key_data:
description:
- Path to SSH private key.
required: False
default: null
ssh_key_unlock:
description:
- Unlock password for ssh_key. Use ASK for prompting.
authorize:
description:
- Whether to use authorize for net type credentials.
required: False
default: False
authorize_password:
description:
- Password for net credentials that require authorize.
required: False
default: null
client:
description:
- Client or application ID for azure_rm type.
required: False
default: null
secret:
description:
- Secret token for azure_rm type.
required: False
default: null
subscription:
description:
- Subscription ID for azure_rm type.
required: False
default: null
tenant:
description:
- Tenant ID for azure_rm type.
required: False
default: null
domain:
description:
- Domain for openstack type.
required: False
default: null
become_method:
description:
- Become method to use for privilege escalation.
required: False
choices: ["None", "sudo", "su", "pbrun", "pfexec", "pmrun"]
default: "None"
become_username:
description:
- Become username. Use ASK for prompting.
required: False
default: null
become_password:
description:
- Become password. Use ASK for prompting.
required: False
default: null
vault_password:
description:
- Vault password. Use ASK for prompting.
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.2"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add tower credential
tower_credential:
name: Team Name
description: Team Description
organization: test-org
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import os
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
user=dict(),
team=dict(),
kind=dict(required=True,
choices=["ssh", "net", "scm", "aws", "rax", "vmware", "satellite6",
"cloudforms", "gce", "azure", "azure_rm", "openstack"]),
host=dict(),
username=dict(),
password=dict(no_log=True),
ssh_key_data=dict(no_log=True),
ssh_key_unlock=dict(no_log=True),
authorize=dict(type='bool', default=False),
authorize_password=dict(no_log=True),
client=dict(),
secret=dict(),
tenant=dict(),
subscription=dict(),
domain=dict(),
become_method=dict(),
become_username=dict(),
become_password=dict(no_log=True),
vault_password=dict(no_log=True),
description=dict(),
organization=dict(required=True),
project=dict(),
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
tower_verify_ssl=dict(type='bool', default=True),
tower_config_file=dict(type='path'),
state=dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
organization = module.params.get('organization')
state = module.params.get('state')
json_output = {'credential': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
credential = tower_cli.get_resource('credential')
try:
params = module.params.copy()
params['create_on_missing'] = True
if organization:
org_res = tower_cli.get_resource('organization')
org = org_res.get(name=organization)
params['organization'] = org['id']
if params['ssh_key_data']:
filename = params['ssh_key_data']
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
module.fail_json(msg='file not found: %s' % filename)
if os.path.isdir(filename):
module.fail_json(msg='attempted to read contents of directory: %s' % filename)
with open(filename, 'rb') as f:
params['ssh_key_data'] = f.read()
if state == 'present':
result = credential.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = credential.delete(**params)
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update credential, organization not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update credential: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
weka-io/boto
|
refs/heads/develop
|
tests/integration/s3/mock_storage_service.py
|
108
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Provides basic mocks of core storage service classes, for unit testing:
ACL, Key, Bucket, Connection, and StorageUri. We implement a subset of
the interfaces defined in the real boto classes, but don't handle most
of the optional params (which we indicate with the constant "NOT_IMPL").
"""
import copy
import boto
import base64
import re
from hashlib import md5
from boto.utils import compute_md5
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
from boto.s3.prefix import Prefix
from boto.compat import six
NOT_IMPL = None
class MockAcl(object):
def __init__(self, parent=NOT_IMPL):
pass
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
pass
def to_xml(self):
return '<mock_ACL_XML/>'
class MockKey(object):
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.data = None
self.etag = None
self.size = None
self.closed = True
self.content_encoding = None
self.content_language = None
self.content_type = None
self.last_modified = 'Wed, 06 Oct 2010 05:11:54 GMT'
self.BufferSize = 8192
def __repr__(self):
if self.bucket:
return '<MockKey: %s,%s>' % (self.bucket.name, self.name)
else:
return '<MockKey: %s>' % self.name
def get_contents_as_string(self, headers=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL,
torrent=NOT_IMPL,
version_id=NOT_IMPL):
return self.data
def get_contents_to_file(self, fp, headers=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL,
torrent=NOT_IMPL,
version_id=NOT_IMPL,
res_download_handler=NOT_IMPL):
fp.write(self.data)
def get_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL, num_cb=NOT_IMPL,
torrent=NOT_IMPL, version_id=NOT_IMPL,
override_num_retries=NOT_IMPL):
fp.write(self.data)
def _handle_headers(self, headers):
if not headers:
return
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name('Content-Encoding',
headers)
if find_matching_headers('Content-Type', headers):
self.content_type = merge_headers_by_name('Content-Type', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name('Content-Language',
headers)
# Simplistic partial implementation for headers: Just supports range GETs
# of flavor 'Range: bytes=xyz-'.
def open_read(self, headers=None, query_args=NOT_IMPL,
override_num_retries=NOT_IMPL):
if self.closed:
self.read_pos = 0
self.closed = False
if headers and 'Range' in headers:
match = re.match('bytes=([0-9]+)-$', headers['Range'])
if match:
self.read_pos = int(match.group(1))
def close(self, fast=NOT_IMPL):
self.closed = True
def read(self, size=0):
self.open_read()
if size == 0:
data = self.data[self.read_pos:]
self.read_pos = self.size
else:
data = self.data[self.read_pos:self.read_pos+size]
self.read_pos += size
if not data:
self.close()
return data
def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL,
policy=NOT_IMPL, md5=NOT_IMPL,
res_upload_handler=NOT_IMPL):
self.data = fp.read()
self.set_etag()
self.size = len(self.data)
self._handle_headers(headers)
def set_contents_from_stream(self, fp, headers=None, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
reduced_redundancy=NOT_IMPL, query_args=NOT_IMPL,
size=NOT_IMPL):
self.data = ''
chunk = fp.read(self.BufferSize)
while chunk:
self.data += chunk
chunk = fp.read(self.BufferSize)
self.set_etag()
self.size = len(self.data)
self._handle_headers(headers)
def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
self.data = copy.copy(s)
self.set_etag()
self.size = len(s)
self._handle_headers(headers)
def set_contents_from_filename(self, filename, headers=None,
replace=NOT_IMPL, cb=NOT_IMPL,
num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, res_upload_handler=NOT_IMPL):
fp = open(filename, 'rb')
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler)
fp.close()
def copy(self, dst_bucket_name, dst_key, metadata=NOT_IMPL,
reduced_redundancy=NOT_IMPL, preserve_acl=NOT_IMPL):
dst_bucket = self.bucket.connection.get_bucket(dst_bucket_name)
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata)
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def set_etag(self):
"""
Set etag attribute by generating hex MD5 checksum on current
contents of mock key.
"""
m = md5()
if not isinstance(self.data, bytes):
m.update(self.data.encode('utf-8'))
else:
m.update(self.data)
hex_md5 = m.hexdigest()
self.etag = hex_md5
def compute_md5(self, fp):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer
will be reset to the beginning of the file before the
method returns.
:rtype: tuple
:return: A tuple containing the hex digest version of the MD5 hash
as the first element and the base64 encoded version of the
plain digest as the second element.
"""
tup = compute_md5(fp)
# Returned values are MD5 hash, base64 encoded MD5 hash, and file size.
# The internal implementation of compute_md5() needs to return the
# file size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = tup[2]
return tup[0:2]
class MockBucket(object):
def __init__(self, connection=None, name=None, key_class=NOT_IMPL):
self.name = name
self.keys = {}
self.acls = {name: MockAcl()}
# default object ACLs are one per bucket and not supported for keys
self.def_acl = MockAcl()
self.subresources = {}
self.connection = connection
self.logging = False
def __repr__(self):
return 'MockBucket: %s' % self.name
def copy_key(self, new_key_name, src_bucket_name,
src_key_name, metadata=NOT_IMPL, src_version_id=NOT_IMPL,
storage_class=NOT_IMPL, preserve_acl=NOT_IMPL,
encrypt_key=NOT_IMPL, headers=NOT_IMPL, query_args=NOT_IMPL):
new_key = self.new_key(key_name=new_key_name)
src_key = self.connection.get_bucket(
src_bucket_name).get_key(src_key_name)
new_key.data = copy.copy(src_key.data)
new_key.size = len(new_key.data)
return new_key
def disable_logging(self):
self.logging = False
def enable_logging(self, target_bucket_prefix):
self.logging = True
def get_logging_config(self):
return {"Logging": {}}
def get_versioning_status(self, headers=NOT_IMPL):
return False
def get_acl(self, key_name='', headers=NOT_IMPL, version_id=NOT_IMPL):
if key_name:
# Return ACL for the key.
return self.acls[key_name]
else:
# Return ACL for the bucket.
return self.acls[self.name]
def get_def_acl(self, key_name=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
# Return default ACL for the bucket.
return self.def_acl
def get_subresource(self, subresource, key_name=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
if subresource in self.subresources:
return self.subresources[subresource]
else:
return '<Subresource/>'
def new_key(self, key_name=None):
mock_key = MockKey(self, key_name)
self.keys[key_name] = mock_key
self.acls[key_name] = MockAcl()
return mock_key
def delete_key(self, key_name, headers=NOT_IMPL,
version_id=NOT_IMPL, mfa_token=NOT_IMPL):
if key_name not in self.keys:
raise boto.exception.StorageResponseError(404, 'Not Found')
del self.keys[key_name]
def get_all_keys(self, headers=NOT_IMPL):
return six.itervalues(self.keys)
def get_key(self, key_name, headers=NOT_IMPL, version_id=NOT_IMPL):
# Emulate behavior of boto when get_key called with non-existent key.
if key_name not in self.keys:
return None
return self.keys[key_name]
def list(self, prefix='', delimiter='', marker=NOT_IMPL,
headers=NOT_IMPL):
prefix = prefix or '' # Turn None into '' for prefix match.
# Return list instead of using a generator so we don't get
# 'dictionary changed size during iteration' error when performing
# deletions while iterating (e.g., during test cleanup).
result = []
key_name_set = set()
for k in six.itervalues(self.keys):
if k.name.startswith(prefix):
k_name_past_prefix = k.name[len(prefix):]
if delimiter:
pos = k_name_past_prefix.find(delimiter)
else:
pos = -1
if (pos != -1):
key_or_prefix = Prefix(
bucket=self, name=k.name[:len(prefix)+pos+1])
else:
key_or_prefix = MockKey(bucket=self, name=k.name)
if key_or_prefix.name not in key_name_set:
key_name_set.add(key_or_prefix.name)
result.append(key_or_prefix)
return result
def set_acl(self, acl_or_str, key_name='', headers=NOT_IMPL,
version_id=NOT_IMPL):
# We only handle setting ACL XML here; if you pass a canned ACL
# the get_acl call will just return that string name.
if key_name:
# Set ACL for the key.
self.acls[key_name] = MockAcl(acl_or_str)
else:
# Set ACL for the bucket.
self.acls[self.name] = MockAcl(acl_or_str)
def set_def_acl(self, acl_or_str, key_name=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
# We only handle setting ACL XML here; if you pass a canned ACL
# the get_acl call will just return that string name.
# Set default ACL for the bucket.
self.def_acl = acl_or_str
def set_subresource(self, subresource, value, key_name=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.subresources[subresource] = value
class MockProvider(object):
def __init__(self, provider):
self.provider = provider
def get_provider_name(self):
return self.provider
class MockConnection(object):
def __init__(self, aws_access_key_id=NOT_IMPL,
aws_secret_access_key=NOT_IMPL, is_secure=NOT_IMPL,
port=NOT_IMPL, proxy=NOT_IMPL, proxy_port=NOT_IMPL,
proxy_user=NOT_IMPL, proxy_pass=NOT_IMPL,
host=NOT_IMPL, debug=NOT_IMPL,
https_connection_factory=NOT_IMPL,
calling_format=NOT_IMPL,
path=NOT_IMPL, provider='s3',
bucket_class=NOT_IMPL):
self.buckets = {}
self.provider = MockProvider(provider)
def create_bucket(self, bucket_name, headers=NOT_IMPL, location=NOT_IMPL,
policy=NOT_IMPL, storage_class=NOT_IMPL):
if bucket_name in self.buckets:
raise boto.exception.StorageCreateError(
409, 'BucketAlreadyOwnedByYou',
"<Message>Your previous request to create the named bucket "
"succeeded and you already own it.</Message>")
mock_bucket = MockBucket(name=bucket_name, connection=self)
self.buckets[bucket_name] = mock_bucket
return mock_bucket
def delete_bucket(self, bucket, headers=NOT_IMPL):
if bucket not in self.buckets:
raise boto.exception.StorageResponseError(
404, 'NoSuchBucket', '<Message>no such bucket</Message>')
del self.buckets[bucket]
def get_bucket(self, bucket_name, validate=NOT_IMPL, headers=NOT_IMPL):
if bucket_name not in self.buckets:
raise boto.exception.StorageResponseError(404, 'NoSuchBucket',
'Not Found')
return self.buckets[bucket_name]
def get_all_buckets(self, headers=NOT_IMPL):
return six.itervalues(self.buckets)
# We only mock a single provider/connection.
mock_connection = MockConnection()
class MockBucketStorageUri(object):
delim = '/'
def __init__(self, scheme, bucket_name=None, object_name=None,
debug=NOT_IMPL, suppress_consec_slashes=NOT_IMPL,
version_id=None, generation=None, is_latest=False):
self.scheme = scheme
self.bucket_name = bucket_name
self.object_name = object_name
self.suppress_consec_slashes = suppress_consec_slashes
if self.bucket_name and self.object_name:
self.uri = ('%s://%s/%s' % (self.scheme, self.bucket_name,
self.object_name))
elif self.bucket_name:
self.uri = ('%s://%s/' % (self.scheme, self.bucket_name))
else:
self.uri = ('%s://' % self.scheme)
self.version_id = version_id
self.generation = generation and int(generation)
self.is_version_specific = (bool(self.generation)
or bool(self.version_id))
self.is_latest = is_latest
if bucket_name and object_name:
self.versionless_uri = '%s://%s/%s' % (scheme, bucket_name,
object_name)
def __repr__(self):
"""Returns string representation of URI."""
return self.uri
def acl_class(self):
return MockAcl
def canned_acls(self):
return boto.provider.Provider('aws').canned_acls
def clone_replace_name(self, new_name):
return self.__class__(self.scheme, self.bucket_name, new_name)
def clone_replace_key(self, key):
return self.__class__(
key.provider.get_provider_name(),
bucket_name=key.bucket.name,
object_name=key.name,
suppress_consec_slashes=self.suppress_consec_slashes,
version_id=getattr(key, 'version_id', None),
generation=getattr(key, 'generation', None),
is_latest=getattr(key, 'is_latest', None))
def connect(self, access_key_id=NOT_IMPL, secret_access_key=NOT_IMPL):
return mock_connection
def create_bucket(self, headers=NOT_IMPL, location=NOT_IMPL,
policy=NOT_IMPL, storage_class=NOT_IMPL):
return self.connect().create_bucket(self.bucket_name)
def delete_bucket(self, headers=NOT_IMPL):
return self.connect().delete_bucket(self.bucket_name)
def get_versioning_config(self, headers=NOT_IMPL):
self.get_bucket().get_versioning_status(headers)
def has_version(self):
return (issubclass(type(self), MockBucketStorageUri)
and ((self.version_id is not None)
or (self.generation is not None)))
def delete_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL, mfa_token=NOT_IMPL):
self.get_bucket().delete_key(self.object_name)
def disable_logging(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
self.get_bucket().disable_logging()
def enable_logging(self, target_bucket, target_prefix, validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().enable_logging(target_bucket)
def get_logging_config(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_logging_config()
def equals(self, uri):
return self.uri == uri.uri
def get_acl(self, validate=NOT_IMPL, headers=NOT_IMPL, version_id=NOT_IMPL):
return self.get_bucket().get_acl(self.object_name)
def get_def_acl(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_def_acl(self.object_name)
def get_subresource(self, subresource, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_subresource(subresource, self.object_name)
def get_all_buckets(self, headers=NOT_IMPL):
return self.connect().get_all_buckets()
def get_all_keys(self, validate=NOT_IMPL, headers=NOT_IMPL):
return self.get_bucket().get_all_keys(self)
def list_bucket(self, prefix='', delimiter='', headers=NOT_IMPL,
all_versions=NOT_IMPL):
return self.get_bucket().list(prefix=prefix, delimiter=delimiter)
def get_bucket(self, validate=NOT_IMPL, headers=NOT_IMPL):
return self.connect().get_bucket(self.bucket_name)
def get_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_key(self.object_name)
def is_file_uri(self):
return False
def is_cloud_uri(self):
return True
def names_container(self):
return bool(not self.object_name)
def names_singleton(self):
return bool(self.object_name)
def names_directory(self):
return False
def names_provider(self):
return bool(not self.bucket_name)
def names_bucket(self):
return self.names_container()
def names_file(self):
return False
def names_object(self):
return not self.names_container()
def is_stream(self):
return False
def new_key(self, validate=NOT_IMPL, headers=NOT_IMPL):
bucket = self.get_bucket()
return bucket.new_key(self.object_name)
def set_acl(self, acl_or_str, key_name='', validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().set_acl(acl_or_str, key_name)
def set_def_acl(self, acl_or_str, key_name=NOT_IMPL, validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().set_def_acl(acl_or_str)
def set_subresource(self, subresource, value, validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().set_subresource(subresource, value, self.object_name)
def copy_key(self, src_bucket_name, src_key_name, metadata=NOT_IMPL,
src_version_id=NOT_IMPL, storage_class=NOT_IMPL,
preserve_acl=NOT_IMPL, encrypt_key=NOT_IMPL, headers=NOT_IMPL,
query_args=NOT_IMPL, src_generation=NOT_IMPL):
dst_bucket = self.get_bucket()
return dst_bucket.copy_key(new_key_name=self.object_name,
src_bucket_name=src_bucket_name,
src_key_name=src_key_name)
def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
key = self.new_key()
key.set_contents_from_string(s)
def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, size=NOT_IMPL, rewind=NOT_IMPL,
res_upload_handler=NOT_IMPL):
key = self.new_key()
return key.set_contents_from_file(fp, headers=headers)
def set_contents_from_stream(self, fp, headers=NOT_IMPL, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
reduced_redundancy=NOT_IMPL,
query_args=NOT_IMPL, size=NOT_IMPL):
key = self.new_key()
key.set_contents_from_stream(fp)
def get_contents_to_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
num_cb=NOT_IMPL, torrent=NOT_IMPL,
version_id=NOT_IMPL, res_download_handler=NOT_IMPL,
response_headers=NOT_IMPL):
key = self.get_key()
key.get_contents_to_file(fp)
def get_contents_to_stream(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
num_cb=NOT_IMPL, version_id=NOT_IMPL):
key = self.get_key()
return key.get_contents_to_file(fp)
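# Illustrative usage sketch (not part of boto's test suite) showing how the
# mocks above can be exercised directly:
#
#   conn = MockConnection()
#   bucket = conn.create_bucket('test-bucket')
#   key = bucket.new_key('hello.txt')
#   key.set_contents_from_string('hello world')
#   assert key.get_contents_as_string() == 'hello world'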
|
sparkslabs/kamaelia
|
refs/heads/master
|
Sketches/RJL/Packages/Kamaelia/Community/RJL/Kamaelia/Util/__init__.py
|
129
|
# -*- coding: utf-8 -*-
# Needed to allow import
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""
This is a doc string, will it be of use?
"""
|
Zaneh-/bearded-tribble-back
|
refs/heads/master
|
taiga/hooks/github/event_hooks.py
|
1
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext_lazy as _
from taiga.projects.models import Project, IssueStatus, TaskStatus, UserStoryStatus
from taiga.projects.issues.models import Issue
from taiga.projects.tasks.models import Task
from taiga.projects.userstories.models import UserStory
from taiga.projects.history.services import take_snapshot
from taiga.projects.notifications.services import send_notifications
from taiga.hooks.event_hooks import BaseEventHook
from taiga.hooks.exceptions import ActionSyntaxException
from .services import get_github_user
import re
class PushEventHook(BaseEventHook):
def process_event(self):
if self.payload is None:
return
github_user = self.payload.get('sender', {}).get('id', None)
commits = self.payload.get("commits", [])
for commit in commits:
message = commit.get("message", None)
self._process_message(message, github_user)
def _process_message(self, message, github_user):
"""
The commit message we look for has the form
TG-XX #yyyyyy
Where:
XX: the ref of the user story, issue or task
yyyyyy: the slug of the status we are setting
"""
if message is None:
return
p = re.compile("tg-(\d+) +#([-\w]+)")
m = p.search(message.lower())
if m:
ref = m.group(1)
status_slug = m.group(2)
self._change_status(ref, status_slug, github_user)
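# Illustrative example (hypothetical commit message): "Fix login form tg-123 #closed"
# would match with ref '123' and status_slug 'closed', so the issue, task or user
# story with ref 123 in this project would be moved to the status whose slug is
# 'closed' by _change_status() below.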
def _change_status(self, ref, status_slug, github_user):
if Issue.objects.filter(project=self.project, ref=ref).exists():
modelClass = Issue
statusClass = IssueStatus
elif Task.objects.filter(project=self.project, ref=ref).exists():
modelClass = Task
statusClass = TaskStatus
elif UserStory.objects.filter(project=self.project, ref=ref).exists():
modelClass = UserStory
statusClass = UserStoryStatus
else:
raise ActionSyntaxException(_("The referenced element doesn't exist"))
element = modelClass.objects.get(project=self.project, ref=ref)
try:
status = statusClass.objects.get(project=self.project, slug=status_slug)
except statusClass.DoesNotExist:
raise ActionSyntaxException(_("The status doesn't exist"))
element.status = status
element.save()
snapshot = take_snapshot(element,
comment="Status changed from GitHub commit",
user=get_github_user(github_user))
send_notifications(element, history=snapshot)
def replace_github_references(project_url, wiki_text):
template = "\g<1>[GitHub#\g<2>]({}/issues/\g<2>)\g<3>".format(project_url)
return re.sub(r"(\s|^)#(\d+)(\s|$)", template, wiki_text, 0, re.M)
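# Illustrative example (hypothetical values): with project_url set to
# 'https://github.com/org/repo', the text 'See #42 for details' becomes
# 'See [GitHub#42](https://github.com/org/repo/issues/42) for details'.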
class IssuesEventHook(BaseEventHook):
def process_event(self):
if self.payload.get('action', None) != "opened":
return
subject = self.payload.get('issue', {}).get('title', None)
description = self.payload.get('issue', {}).get('body', None)
github_url = self.payload.get('issue', {}).get('html_url', None)
github_user = self.payload.get('issue', {}).get('user', {}).get('id', None)
project_url = self.payload.get('repository', {}).get('html_url', None)
if not all([subject, github_url, project_url]):
raise ActionSyntaxException(_("Invalid issue information"))
issue = Issue.objects.create(
project=self.project,
subject=subject,
description=replace_github_references(project_url, description),
status=self.project.default_issue_status,
type=self.project.default_issue_type,
severity=self.project.default_severity,
priority=self.project.default_priority,
external_reference=['github', github_url],
owner=get_github_user(github_user)
)
take_snapshot(issue, user=get_github_user(github_user))
snapshot = take_snapshot(issue, comment="Created from GitHub", user=get_github_user(github_user))
send_notifications(issue, history=snapshot)
class IssueCommentEventHook(BaseEventHook):
def process_event(self):
if self.payload.get('action', None) != "created":
raise ActionSyntaxException(_("Invalid issue comment information"))
github_url = self.payload.get('issue', {}).get('html_url', None)
comment_message = self.payload.get('comment', {}).get('body', None)
github_user = self.payload.get('sender', {}).get('id', None)
project_url = self.payload.get('repository', {}).get('html_url', None)
comment_message = replace_github_references(project_url, comment_message)
if not all([comment_message, github_url, project_url]):
raise ActionSyntaxException(_("Invalid issue comment information"))
issues = Issue.objects.filter(external_reference=["github", github_url])
tasks = Task.objects.filter(external_reference=["github", github_url])
uss = UserStory.objects.filter(external_reference=["github", github_url])
for item in list(issues) + list(tasks) + list(uss):
snapshot = take_snapshot(item,
comment="From GitHub:\n\n{}".format(comment_message),
user=get_github_user(github_user))
send_notifications(item, history=snapshot)
|
molguin92/paramics_traci
|
refs/heads/master
|
traci_client_test/runner.py
|
1
|
#!/usr/bin/env python
import os
import sys
import optparse
import subprocess
import random
import time
SIMTIME = 0x70
DEPARTEDVHCLST = 0x74
ARRIVEDVHCLST = 0x7a
# we need to import python modules from the $SUMO_HOME/tools directory
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
import traci
import random
PORT = 8245
affected = []
def run():
"""execute the TraCI control loop"""
traci.init(PORT)
print("Server version: " + str(traci.getVersion()))
print("Server timestep: " + str(traci.simulation.getDeltaT()))
print(str(traci.simulation.getNetBoundary()))
traci.simulation.subscribe([SIMTIME, DEPARTEDVHCLST, ARRIVEDVHCLST])
traci.vehicle.subscribe("x",[0, 1])
for i in range(0, 12100):
traci.simulationStep()
simsubs = traci.simulation.getSubscriptionResults()
vehsubs = traci.vehicle.getSubscriptionResults("x")
print("Current SIM time: " + str(simsubs[SIMTIME]))
dep = simsubs[DEPARTEDVHCLST]
arr = simsubs[ARRIVEDVHCLST]
carsinsim = vehsubs[0]
print 'dep', dep
print 'arr', arr
## for car in carsinsim:
## #traci.vehicle.setSpeed(car, 5.0)
## road = traci.vehicle.getRoadID(car)
## if road == "26:2" and car not in affected:
## #traci.vehicle.slowDown(car, 0.0, 10000)
## #traci.vehicle.setSpeed(car, 4.0)
## traci.vehicle.changeLane(car, 1, 40000)
## affected.append(car)
## elif road != "26:2" and car in affected:
## traci.vehicle.changeLane(car, 1, -1)
## affected.remove(car)
time.sleep(0.1)
traci.close()
if __name__ == '__main__':
run()
|
techtonik/numpy
|
refs/heads/master
|
numpy/matrixlib/__init__.py
|
140
|
"""Sub-package containing the matrix class and related functions.
"""
from __future__ import division, absolute_import, print_function
from .defmatrix import *
__all__ = defmatrix.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
dya2/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/test/generator_failure_tests.py
|
49
|
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Python 2.5+ test cases for failures thrown into generators.
"""
import sys
import traceback
from twisted.trial.unittest import TestCase
from twisted.python.failure import Failure
from twisted.internet import defer
# Re-implement getDivisionFailure here instead of using the one in
# test_failure.py in order to avoid creating a cyclic dependency.
def getDivisionFailure():
try:
1/0
except:
f = Failure()
return f
class TwoPointFiveFailureTests(TestCase):
def test_inlineCallbacksTracebacks(self):
"""
inlineCallbacks that re-raise tracebacks into their deferred
should not lose their tracebacks.
"""
f = getDivisionFailure()
d = defer.Deferred()
try:
f.raiseException()
except:
d.errback()
failures = []
def collect_error(result):
failures.append(result)
def ic(d):
yield d
ic = defer.inlineCallbacks(ic)
ic(d).addErrback(collect_error)
newFailure, = failures
self.assertEquals(
traceback.extract_tb(newFailure.getTracebackObject())[-1][-1],
"1/0"
)
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
g.next()
self._throwIntoGenerator(f, g)
self.assertEquals(stuff[0][0], ZeroDivisionError)
self.assertTrue(isinstance(stuff[0][1], ZeroDivisionError))
self.assertEquals(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
g.next()
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(Failure())
else:
self.fail("No exception sent to generator")
g = generator()
g.next()
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(Failure().value, IndexError)
g = generator()
g.next()
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
g.next()
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(Failure().value, IndexError)
|
ptisserand/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/influxdb/influxdb_write.py
|
111
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: influxdb_write
short_description: Write data points into InfluxDB.
description:
- Write data points into InfluxDB.
version_added: 2.5
author: "René Moser (@resmo)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
data_points:
description:
- Data points as dict to write into the database.
required: true
database_name:
description:
- Name of the database.
required: true
extends_documentation_fragment: influxdb
'''
EXAMPLES = r'''
- name: Write points into database
influxdb_write:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
data_points:
- measurement: connections
tags:
host: server01
region: us-west
time: "{{ ansible_date_time.iso8601 }}"
fields:
value: 2000
- measurement: connections
tags:
host: server02
region: us-east
time: "{{ ansible_date_time.iso8601 }}"
fields:
value: 3000
'''
RETURN = r'''
# only defaults
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.influxdb import InfluxDb
class AnsibleInfluxDBWrite(InfluxDb):
def write_data_point(self, data_points):
client = self.connect_to_influxdb()
try:
client.write_points(data_points)
except Exception as e:
self.module.fail_json(msg=to_native(e))
def main():
argument_spec = InfluxDb.influxdb_argument_spec()
argument_spec.update(
data_points=dict(required=True, type='list'),
database_name=dict(required=True, type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
)
influx = AnsibleInfluxDBWrite(module)
data_points = module.params.get('data_points')
influx.write_data_point(data_points)
module.exit_json(changed=True)
if __name__ == '__main__':
main()
|
tinfoil/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
|
124
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import BaseHTTPServer
import SocketServer
import logging
import json
import os
import sys
import urllib
from webkitpy.common.memoized import memoized
from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
from webkitpy.port import builders
_log = logging.getLogger(__name__)
class GardeningHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def __init__(self, httpd_port, config):
server_name = ''
self.tool = config['tool']
self.options = config['options']
BaseHTTPServer.HTTPServer.__init__(self, (server_name, httpd_port), GardeningHTTPRequestHandler)
def url(self, args=None):
# We can't use urllib.urlencode() here because that encodes spaces as plus signs and the buildbots don't decode those properly.
arg_string = ('?' + '&'.join("%s=%s" % (key, urllib.quote(value)) for (key, value) in args.items())) if args else ''
return 'http://localhost:8127/garden-o-matic.html' + arg_string
class GardeningHTTPRequestHandler(ReflectionHandler):
STATIC_FILE_NAMES = frozenset()
STATIC_FILE_EXTENSIONS = ('.js', '.css', '.html', '.gif', '.png', '.ico')
STATIC_FILE_DIRECTORY = os.path.join(
os.path.dirname(__file__),
'..',
'..',
'..',
'..',
'BuildSlaveSupport',
'build.webkit.org-config',
'public_html',
'TestFailures')
allow_cross_origin_requests = True
debug_output = ''
def ping(self):
self._serve_text('pong')
def _run_webkit_patch(self, command, input_string):
PIPE = self.server.tool.executive.PIPE
process = self.server.tool.executive.popen([self.server.tool.path()] + command, cwd=self.server.tool.scm().checkout_root, stdin=PIPE, stdout=PIPE, stderr=PIPE)
process.stdin.write(input_string)
output, error = process.communicate()
return (process.returncode, output, error)
def rebaselineall(self):
command = ['rebaseline-json']
if self.server.options.move_overwritten_baselines:
command.append('--move-overwritten-baselines')
if self.server.options.results_directory:
command.extend(['--results-directory', self.server.options.results_directory])
if not self.server.options.optimize:
command.append('--no-optimize')
if self.server.options.verbose:
command.append('--verbose')
json_input = self.read_entity_body()
_log.debug("calling %s, input='%s'", command, json_input)
return_code, output, error = self._run_webkit_patch(command, json_input)
print >> sys.stderr, error
if return_code:
_log.error("rebaseline-json failed: %d, output='%s'" % (return_code, output))
else:
_log.debug("rebaseline-json succeeded")
# FIXME: propagate error and/or log messages back to the UI.
self._serve_text('success')
def localresult(self):
path = self.query['path'][0]
filesystem = self.server.tool.filesystem
# Ensure that we're only serving files from inside the results directory.
if not filesystem.isabs(path) and self.server.options.results_directory:
fullpath = filesystem.abspath(filesystem.join(self.server.options.results_directory, path))
if fullpath.startswith(filesystem.abspath(self.server.options.results_directory)):
self._serve_file(fullpath, headers_only=(self.command == 'HEAD'))
return
self.send_response(403)
|
JioCloud/ceilometer
|
refs/heads/master
|
tests/compute/test_notifications.py
|
3
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# Copyright © 2013 eNovance
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for converters for producing compute counter messages from
notification events.
"""
from ceilometer.tests import base
from ceilometer.compute import notifications
from ceilometer import sample
INSTANCE_CREATE_END = {
u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2',
u'_context_is_admin': True,
u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e',
u'_context_quota_class': None,
u'_context_read_deleted': u'no',
u'_context_remote_address': u'10.0.2.15',
u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66',
u'_context_roles': [u'admin'],
u'_context_timestamp': u'2012-05-08T20:23:41.425105',
u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'event_type': u'compute.instance.create.end',
u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451',
u'payload': {u'created_at': u'2012-05-08 20:23:41',
u'deleted_at': u'',
u'disk_gb': 0,
u'display_name': u'testme',
u'fixed_ips': [{u'address': u'10.0.0.2',
u'floating_ips': [],
u'meta': {},
u'type': u'fixed',
u'version': 4}],
u'image_ref_url': u'http://10.0.2.15:9292/images/UUID',
u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1',
u'instance_type': u'm1.tiny',
u'instance_type_id': 2,
u'launched_at': u'2012-05-08 20:23:47.985999',
u'memory_mb': 512,
u'state': u'active',
u'state_description': u'',
u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3',
u'vcpus': 1,
u'root_gb': 0,
u'ephemeral_gb': 0,
u'host': u'compute-host-name',
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4',
u'os_type': u'linux?',
u'architecture': u'x86',
u'image_ref': u'UUID',
u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5',
u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6',
},
u'priority': u'INFO',
u'publisher_id': u'compute.vagrant-precise',
u'timestamp': u'2012-05-08 20:23:48.028195',
}
INSTANCE_DELETE_START = {
u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2',
u'_context_is_admin': True,
u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e',
u'_context_quota_class': None,
u'_context_read_deleted': u'no',
u'_context_remote_address': u'10.0.2.15',
u'_context_request_id': u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39',
u'_context_roles': [u'admin'],
u'_context_timestamp': u'2012-05-08T20:24:14.547374',
u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'event_type': u'compute.instance.delete.start',
u'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4',
u'payload': {u'created_at': u'2012-05-08 20:23:41',
u'deleted_at': u'',
u'disk_gb': 0,
u'display_name': u'testme',
u'image_ref_url': u'http://10.0.2.15:9292/images/UUID',
u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1',
u'instance_type': u'm1.tiny',
u'instance_type_id': 2,
u'launched_at': u'2012-05-08 20:23:47',
u'memory_mb': 512,
u'state': u'active',
u'state_description': u'deleting',
u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3',
u'vcpus': 1,
u'root_gb': 0,
u'ephemeral_gb': 0,
u'host': u'compute-host-name',
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4',
u'os_type': u'linux?',
u'architecture': u'x86',
u'image_ref': u'UUID',
u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5',
u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6',
},
u'priority': u'INFO',
u'publisher_id': u'compute.vagrant-precise',
u'timestamp': u'2012-05-08 20:24:14.824743',
}
INSTANCE_EXISTS = {
u'_context_auth_token': None,
u'_context_is_admin': True,
u'_context_project_id': None,
u'_context_quota_class': None,
u'_context_read_deleted': u'no',
u'_context_remote_address': None,
u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22',
u'_context_roles': [u'admin'],
u'_context_timestamp': u'2012-05-08T16:03:43.760204',
u'_context_user_id': None,
u'event_type': u'compute.instance.exists',
u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302',
u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00',
u'audit_period_ending': u'2012-05-08 16:00:00',
u'bandwidth': {},
u'created_at': u'2012-05-07 22:16:18',
u'deleted_at': u'',
u'disk_gb': 0,
u'display_name': u'testme',
u'image_ref_url': u'http://10.0.2.15:9292/images/UUID',
u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5',
u'instance_type': u'm1.tiny',
u'instance_type_id': 2,
u'launched_at': u'2012-05-07 23:01:27',
u'memory_mb': 512,
u'state': u'active',
u'state_description': u'',
u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3',
u'vcpus': 1,
u'root_gb': 0,
u'ephemeral_gb': 0,
u'host': u'compute-host-name',
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4',
u'os_type': u'linux?',
u'architecture': u'x86',
u'image_ref': u'UUID',
u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5',
u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6',
},
u'priority': u'INFO',
u'publisher_id': u'compute.vagrant-precise',
u'timestamp': u'2012-05-08 16:03:44.122481',
}
INSTANCE_FINISH_RESIZE_END = {
u'_context_roles': [u'admin'],
u'_context_request_id': u'req-e3f71bb9-e9b9-418b-a9db-a5950c851b25',
u'_context_quota_class': None,
u'event_type': u'compute.instance.finish_resize.end',
u'_context_user_name': u'admin',
u'_context_project_name': u'admin',
u'timestamp': u'2013-01-04 15:10:17.436974',
u'_context_is_admin': True,
u'message_id': u'a2f7770d-b85d-4797-ab10-41407a44368e',
u'_context_auth_token': None,
u'_context_instance_lock_checked': False,
u'_context_project_id': u'cea4b25edb484e5392727181b7721d29',
u'_context_timestamp': u'2013-01-04T15:08:39.162612',
u'_context_read_deleted': u'no',
u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed',
u'_context_remote_address': u'10.147.132.184',
u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal',
u'payload': {u'state_description': u'',
u'availability_zone': None,
u'ephemeral_gb': 0,
u'instance_type_id': 5,
u'deleted_at': u'',
u'fixed_ips': [{u'floating_ips': [],
u'label': u'private',
u'version': 4,
u'meta': {},
u'address': u'10.0.0.3',
u'type': u'fixed'}],
u'memory_mb': 2048,
u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed',
u'reservation_id': u'r-u3fvim06',
u'hostname': u's1',
u'state': u'resized',
u'launched_at': u'2013-01-04T15:10:14.923939',
u'metadata': [],
u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a',
u'access_ip_v6': None,
u'disk_gb': 20,
u'access_ip_v4': None,
u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a',
u'host': u'ip-10-147-132-184.ec2.internal',
u'display_name': u's1',
u'image_ref_url': u'http://10.147.132.184:9292/images/'
'a130b9d9-e00e-436e-9782-836ccef06e8a',
u'root_gb': 20,
u'tenant_id': u'cea4b25edb484e5392727181b7721d29',
u'created_at': u'2013-01-04T11:21:48.000000',
u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b',
u'instance_type': u'm1.small',
u'vcpus': 1,
u'image_meta': {u'kernel_id':
u'571478e0-d5e7-4c2e-95a5-2bc79443c28a',
u'ramdisk_id':
u'5f23128e-5525-46d8-bc66-9c30cd87141a',
u'base_image_ref':
u'a130b9d9-e00e-436e-9782-836ccef06e8a'},
u'architecture': None,
u'os_type': None
},
u'priority': u'INFO'
}
INSTANCE_RESIZE_REVERT_END = {
u'_context_roles': [u'admin'],
u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a',
u'_context_quota_class': None,
u'event_type': u'compute.instance.resize.revert.end',
u'_context_user_name': u'admin',
u'_context_project_name': u'admin',
u'timestamp': u'2013-01-04 15:20:32.009532',
u'_context_is_admin': True,
u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a',
u'_context_auth_token': None,
u'_context_instance_lock_checked': False,
u'_context_project_id': u'cea4b25edb484e5392727181b7721d29',
u'_context_timestamp': u'2013-01-04T15:19:51.018218',
u'_context_read_deleted': u'no',
u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed',
u'_context_remote_address': u'10.147.132.184',
u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal',
u'payload': {u'state_description': u'resize_reverting',
u'availability_zone': None,
u'ephemeral_gb': 0,
u'instance_type_id': 2,
u'deleted_at': u'',
u'reservation_id': u'r-u3fvim06',
u'memory_mb': 512,
u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed',
u'hostname': u's1',
u'state': u'resized',
u'launched_at': u'2013-01-04T15:10:14.000000',
u'metadata': [],
u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a',
u'access_ip_v6': None,
u'disk_gb': 0,
u'access_ip_v4': None,
u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a',
u'host': u'ip-10-147-132-184.ec2.internal',
u'display_name': u's1',
u'image_ref_url': u'http://10.147.132.184:9292/images/'
'a130b9d9-e00e-436e-9782-836ccef06e8a',
u'root_gb': 0,
u'tenant_id': u'cea4b25edb484e5392727181b7721d29',
u'created_at': u'2013-01-04T11:21:48.000000',
u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b',
u'instance_type': u'm1.tiny',
u'vcpus': 1,
u'image_meta': {u'kernel_id':
u'571478e0-d5e7-4c2e-95a5-2bc79443c28a',
u'ramdisk_id':
u'5f23128e-5525-46d8-bc66-9c30cd87141a',
u'base_image_ref':
u'a130b9d9-e00e-436e-9782-836ccef06e8a'},
u'architecture': None,
u'os_type': None
},
u'priority': u'INFO'
}
INSTANCE_DELETE_SAMPLES = {
u'_context_roles': [u'admin'],
u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a',
u'_context_quota_class': None,
u'event_type': u'compute.instance.delete.samples',
u'_context_user_name': u'admin',
u'_context_project_name': u'admin',
u'timestamp': u'2013-01-04 15:20:32.009532',
u'_context_is_admin': True,
u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a',
u'_context_auth_token': None,
u'_context_instance_lock_checked': False,
u'_context_project_id': u'cea4b25edb484e5392727181b7721d29',
u'_context_timestamp': u'2013-01-04T15:19:51.018218',
u'_context_read_deleted': u'no',
u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed',
u'_context_remote_address': u'10.147.132.184',
u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal',
u'payload': {u'state_description': u'resize_reverting',
u'availability_zone': None,
u'ephemeral_gb': 0,
u'instance_type_id': 2,
u'deleted_at': u'',
u'reservation_id': u'r-u3fvim06',
u'memory_mb': 512,
u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed',
u'hostname': u's1',
u'state': u'resized',
u'launched_at': u'2013-01-04T15:10:14.000000',
u'metadata': [],
u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a',
u'access_ip_v6': None,
u'disk_gb': 0,
u'access_ip_v4': None,
u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a',
u'host': u'ip-10-147-132-184.ec2.internal',
u'display_name': u's1',
u'image_ref_url': u'http://10.147.132.184:9292/images/'
'a130b9d9-e00e-436e-9782-836ccef06e8a',
u'root_gb': 0,
u'tenant_id': u'cea4b25edb484e5392727181b7721d29',
u'created_at': u'2013-01-04T11:21:48.000000',
u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b',
u'instance_type': u'm1.tiny',
u'vcpus': 1,
u'image_meta': {u'kernel_id':
u'571478e0-d5e7-4c2e-95a5-2bc79443c28a',
u'ramdisk_id':
u'5f23128e-5525-46d8-bc66-9c30cd87141a',
u'base_image_ref':
u'a130b9d9-e00e-436e-9782-836ccef06e8a'},
u'architecture': None,
u'os_type': None,
u'samples': [{u'name': u'sample-name1',
u'type': u'sample-type1',
u'unit': u'sample-units1',
u'volume': 1},
{u'name': u'sample-name2',
u'type': u'sample-type2',
u'unit': u'sample-units2',
u'volume': 2},
],
},
u'priority': u'INFO'
}
INSTANCE_SCHEDULED = {
u'_context_roles': [u'admin'],
u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a',
u'_context_quota_class': None,
u'event_type': u'scheduler.run_instance.scheduled',
u'_context_user_name': u'admin',
u'_context_project_name': u'admin',
u'timestamp': u'2013-01-04 15:20:32.009532',
u'_context_is_admin': True,
u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a',
u'_context_auth_token': None,
u'_context_instance_lock_checked': False,
u'_context_project_id': u'cea4b25edb484e5392727181b7721d29',
u'_context_timestamp': u'2013-01-04T15:19:51.018218',
u'_context_read_deleted': u'no',
u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed',
u'_context_remote_address': u'10.147.132.184',
u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal',
u'payload': {
'instance_id': 'fake-uuid1-1',
'weighted_host': {
'host': 'host3',
'weight': 3.0,
},
'request_spec': {
'instance_properties': {
'root_gb': 512,
'ephemeral_gb': 0,
'launch_index': 0,
'memory_mb': 512,
'vcpus': 1,
'os_type': 'Linux',
'project_id': 1,
'system_metadata': {'system': 'metadata'}},
'instance_type': {'memory_mb': 512,
'vcpus': 1,
'root_gb': 512,
'ephemeral_gb': 0},
'instance_uuids': ['fake-uuid1-1'],
},
},
u'priority': u'INFO'
}
class TestNotifications(base.TestCase):
def test_process_notification(self):
info = list(notifications.Instance().process_notification(
INSTANCE_CREATE_END
))[0]
for name, actual, expected in [
('counter_name', info.name, 'instance'),
('counter_type', info.type, sample.TYPE_GAUGE),
('counter_volume', info.volume, 1),
('timestamp', info.timestamp,
INSTANCE_CREATE_END['timestamp']),
('resource_id', info.resource_id,
INSTANCE_CREATE_END['payload']['instance_id']),
('instance_type_id',
info.resource_metadata['instance_type_id'],
INSTANCE_CREATE_END['payload']['instance_type_id']),
('host', info.resource_metadata['host'],
INSTANCE_CREATE_END['publisher_id']),
]:
self.assertEqual(actual, expected, name)
@staticmethod
def _find_counter(counters, name):
return filter(lambda counter: counter.name == name, counters)[0]
def test_instance_create_instance(self):
ic = notifications.Instance()
counters = list(ic.process_notification(INSTANCE_CREATE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, 1)
def test_instance_create_flavor(self):
ic = notifications.InstanceFlavor()
counters = list(ic.process_notification(INSTANCE_CREATE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, 1)
def test_instance_create_memory(self):
ic = notifications.Memory()
counters = list(ic.process_notification(INSTANCE_CREATE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, INSTANCE_CREATE_END['payload']['memory_mb'])
def test_instance_create_vcpus(self):
ic = notifications.VCpus()
counters = list(ic.process_notification(INSTANCE_CREATE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, INSTANCE_CREATE_END['payload']['vcpus'])
def test_instance_create_root_disk_size(self):
ic = notifications.RootDiskSize()
counters = list(ic.process_notification(INSTANCE_CREATE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, INSTANCE_CREATE_END['payload']['root_gb'])
def test_instance_create_ephemeral_disk_size(self):
ic = notifications.EphemeralDiskSize()
counters = list(ic.process_notification(INSTANCE_CREATE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume,
INSTANCE_CREATE_END['payload']['ephemeral_gb'])
def test_instance_exists_instance(self):
ic = notifications.Instance()
counters = list(ic.process_notification(INSTANCE_EXISTS))
self.assertEqual(len(counters), 1)
def test_instance_exists_flavor(self):
ic = notifications.Instance()
counters = list(ic.process_notification(INSTANCE_EXISTS))
self.assertEqual(len(counters), 1)
def test_instance_delete_instance(self):
ic = notifications.Instance()
counters = list(ic.process_notification(INSTANCE_DELETE_START))
self.assertEqual(len(counters), 1)
def test_instance_delete_flavor(self):
ic = notifications.Instance()
counters = list(ic.process_notification(INSTANCE_DELETE_START))
self.assertEqual(len(counters), 1)
def test_instance_finish_resize_instance(self):
ic = notifications.Instance()
counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, 1)
def test_instance_finish_resize_flavor(self):
ic = notifications.InstanceFlavor()
counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, 1)
self.assertEqual(c.name, 'instance:m1.small')
def test_instance_finish_resize_memory(self):
ic = notifications.Memory()
counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume,
INSTANCE_FINISH_RESIZE_END['payload']['memory_mb'])
def test_instance_finish_resize_vcpus(self):
ic = notifications.VCpus()
counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume,
INSTANCE_FINISH_RESIZE_END['payload']['vcpus'])
def test_instance_resize_finish_instance(self):
ic = notifications.Instance()
counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, 1)
def test_instance_resize_finish_flavor(self):
ic = notifications.InstanceFlavor()
counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume, 1)
self.assertEqual(c.name, 'instance:m1.tiny')
def test_instance_resize_finish_memory(self):
ic = notifications.Memory()
counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume,
INSTANCE_RESIZE_REVERT_END['payload']['memory_mb'])
def test_instance_resize_finish_vcpus(self):
ic = notifications.VCpus()
counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
self.assertEqual(len(counters), 1)
c = counters[0]
self.assertEqual(c.volume,
INSTANCE_RESIZE_REVERT_END['payload']['vcpus'])
def test_instance_delete_samples(self):
ic = notifications.InstanceDelete()
counters = list(ic.process_notification(INSTANCE_DELETE_SAMPLES))
self.assertEqual(len(counters), 2)
names = [c.name for c in counters]
self.assertEqual(names, ['sample-name1', 'sample-name2'])
def test_instance_scheduled(self):
ic = notifications.InstanceScheduled()
self.assertIn(INSTANCE_SCHEDULED['event_type'],
ic.event_types)
counters = list(ic.process_notification(INSTANCE_SCHEDULED))
self.assertEqual(len(counters), 1)
names = [c.name for c in counters]
self.assertEqual(names, ['instance.scheduled'])
rid = [c.resource_id for c in counters]
self.assertEqual(rid, ['fake-uuid1-1'])
|
Alex-Ian-Hamilton/sunpy
|
refs/heads/master
|
sunpy/net/helio/parser.py
|
1
|
# -*- coding: utf-8 -*-
# Author: Michael Malocha <mjm159@humboldt.edu>
# Last Edit: September 22nd, 2013
#
# This module was developed with funding from the GSOC 2013 summer of code
#
"""
This module is meant to parse the HELIO registry and return WSDL endpoints to
facilitate interfacing between other modules and HELIO.
"""
from __future__ import absolute_import, print_function
import xml.etree.ElementTree as EL
from bs4 import BeautifulSoup
from contextlib import closing
from sunpy.net.helio import registry_links as RL
from sunpy.extern.six.moves import urllib
__author__ = 'Michael Malocha'
__version__ = 'September 22nd, 2013'
# Lifespan in seconds before a link times-out
LINK_TIMEOUT = 3
def webservice_parser(service='HEC'):
"""
    Quickly parses important contents from the HELIO registry.
    Uses the link contained in registry_links with 'service' appended
and scrapes the web-service links contained on that webpage.
Parameters
----------
service: str
Indicates which particular HELIO service is used. Defaults to HEC.
Returns
-------
links: list or NoneType
List of urls to registries containing WSDL endpoints.
Examples
--------
>>> from sunpy.net.helio import parser
>>> parser.webservice_parser()
['http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService',
'http://festung3.oats.inaf.it:8080/helio-hec/HelioService',
'http://festung1.oats.inaf.it:8080/helio-hec/HelioService',
'http://hec.helio-vo.eu/helio_hec/HelioService',
'http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioLongQueryService',
'http://festung3.oats.inaf.it:8080/helio-hec/HelioLongQueryService',
'http://festung1.oats.inaf.it:8080/helio-hec/HelioLongQueryService',
'http://hec.helio-vo.eu/helio_hec/HelioLongQueryService']
"""
link = RL.LINK + '/' + service.lower()
xml = link_test(link)
if xml is None:
return xml
root = EL.fromstring(xml)
links = []
#WARNING: getiterator is deprecated in Python 2.7+
#Fix for 3.x support
for interface in root.getiterator('interface'):
service_type = interface.attrib
key = list(service_type.keys())
if len(key) > 0:
value = service_type[key[0]]
if value == 'vr:WebService':
for url in interface.getiterator('accessURL'):
if url.text not in links:
links.append(url.text)
return links
def endpoint_parser(link):
"""
Takes a link to a list of endpoints and parses the WSDL links.
    Feeding one result from webservice_parser() into endpoint_parser() at a time
    will return a list of WSDL endpoints contained on the page at the link that
    was passed in.
Parameters
----------
link: str
A url to a page containing links to WSDL files.
Returns
-------
endpoints: list or NoneType
A list containing all of the available WSDL endpoints from the passed
in url.
Examples
--------
>>> from sunpy.net.helio import parser
>>> parser.endpoint_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')
['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService?wsdl',
'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0?wsdl',
'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0b?wsdl',
'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService?wsdl',
'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0?wsdl',
'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_1?wsdl',
'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0b?wsdl',
'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']
"""
endpoint_page = link_test(link)
if endpoint_page is None:
return None
soup = BeautifulSoup(endpoint_page)
endpoints = []
for web_link in soup.find_all('a'):
endpoints.append(web_link.get('href'))
return endpoints
def taverna_parser(link):
"""
Takes a link to a list of endpoints and parses the taverna WSDL links.
Takes a url to a page containing a list of endpoints, then passes that url
    to endpoint_parser(). Upon receiving the resulting list from the parser,
    taverna_parser() goes through the list and finds all the WSDL links for
the taverna web-service. It then returns a list containing the filtered
links.
Parameters
----------
link: str
A url to a page containing links to WSDL files.
Returns
-------
taverna_links: list or NoneType
A list containing WSDL links for a taverna web-service
Examples
--------
>>> from sunpy.net.helio import parser
>>> parser.taverna_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')
['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']
"""
endpoints = endpoint_parser(link)
taverna_links = []
if endpoints is None:
return None
for web_link in endpoints:
if 'Taverna' in web_link:
taverna_links.append(web_link)
if len(taverna_links) == 0:
return None
return taverna_links
def link_test(link):
"""
    Test whether a link is valid.
    Quickly checks to see if the URL is a valid link; if it is, it returns the
    downloaded contents of that page.
Parameters
----------
link: str
A string containing a URL
Returns
-------
webpage: str or NoneType
String containing the webresults
Examples
--------
>>> from sunpy.net.helio import parser
>>> parser.link_test('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')
u'<html>\n<head>...</body>\n</html>\n'
>>> print(parser.link_test('http://rrnx.invalid_url5523.com'))
None
"""
try:
with closing(urllib.request.urlopen(link, timeout=LINK_TIMEOUT)) as fd:
return fd.read()
except (ValueError, urllib.error.URLError):
return None
def wsdl_retriever(service='HEC'):
"""
Retrieves a link to a taverna WSDL file
    This is essentially the master method: it calls all of the other functions
    and knits everything together. It gets a list of
service links via webservice_parser(), then filters the results via
taverna_parser(). Finally it tests all the returned taverna WSDL links
and returns the first live taverna endpoint.
Parameters
----------
service: str
Indicates which particular HELIO service is used. Defaults to HEC.
Returns
-------
wsdl: str
URL to a single live taverna endpoint
Examples
--------
>>> from sunpy.net.helio import parser
>>> parser.wsdl_retriever()
'http://msslkz.mssl.ucl.ac.uk:80/helio_hec/HelioTavernaService?wsdl'
Notes
-----
* Currently only support for HEC exists, but it was designed so that it
could be expanded at a later date
* There is a 3 second timeout lifespan on links, so there is potential for
this function to take a while to return. Timeout duration can be
controlled through the LINK_TIMEOUT value
"""
service_links = webservice_parser(service=service)
wsdl = None
wsdl_links = None
if service_links is None:
return None
    for link in service_links:
        wsdl_links = taverna_parser(link)
        if wsdl_links is None:
            continue
        for end_point in wsdl_links:
            if end_point is not None and link_test(end_point) is not None:
                wsdl = end_point
                break
        if wsdl is not None:
            break
    return wsdl
|
alexthered/kienhoc-platform
|
refs/heads/master
|
common/djangoapps/course_modes/migrations/0005_auto__add_field_coursemode_expiration_datetime.py
|
114
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseMode.expiration_datetime'
db.add_column('course_modes_coursemode', 'expiration_datetime',
self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseMode.expiration_datetime'
db.delete_column('course_modes_coursemode', 'expiration_datetime')
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
|
hckiang/DBLite
|
refs/heads/master
|
boost_1_54_0/tools/build/v2/test/core_typecheck.py
|
45
|
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This tests the typechecking facilities.
import BoostBuild
t = BoostBuild.Tester(["-ffile.jam"], pass_toolset=0)
t.write("file.jam", """
module .typecheck
{
rule [path] ( x )
{
if ! [ MATCH "^(::)" : $(x) ]
{
ECHO "Error: $(x) is not a path" ;
return true ;
}
}
}
rule do ( [path] a )
{
}
do $(ARGUMENT) ;
actions dummy { }
dummy all ;
""")
t.run_build_system(["-sARGUMENT=::a/b/c"])
t.run_build_system(["-sARGUMENT=a/b/c"], status=1, stdout="""\
Error: a/b/c is not a path
file.jam:18: in module scope
*** argument error
* rule do ( [path] a )
* called with: ( a/b/c )
* true a
file.jam:16:see definition of rule 'do' being called
""")
t.cleanup()
|
mahak/nova
|
refs/heads/master
|
nova/scheduler/filters/compute_capabilities_filter.py
|
2
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class ComputeCapabilitiesFilter(filters.BaseHostFilter):
"""HostFilter hard-coded to work with InstanceType records."""
# Instance type and host capabilities do not change within a request
run_filter_once_per_request = True
RUN_ON_REBUILD = False
def _get_capabilities(self, host_state, scope):
cap = host_state
for index in range(0, len(scope)):
try:
if isinstance(cap, str):
try:
cap = jsonutils.loads(cap)
except ValueError as e:
LOG.debug("%(host_state)s fails. The capabilities "
"'%(cap)s' couldn't be loaded from JSON: "
"%(error)s",
{'host_state': host_state, 'cap': cap,
'error': e})
return None
if not isinstance(cap, dict):
if getattr(cap, scope[index], None) is None:
# If can't find, check stats dict
cap = cap.stats.get(scope[index], None)
else:
cap = getattr(cap, scope[index], None)
else:
cap = cap.get(scope[index], None)
except AttributeError as e:
LOG.debug("%(host_state)s fails. The capabilities couldn't "
"be retrieved: %(error)s.",
{'host_state': host_state, 'error': e})
return None
if cap is None:
LOG.debug("%(host_state)s fails. There are no capabilities "
"to retrieve.",
{'host_state': host_state})
return None
return cap
def _satisfies_extra_specs(self, host_state, flavor):
"""Check that the host_state provided by the compute service
satisfies the extra specs associated with the instance type.
"""
if 'extra_specs' not in flavor:
return True
for key, req in flavor.extra_specs.items():
# Either not scope format, or in capabilities scope
scope = key.split(':')
            # If the key does not have a namespace, the scope's size is 1;
            # check whether host_state contains the key as an attribute. If
            # not, ignore it. If it does, deal with it in the same way as
            # 'capabilities:key'. This is for backward compatibility.
            # If the key has a namespace, the scope's size will be bigger than
            # 1; check whether the namespace is 'capabilities'. If not,
            # ignore it.
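            # For example (illustrative keys and values only):
            #   'hw_cpu_cores': '2'          -> scope == ['hw_cpu_cores']
            #   'capabilities:cpu_info:vendor': 'Intel'
            #       -> scope == ['capabilities', 'cpu_info', 'vendor'], reduced
            #          to ['cpu_info', 'vendor'] before the capability lookup.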
if len(scope) == 1:
stats = getattr(host_state, 'stats', {})
has_attr = hasattr(host_state, key) or key in stats
if not has_attr:
continue
else:
if scope[0] != "capabilities":
continue
else:
del scope[0]
cap = self._get_capabilities(host_state, scope)
if cap is None:
return False
if not extra_specs_ops.match(str(cap), req):
LOG.debug("%(host_state)s fails extra_spec requirements. "
"'%(req)s' does not match '%(cap)s'",
{'host_state': host_state, 'req': req,
'cap': cap})
return False
return True
def host_passes(self, host_state, spec_obj):
"""Return a list of hosts that can create flavor."""
if not self._satisfies_extra_specs(host_state, spec_obj.flavor):
LOG.debug(
"%(host_state)s fails flavor extra_specs requirements",
{'host_state': host_state})
return False
return True
|
Pafcholini/linux-3.10.y
|
refs/heads/linux-3.10.y
|
tools/perf/tests/attr.py
|
3174
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
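        # e.g. an expected value of '0|1' matches a result of either '0' or '1',
        # and '*' on either side matches anything.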
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
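#
# A minimal, hypothetical description illustrating the layout above (the
# concrete values are made up for illustration only):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000
#   sample_type   = 263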
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the 'event' word,
        # optionally followed by ':' allowing the 'parent
        # event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
bgris/ODL_bgris
|
refs/heads/master
|
lib/python3.5/site-packages/isort/__init__.py
|
9
|
"""__init__.py.
Defines the isort module to include the SortImports utility class as well as any defined settings.
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from . import settings
from .isort import SortImports
__version__ = "4.2.5"
|
googlefonts/nototools
|
refs/heads/main
|
setup.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# with open("README.rst", 'r') as readme_file:
# readme = readme_file.read()
readme = """Noto font tools are a set of scripts useful for release
engineering of Noto and similar fonts"""
setup(
name="notofonttools",
use_scm_version={"write_to": "nototools/_version.py"},
description="Noto font tools",
license="Apache",
long_description=readme,
python_requires=">=3.7",
author="Noto Authors",
author_email="noto-font@googlegroups.com",
url="https://github.com/googlefonts/nototools",
# more examples here http://docs.python.org/distutils/examples.html#pure-python-distribution-by-package
packages=find_packages() + ["third_party"],
include_package_data=True,
setup_requires=["setuptools_scm"],
install_requires=[
"fontTools",
# On Mac OS X these need to be installed with homebrew
# 'cairo',
# 'pango',
# 'pygtk',
# 'imagemagick'
],
extras_require={
# optional requirements for nototools.shape_diff module
"shapediff": ["booleanOperations", "defcon", "Pillow",],
},
package_data={"nototools": ["*.sh", "data/*",]},
# $ grep "def main(" nototools/* | cut -d: -f1
scripts=[
"nototools/autofix_for_release.py",
"nototools/add_vs_cmap.py",
"nototools/create_image.py",
"nototools/decompose_ttc.py",
"nototools/drop_hints.py",
"nototools/dump_otl.py",
"nototools/fix_khmer_and_lao_coverage.py",
"nototools/fix_noto_cjk_thin.py",
"nototools/generate_sample_text.py",
"nototools/generate_website_2_data.py",
"nototools/merge_noto.py",
"nototools/merge_fonts.py",
"nototools/noto_lint.py",
"nototools/scale.py",
"nototools/subset.py",
"nototools/subset_symbols.py",
"nototools/test_vertical_extents.py",
],
entry_points={
"console_scripts": [
"notodiff = nototools.notodiff:main",
"notocoverage = nototools.coverage:main",
]
},
)
|
hidekb/espressopp
|
refs/heads/master
|
src/tools/lattice.py
|
7
|
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: iso-8859-1 -*-
def createCubic(N, rho, perfect=True, RNG=None):
"""
Initializes particles on the sites of a simple cubic lattice.
By setting ``perfect=False``
the particle positions will be given random displacements
with a magnitude of one-tenth the lattice spacing.
"""
    if RNG is None:
import random
cubes = []
for i in xrange(100):
cubes.append(i**3)
if(cubes.count(N) != 1):
print '\nWARNING: num_particles is not a perfect cube. Initial'
print ' configuration may be inhomogeneous.\n'
L = (N / rho)**(1.0/3.0)
a = int(N**(1.0/3.0))
if(a**3 < N):
a = a + 1
lattice_spacing = L / a
def rnd(magn_):
        if RNG is None:
rand = random.random()
else :
rand = RNG()
return magn_ * (2.0 * rand - 1.0)
# magnitude of random displacements
magn = 0.0 if perfect else lattice_spacing / 10.0
ct = 0
x = []
y = []
z = []
for i in xrange(a):
for j in xrange(a):
for k in xrange(a):
if(ct < N):
x.append(0.5 * lattice_spacing + i * lattice_spacing + rnd(magn))
y.append(0.5 * lattice_spacing + j * lattice_spacing + rnd(magn))
z.append(0.5 * lattice_spacing + k * lattice_spacing + rnd(magn))
ct += 1
return x, y, z, L, L, L
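# Illustrative usage (assumed parameter values; the caller assigns particles to
# the returned positions as the simulation requires):
#   x, y, z, Lx, Ly, Lz = createCubic(1000, rho=0.8442, perfect=False)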
# TODO implement checking for a wrong number of particles, slightly non-ideal lattices, etc.
def createDiamond(N, rho, perfect=True, RNG=None):
"""
Initializes particles on the sites of a diamond lattice.
"""
from espressopp import Real3D
#L = (N / 8.0 / rho)**(1.0/3.0)
L = (N / rho)**(1.0/3.0)
num_per_edge = int( (N/8.0)**(1.0/3.0) )
if(8.0*num_per_edge**3 < N):
num_per_edge = num_per_edge + 1
#print 'num_per_site= ', num_per_edge
a = L / num_per_edge
#print 'a= ', a
#print 'a1= ', (1.0 / rho)**(1.0/3.0)
pos = []
    # in general the structure is shifted relative to (0,0,0)
R0 = Real3D(0.125 * a, 0.125 * a, 0.125 * a)
R1 = Real3D(0.25 * a, 0.25 * a, 0.25 * a)
a11 = a * Real3D(1,0,0)
a22 = a * Real3D(0,1,0)
a33 = a * Real3D(0,0,1)
a1 = 0.5 * a * Real3D(0,1,1)
a2 = 0.5 * a * Real3D(1,0,1)
a3 = 0.5 * a * Real3D(1,1,0)
for i in xrange(num_per_edge):
for j in xrange(num_per_edge):
for k in xrange(num_per_edge):
Rijk = R0 + i*a11 + j*a22 + k*a33
pos.append(Rijk)
pos.append(Rijk+a1)
pos.append(Rijk+a2)
pos.append(Rijk+a3)
pos.append(Rijk+R1)
pos.append(Rijk+a1+R1)
pos.append(Rijk+a2+R1)
pos.append(Rijk+a3+R1)
'''
L1 = L-0.01
pos.append( Real3D(0.01, 0.01, 0.01) )
pos.append( Real3D(L1, 0.01, 0.01) )
pos.append( Real3D(0.01, L1, 0.01) )
pos.append( Real3D(0.01, 0.01, L1) )
pos.append( Real3D(0.01, L1, L1) )
pos.append( Real3D(L1, L1, 0.01) )
pos.append( Real3D(L1, 0.01, L1) )
pos.append( Real3D(L1, L1, L1) )
'''
return pos, L, L, L
|
queenp/wakatime
|
refs/heads/master
|
wakatime/packages/pygments_py2/pygments/lexers/c_cpp.py
|
72
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.c_cpp
~~~~~~~~~~~~~~~~~~~~~
Lexers for C/C++ languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, inherit, default, words
from pygments.util import get_bool_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['CLexer', 'CppLexer']
class CFamilyLexer(RegexLexer):
"""
For C family source code. This is used as a base class to avoid repetitious
definitions.
"""
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^(' + _ws1 + r')(#if\s+0)',
bygroups(using(this), Comment.Preproc), 'if0'),
('^(' + _ws1 + ')(#)',
bygroups(using(this), Comment.Preproc), 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(words(('auto', 'break', 'case', 'const', 'continue', 'default', 'do',
'else', 'enum', 'extern', 'for', 'goto', 'if', 'register',
'restricted', 'return', 'sizeof', 'static', 'struct',
'switch', 'typedef', 'union', 'volatile', 'while'),
suffix=r'\b'), Keyword),
(r'(bool|int|long|float|short|double|char|unsigned|signed|void|'
r'[a-z_][a-z0-9_]*_t)\b',
Keyword.Type),
(words(('inline', '_inline', '__inline', 'naked', 'restrict',
'thread', 'typename'), suffix=r'\b'), Keyword.Reserved),
# Vector intrinsics
(r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
# Microsoft-isms
(words((
'asm', 'int8', 'based', 'except', 'int16', 'stdcall', 'cdecl',
'fastcall', 'int32', 'declspec', 'finally', 'int64', 'try',
'leave', 'wchar_t', 'w64', 'unaligned', 'raise', 'noop',
'identifier', 'forceinline', 'assume'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
(r'(true|false|NULL)\b', Name.Builtin),
(r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)),
('[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')?(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')?(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
'wctrans_t', 'wint_t', 'wctype_t']
c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
'int_least16_t', 'int_least32_t', 'int_least64_t',
'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t',
'uintmax_t']
def __init__(self, **options):
self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.stdlibhighlighting and value in self.stdlib_types:
token = Keyword.Type
elif self.c99highlighting and value in self.c99_types:
token = Keyword.Type
yield index, token, value
class CLexer(CFamilyLexer):
"""
For C source code with preprocessor directives.
"""
name = 'C'
aliases = ['c']
filenames = ['*.c', '*.h', '*.idc']
mimetypes = ['text/x-chdr', 'text/x-csrc']
priority = 0.1
def analyse_text(text):
if re.search('^\s*#include [<"]', text, re.MULTILINE):
return 0.1
if re.search('^\s*#ifdef ', text, re.MULTILINE):
return 0.1
class CppLexer(CFamilyLexer):
"""
For C++ source code with preprocessor directives.
"""
name = 'C++'
aliases = ['cpp', 'c++']
filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
'*.cc', '*.hh', '*.cxx', '*.hxx',
'*.C', '*.H', '*.cp', '*.CPP']
mimetypes = ['text/x-c++hdr', 'text/x-c++src']
priority = 0.1
tokens = {
'statements': [
(words((
'asm', 'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
'export', 'friend', 'mutable', 'namespace', 'new', 'operator',
'private', 'protected', 'public', 'reinterpret_cast',
'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
'typeid', 'typename', 'using', 'virtual',
'constexpr', 'nullptr', 'decltype', 'thread_local',
'alignas', 'alignof', 'static_assert', 'noexcept', 'override',
'final'), suffix=r'\b'), Keyword),
(r'char(16_t|32_t)\b', Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
inherit,
],
'root': [
inherit,
# C++ Microsoft-isms
(words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
'multiple_inheritance', 'interface', 'event'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
# Offload C++ extensions, http://offload.codeplay.com/
(r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
def analyse_text(text):
if re.search('#include <[a-z]+>', text):
return 0.2
if re.search('using namespace ', text):
return 0.4
|
zasdfgbnm/tensorflow
|
refs/heads/master
|
tensorflow/contrib/eager/python/examples/gan/mnist_graph_test.py
|
35
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.gan import mnist
NOISE_DIM = 100
# Big enough so that summaries are never recorded.
# Lower this value if you would like to benchmark with some summaries.
SUMMARY_INTERVAL = 10000
SUMMARY_FLUSH_MS = 100 # Flush summaries every 100ms
def data_format():
return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'
class MnistGraphGanBenchmark(tf.test.Benchmark):
def _create_graph(self, batch_size):
# Generate some random data.
images_data = np.random.randn(batch_size, 784).astype(np.float32)
dataset = tf.data.Dataset.from_tensors(images_data)
images = dataset.repeat().make_one_shot_iterator().get_next()
# Create the models and optimizers
generator = mnist.Generator(data_format())
discriminator = mnist.Discriminator(data_format())
with tf.variable_scope('generator'):
generator_optimizer = tf.train.AdamOptimizer(0.001)
with tf.variable_scope('discriminator'):
discriminator_optimizer = tf.train.AdamOptimizer(0.001)
# Run models and compute loss
noise_placeholder = tf.placeholder(tf.float32,
shape=[batch_size, NOISE_DIM])
generated_images = generator(noise_placeholder)
tf.contrib.summary.image('generated_images',
tf.reshape(generated_images, [-1, 28, 28, 1]),
max_images=10)
discriminator_gen_outputs = discriminator(generated_images)
discriminator_real_outputs = discriminator(images)
generator_loss = mnist.generator_loss(discriminator_gen_outputs)
discriminator_loss = mnist.discriminator_loss(discriminator_real_outputs,
discriminator_gen_outputs)
# Get train ops
with tf.variable_scope('generator'):
generator_train = generator_optimizer.minimize(
generator_loss, var_list=generator.variables)
with tf.variable_scope('discriminator'):
discriminator_train = discriminator_optimizer.minimize(
discriminator_loss, var_list=discriminator.variables)
return (generator_train, discriminator_train, noise_placeholder)
def _report(self, test_name, start, num_iters, batch_size):
avg_time = (time.time() - start) / num_iters
dev = 'gpu' if tf.test.is_gpu_available() else 'cpu'
name = 'graph_%s_%s_batch_%d_%s' % (test_name, dev, batch_size,
data_format())
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def benchmark_train(self):
for batch_size in [64, 128, 256]:
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
increment_global_step = tf.assign_add(global_step, 1)
with tf.contrib.summary.create_file_writer(
tempfile.mkdtemp(), flush_millis=SUMMARY_FLUSH_MS).as_default(), (
tf.contrib.summary.record_summaries_every_n_global_steps(
SUMMARY_INTERVAL)):
(generator_train, discriminator_train, noise_placeholder
) = self._create_graph(batch_size)
with tf.Session() as sess:
tf.contrib.summary.initialize(graph=tf.get_default_graph(),
session=sess)
sess.run(tf.global_variables_initializer())
num_burn, num_iters = (3, 100)
for _ in range(num_burn):
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
# Increment global step before evaluating summary ops to avoid
# race condition.
sess.run(increment_global_step)
sess.run([generator_train, discriminator_train,
tf.contrib.summary.all_summary_ops()],
feed_dict={noise_placeholder: noise})
          # Run and benchmark num_iters training iterations
start = time.time()
for _ in range(num_iters):
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
sess.run(increment_global_step)
sess.run([generator_train, discriminator_train,
tf.contrib.summary.all_summary_ops()],
feed_dict={noise_placeholder: noise})
self._report('train', start, num_iters, batch_size)
def benchmark_generate(self):
for batch_size in [64, 128, 256]:
with tf.Graph().as_default():
# Using random weights. This will generate garbage.
generator = mnist.Generator(data_format())
noise_placeholder = tf.placeholder(tf.float32,
shape=[batch_size, NOISE_DIM])
generated_images = generator(noise_placeholder)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])
num_burn, num_iters = (30, 1000)
for _ in range(num_burn):
sess.run(generated_images, feed_dict={noise_placeholder: noise})
start = time.time()
for _ in range(num_iters):
# Comparison with the eager execution benchmark in mnist_test.py
# isn't entirely fair as the time here includes the cost of copying
# the feeds from CPU memory to GPU.
sess.run(generated_images, feed_dict={noise_placeholder: noise})
self._report('generate', start, num_iters, batch_size)
if __name__ == '__main__':
tf.test.main()
|
shdowofdeath/rally
|
refs/heads/master
|
tests/unit/plugins/openstack/scenarios/nova/test_security_group.py
|
5
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.nova import security_group
from tests.unit import fakes
from tests.unit import test
SECGROUP = "rally.plugins.openstack.scenarios.nova.security_group"
class FakeNeutronScenario():
def __enter__(self):
return {}
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class NovaSecurityGroupTestCase(test.TestCase):
def test_create_and_delete_security_groups(self):
fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1"),
fakes.FakeSecurityGroup(None, None, 2, "uuid2")]
nova_scenario = security_group.NovaSecGroup()
nova_scenario._create_security_groups = mock.MagicMock(
return_value=fake_secgroups)
nova_scenario._create_rules_for_security_group = mock.MagicMock()
nova_scenario._delete_security_groups = mock.MagicMock()
security_group_count = 2
rules_per_security_group = 10
nova_scenario.create_and_delete_secgroups(
security_group_count, rules_per_security_group)
nova_scenario._create_security_groups.assert_called_once_with(
security_group_count)
nova_scenario._create_rules_for_security_group.assert_called_once_with(
fake_secgroups, rules_per_security_group)
nova_scenario._delete_security_groups.assert_called_once_with(
fake_secgroups)
def test_create_and_list_secgroups(self):
fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1"),
fakes.FakeSecurityGroup(None, None, 2, "uuid2")]
nova_scenario = security_group.NovaSecGroup()
nova_scenario._create_security_groups = mock.MagicMock(
return_value=fake_secgroups)
nova_scenario._create_rules_for_security_group = mock.MagicMock()
nova_scenario._list_security_groups = mock.MagicMock()
security_group_count = 2
rules_per_security_group = 10
nova_scenario.create_and_list_secgroups(
security_group_count, rules_per_security_group)
nova_scenario._create_security_groups.assert_called_once_with(
security_group_count)
nova_scenario._create_rules_for_security_group.assert_called_once_with(
fake_secgroups, rules_per_security_group)
nova_scenario._list_security_groups.assert_called_once_with()
def _generate_fake_server_with_sg(self, number_of_secgroups):
sg_list = []
for i in range(number_of_secgroups):
sg_list.append(
fakes.FakeSecurityGroup(None, None, i, "uuid%s" % i))
return mock.MagicMock(
list_security_group=mock.MagicMock(return_value=sg_list)), sg_list
def _test_boot_and_delete_server_with_secgroups(self):
fake_server, sg_list = self._generate_fake_server_with_sg(2)
nova_scenario = security_group.NovaSecGroup()
nova_scenario._create_security_groups = mock.MagicMock(
return_value=sg_list)
nova_scenario._create_rules_for_security_group = mock.MagicMock()
nova_scenario._boot_server = mock.MagicMock(return_value=fake_server)
nova_scenario._generate_random_name = mock.MagicMock(
return_value="name")
nova_scenario._delete_server = mock.MagicMock()
nova_scenario._delete_security_groups = mock.MagicMock()
image = "img"
flavor = 1
security_group_count = 2
rules_per_security_group = 10
nova_scenario.boot_and_delete_server_with_secgroups(
image, flavor, security_group_count, rules_per_security_group,
fakearg="fakearg")
nova_scenario._create_security_groups.assert_called_once_with(
security_group_count)
self.assertEqual(1, nova_scenario._generate_random_name.call_count)
nova_scenario._create_rules_for_security_group.assert_called_once_with(
sg_list, rules_per_security_group)
nova_scenario._boot_server.assert_called_once_with(
"name", image, flavor,
security_groups=[sg.name for sg in sg_list], fakearg="fakearg")
fake_server.list_security_group.assert_called_once_with()
nova_scenario._delete_server.assert_called_once_with(fake_server)
nova_scenario._delete_security_groups.assert_called_once_with(sg_list)
def _test_boot_and_delete_server_with_sg_not_attached(self):
fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1"),
fakes.FakeSecurityGroup(None, None, 2, "uuid2")]
fake_server, sg_list = self._generate_fake_server_with_sg(1)
nova_scenario = security_group.NovaSecGroup()
nova_scenario._create_security_groups = mock.MagicMock(
return_value=fake_secgroups)
nova_scenario._create_rules_for_security_group = mock.MagicMock()
nova_scenario._boot_server = mock.MagicMock(return_value=fake_server)
nova_scenario._generate_random_name = mock.MagicMock(
return_value="name")
nova_scenario._delete_server = mock.MagicMock()
nova_scenario._delete_security_groups = mock.MagicMock()
image = "img"
flavor = 1
security_group_count = 2
rules_per_security_group = 10
self.assertRaises(security_group.NovaSecurityGroupException,
nova_scenario.boot_and_delete_server_with_secgroups,
image, flavor, security_group_count,
rules_per_security_group)
nova_scenario._create_security_groups.assert_called_once_with(
security_group_count)
self.assertEqual(1, nova_scenario._generate_random_name.call_count)
nova_scenario._create_rules_for_security_group.assert_called_once_with(
fake_secgroups, rules_per_security_group)
nova_scenario._boot_server.assert_called_once_with(
"name", image, flavor,
security_groups=[sg.name for sg in fake_secgroups])
fake_server.list_security_group.assert_called_once_with()
nova_scenario._delete_server.assert_called_once_with(fake_server)
nova_scenario._delete_security_groups.assert_called_once_with(
fake_secgroups)
|
raspberrywhite/raspberrywhite
|
refs/heads/master
|
server/views.py
|
1
|
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout as logout_user
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import render
from django_sse.redisqueue import RedisQueueView
from django_sse.redisqueue import send_event as redis_event
from redis.exceptions import ConnectionError
import json
from server import models
def send_event(event_name, data, channel):
try:
redis_event(event_name, data, channel=channel)
except ConnectionError:
pass
def logout(request):
logout_user(request)
return HttpResponseRedirect("/accounts/login/")
def login(request):
if request.method == 'POST':
username = request.POST.get('username', '')
password = request.POST.get('password', '')
next_page = request.POST.get('next', '/playlist')
user = auth.authenticate(username=username, password=password)
if user is not None and user.is_active:
models.Player.objects.get_or_create(user=user)
auth.login(request, user)
return HttpResponseRedirect(next_page)
else:
return HttpResponseRedirect("/account/invalid/")
else:
next_page = request.GET.get('next', '/playlist')
return render(request, 'server/login.html', {'next':next_page})
def register(request):
if request.method == 'POST':
firstname = request.POST.get('firstname', '')
lastname = request.POST.get('lastname', '')
username = request.POST.get('username', '')
password = request.POST.get('password', '')
try:
User.objects.get(username=username)
        except User.DoesNotExist:
user = User.objects.create_user(username=username,
password=password, first_name=firstname,
last_name=lastname)
user.save()
models.Player.objects.get_or_create(user=user)
return render(request, 'server/login.html')
return render(request, 'server/register.html')
@login_required
def search_songs(request):
if request.method == 'GET':
page = request.GET.get('page')
q = request.GET.get('term', '')
songs = models.Song.songs.query(q)
paginator = Paginator(songs, 2)
try:
songs = paginator.page(page)
except PageNotAnInteger:
songs = paginator.page(1)
except EmptyPage:
songs = paginator.page(paginator.num_pages)
paginated_results = {'total_pages' : paginator.num_pages}
results = [song.as_json() for song in songs]
paginated_results['results'] = results
data = json.dumps(paginated_results)
return HttpResponse(data, 'application/json')
@login_required
def get_current_playlist(request):
if request.method == 'GET':
requests = models.Request.requests.all()
results = [request.as_json() for request in requests]
data = json.dumps(results)
return HttpResponse(data, 'application/json')
@login_required
def playlist(request):
return render(request, 'server/playlist.html')
@login_required
def songrequest(request):
if request.method == 'POST':
id_song = request.POST.get('id_song', 123)
song = models.Song.songs.get(pk=id_song)
if not song.can_play():
return HttpResponse(json.dumps({'status':'Song recently played'}),
'application/json', status=405)
song.play()
user = models.Player.objects.get_or_create(user=request.user)[0]
models.Request.requests.create(user=user, song=song)
try:
send_event('newsong', "ok", channel="foo")
except:
pass
return HttpResponse(json.dumps({'status':'{0} added!'.format(song.title)}),
'application/json', status=201)
elif request.method == 'GET':
return render(request, 'server/request.html')
def get_next_song(request):
if request.method == 'GET':
try:
next_request = models.Request.requests.next()
path = next_request.song.path
send_event('newsong', "ok", channel="foo")
return HttpResponse(json.dumps({'path':path}), 'application/json')
except ObjectDoesNotExist:
send_event('newsong', "ok", channel="foo")
return HttpResponse(json.dumps({'status':'Request not found'}),
'application/json', status=404)
class SSE(RedisQueueView):
def get_redis_channel(self):
ch = self.redis_channel
return ch
|
strk/QGIS
|
refs/heads/master
|
python/core/auto_additions/qgsnetworkcontentfetcherregistry.py
|
37
|
# The following has been generated automatically from src/core/qgsnetworkcontentfetcherregistry.h
QgsNetworkContentFetcherRegistry.FetchingMode.baseClass = QgsNetworkContentFetcherRegistry
|
yonglehou/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_lena_compress.py
|
271
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 pixel image, is used here to illustrate how `k`-means
can be used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
|
GetSomeBlocks/Score_Soccer
|
refs/heads/master
|
resources/lib/twisted/twisted/spread/flavors.py
|
56
|
# -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module represents flavors of remotely accessible objects.
Currently this is only objects accessible through Perspective Broker, but will
hopefully encompass all forms of remote access which can emulate subsets of PB
(such as XMLRPC or SOAP).
Future Plans: Optimization. Exploitation of new-style object model.
Optimizations to this module should not affect external-use semantics at all,
but may have a small impact on users who subclass and override methods.
@author: Glyph Lefkowitz
"""
# NOTE: this module should NOT import pb; it is supposed to be a module which
# abstractly defines remotely accessible types. Many of these types expect to
# be serialized by Jelly, but they ought to be accessible through other
# mechanisms (like XMLRPC)
# system imports
import sys
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, reflect
# sibling imports
from jelly import setUnjellyableForClass, setUnjellyableForClassTree, setUnjellyableFactoryForClass, unjellyableRegistry
from jelly import Jellyable, Unjellyable, _Dummy, _DummyNewStyle
from jelly import setInstanceState, getInstanceState
# compatibility
setCopierForClass = setUnjellyableForClass
setCopierForClassTree = setUnjellyableForClassTree
setFactoryForClass = setUnjellyableFactoryForClass
copyTags = unjellyableRegistry
copy_atom = "copy"
cache_atom = "cache"
cached_atom = "cached"
remote_atom = "remote"
class NoSuchMethod(AttributeError):
"""Raised if there is no such remote method"""
class IPBRoot(Interface):
"""Factory for root Referenceable objects for PB servers."""
def rootObject(broker):
"""Return root Referenceable for broker."""
class Serializable(Jellyable):
"""An object that can be passed remotely.
I am a style of object which can be serialized by Perspective
Broker. Objects which wish to be referenceable or copied remotely
have to subclass Serializable. However, clients of Perspective
Broker will probably not want to directly subclass Serializable; the
Flavors of transferable objects are listed below.
What it means to be \"Serializable\" is that an object can be
passed to or returned from a remote method. Certain basic types
(dictionaries, lists, tuples, numbers, strings) are serializable by
default; however, classes need to choose a specific serialization
style: L{Referenceable}, L{Viewable}, L{Copyable} or L{Cacheable}.
You may also pass C{[lists, dictionaries, tuples]} of L{Serializable}
instances to or return them from remote methods, as many levels deep
as you like.
"""
def processUniqueID(self):
"""Return an ID which uniquely represents this object for this process.
By default, this uses the 'id' builtin, but can be overridden to
indicate that two values are identity-equivalent (such as proxies
for the same object).
"""
return id(self)
class Referenceable(Serializable):
perspective = None
"""I am an object sent remotely as a direct reference.
When one of my subclasses is sent as an argument to or returned
from a remote method call, I will be serialized by default as a
direct reference.
This means that the peer will be able to call methods on me;
a method call xxx() from my peer will be resolved to methods
of the name remote_xxx.
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'remote_messagename' and call it with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "remote_%s" % message, None)
if method is None:
raise NoSuchMethod("No such method: remote_%s" % (message,))
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self.perspective)
def jellyFor(self, jellier):
"""(internal)
Return a tuple which will be used as the s-expression to
serialize this to a peer.
"""
return "remote", jellier.invoker.registerReference(self)
class Root(Referenceable):
"""I provide a root object to L{pb.Broker}s for a L{pb.BrokerFactory}.
When a L{pb.BrokerFactory} produces a L{pb.Broker}, it supplies that
L{pb.Broker} with an object named \"root\". That object is obtained
by calling my rootObject method.
"""
implements(IPBRoot)
def rootObject(self, broker):
"""A L{pb.BrokerFactory} is requesting to publish me as a root object.
When a L{pb.BrokerFactory} is sending me as the root object, this
method will be invoked to allow per-broker versions of an
object. By default I return myself.
"""
return self
class ViewPoint(Referenceable):
"""
I act as an indirect reference to an object accessed through a
L{pb.Perspective}.
Simply put, I combine an object with a perspective so that when a
peer calls methods on the object I refer to, the method will be
invoked with that perspective as a first argument, so that it can
know who is calling it.
While L{Viewable} objects will be converted to ViewPoints by default
when they are returned from or sent as arguments to a remote
method, any object may be manually proxied as well. (XXX: Now that
    this class is no longer named C{Proxy}, this is the only occurrence
of the term 'proxied' in this docstring, and may be unclear.)
This can be useful when dealing with L{pb.Perspective}s, L{Copyable}s,
and L{Cacheable}s. It is legal to implement a method as such on
a perspective::
| def perspective_getViewPointForOther(self, name):
| defr = self.service.getPerspectiveRequest(name)
| defr.addCallbacks(lambda x, self=self: ViewPoint(self, x), log.msg)
| return defr
This will allow you to have references to Perspective objects in two
different ways. One is through the initial 'attach' call -- each
peer will have a L{pb.RemoteReference} to their perspective directly. The
other is through this method; each peer can get a L{pb.RemoteReference} to
all other perspectives in the service; but that L{pb.RemoteReference} will
be to a L{ViewPoint}, not directly to the object.
The practical offshoot of this is that you can implement 2 varieties
of remotely callable methods on this Perspective; view_xxx and
C{perspective_xxx}. C{view_xxx} methods will follow the rules for
ViewPoint methods (see ViewPoint.L{remoteMessageReceived}), and
C{perspective_xxx} methods will follow the rules for Perspective
methods.
"""
def __init__(self, perspective, object):
"""Initialize me with a Perspective and an Object.
"""
self.perspective = perspective
self.object = object
def processUniqueID(self):
"""Return an ID unique to a proxy for this perspective+object combination.
"""
return (id(self.perspective), id(self.object))
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'C{view_messagename}' to my Object and call it on my object with
the same arguments, modified by inserting my Perspective as
the first argument.
"""
args = broker.unserialize(args, self.perspective)
kw = broker.unserialize(kw, self.perspective)
method = getattr(self.object, "view_%s" % message)
try:
state = apply(method, (self.perspective,)+args, kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
rv = broker.serialize(state, self.perspective, method, args, kw)
return rv
class Viewable(Serializable):
"""I will be converted to a L{ViewPoint} when passed to or returned from a remote method.
The beginning of a peer's interaction with a PB Service is always
through a perspective. However, if a C{perspective_xxx} method returns
a Viewable, it will be serialized to the peer as a response to that
method.
"""
def jellyFor(self, jellier):
"""Serialize a L{ViewPoint} for me and the perspective of the given broker.
"""
return ViewPoint(jellier.invoker.serializingPerspective, self).jellyFor(jellier)
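# --- Illustrative sketch (not part of the original module) ---
# A hypothetical Viewable subclass: when a peer calls "kick" on a reference
# to a _DoorExample, ViewPoint.remoteMessageReceived (above) dispatches to
# view_kick with the calling perspective inserted as the first argument.
# The class and method names are examples only.
class _DoorExample(Viewable):
    def view_kick(self, perspective, strength):
        """Handle a remote 'kick'; 'perspective' identifies the caller."""
        return "kicked with strength %s" % (strength,)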
class Copyable(Serializable):
"""Subclass me to get copied each time you are returned from or passed to a remote method.
When I am returned from or passed to a remote method call, I will be
converted into data via a set of callbacks (see my methods for more
info). That data will then be serialized using Jelly, and sent to
the peer.
The peer will then look up the type to represent this with; see
L{RemoteCopy} for details.
"""
def getStateToCopy(self):
"""Gather state to send when I am serialized for a peer.
I will default to returning self.__dict__. Override this to
customize this behavior.
"""
return self.__dict__
def getStateToCopyFor(self, perspective):
"""
Gather state to send when I am serialized for a particular
perspective.
I will default to calling L{getStateToCopy}. Override this to
customize this behavior.
"""
return self.getStateToCopy()
def getTypeToCopy(self):
"""Determine what type tag to send for me.
By default, send the string representation of my class
(package.module.Class); normally this is adequate, but
you may override this to change it.
"""
return reflect.qual(self.__class__)
def getTypeToCopyFor(self, perspective):
"""Determine what type tag to send for me.
        By default, defer to self.L{getTypeToCopy}(); normally this is
adequate, but you may override this to change it.
"""
return self.getTypeToCopy()
def jellyFor(self, jellier):
"""Assemble type tag and state to copy for this broker.
This will call L{getTypeToCopyFor} and L{getStateToCopy}, and
return an appropriate s-expression to represent me.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
p = jellier.invoker.serializingPerspective
t = self.getTypeToCopyFor(p)
state = self.getStateToCopyFor(p)
sxp = jellier.prepare(self)
sxp.extend([t, jellier.jelly(state)])
return jellier.preserve(self, sxp)
class Cacheable(Copyable):
"""A cached instance.
This means that it's copied; but there is some logic to make sure
that it's only copied once. Additionally, when state is retrieved,
it is passed a "proto-reference" to the state as it will exist on
the client.
XXX: The documentation for this class needs work, but it's the most
complex part of PB and it is inherently difficult to explain.
"""
def getStateToCacheAndObserveFor(self, perspective, observer):
"""
Get state to cache on the client and client-cache reference
to observe locally.
        This is similar to getStateToCopyFor, but it additionally
passes in a reference to the client-side RemoteCache instance
that will be created when it is unserialized. This allows
Cacheable instances to keep their RemoteCaches up to date when
they change, such that no changes can occur between the point
at which the state is initially copied and the client receives
        it that are not propagated.
"""
return self.getStateToCopyFor(perspective)
def jellyFor(self, jellier):
"""Return an appropriate tuple to serialize me.
Depending on whether this broker has cached me or not, this may
return either a full state or a reference to an existing cache.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
luid = jellier.invoker.cachedRemotelyAs(self, 1)
if luid is None:
luid = jellier.invoker.cacheRemotely(self)
p = jellier.invoker.serializingPerspective
type_ = self.getTypeToCopyFor(p)
observer = RemoteCacheObserver(jellier.invoker, self, p)
state = self.getStateToCacheAndObserveFor(p, observer)
l = jellier.prepare(self)
jstate = jellier.jelly(state)
l.extend([type_, luid, jstate])
return jellier.preserve(self, l)
else:
return cached_atom, luid
def stoppedObserving(self, perspective, observer):
"""This method is called when a client has stopped observing me.
The 'observer' argument is the same as that passed in to
getStateToCacheAndObserveFor.
"""
class RemoteCopy(Unjellyable):
"""I am a remote copy of a Copyable object.
When the state from a L{Copyable} object is received, an instance will
be created based on the copy tags table (see setUnjellyableForClass) and
sent the L{setCopyableState} message. I provide a reasonable default
implementation of that message; subclass me if you wish to serve as
a copier for remote data.
NOTE: copiers are invoked with no arguments. Do not implement a
constructor which requires args in a subclass of L{RemoteCopy}!
"""
def setCopyableState(self, state):
"""I will be invoked with the state to copy locally.
'state' is the data returned from the remote object's
'getStateToCopyFor' method, which will often be the remote
object's dictionary (or a filtered approximation of it depending
on my peer's perspective).
"""
self.__dict__ = state
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.setCopyableState(unjellier.unjelly(jellyList[1]))
return self
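# --- Illustrative sketch (not part of the original module) ---
# A hypothetical Copyable/RemoteCopy pair: a _PointExample is flattened on
# the sending side via getStateToCopy() (by default its __dict__), and the
# registered _RemotePointExample is rebuilt from that state on the peer.
# In real code the receiving class is registered in the peer's process.
class _PointExample(Copyable):
    def __init__(self, x, y):
        self.x = x
        self.y = y
class _RemotePointExample(RemoteCopy):
    pass
# Rebuild incoming _PointExample state as _RemotePointExample instances.
setUnjellyableForClass(_PointExample, _RemotePointExample)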
class RemoteCache(RemoteCopy, Serializable):
"""A cache is a local representation of a remote L{Cacheable} object.
This represents the last known state of this object. It may
also have methods invoked on it -- in order to update caches,
the cached class generates a L{pb.RemoteReference} to this object as
it is originally sent.
Much like copy, I will be invoked with no arguments. Do not
implement a constructor that requires arguments in one of my
subclasses.
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
        'C{observe_messagename}' and call it on me with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "observe_%s" % message)
try:
state = apply(method, args, kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, None, method, args, kw)
def jellyFor(self, jellier):
"""serialize me (only for the broker I'm for) as the original cached reference
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
assert jellier.invoker is self.broker, "You cannot exchange cached proxies between brokers."
return 'lcache', self.luid
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.broker = unjellier.invoker
self.luid = jellyList[1]
if isinstance(self.__class__, type): #new-style class
cProxy = _DummyNewStyle()
else:
cProxy = _Dummy()
cProxy.__class__ = self.__class__
cProxy.__dict__ = self.__dict__
# XXX questionable whether this was a good design idea...
init = getattr(cProxy, "__init__", None)
if init:
init()
unjellier.invoker.cacheLocally(jellyList[1], self)
cProxy.setCopyableState(unjellier.unjelly(jellyList[2]))
# Might have changed due to setCopyableState method; we'll assume that
# it's bad form to do so afterwards.
self.__dict__ = cProxy.__dict__
# chomp, chomp -- some existing code uses "self.__dict__ =", some uses
# "__dict__.update". This is here in order to handle both cases.
self.broker = unjellier.invoker
self.luid = jellyList[1]
return cProxy
## def __really_del__(self):
## """Final finalization call, made after all remote references have been lost.
## """
def __cmp__(self, other):
"""Compare me [to another RemoteCache.
"""
if isinstance(other, self.__class__):
return cmp(id(self.__dict__), id(other.__dict__))
else:
return cmp(id(self.__dict__), other)
def __hash__(self):
"""Hash me.
"""
return int(id(self.__dict__) % sys.maxint)
broker = None
luid = None
def __del__(self):
"""Do distributed reference counting on finalize.
"""
try:
# log.msg( ' --- decache: %s %s' % (self, self.luid) )
if self.broker:
self.broker.decCacheRef(self.luid)
except:
log.deferr()
def unjellyCached(unjellier, unjellyList):
luid = unjellyList[1]
cNotProxy = unjellier.invoker.cachedLocallyAs(luid)
cProxy = _Dummy()
cProxy.__class__ = cNotProxy.__class__
cProxy.__dict__ = cNotProxy.__dict__
return cProxy
setUnjellyableForClass("cached", unjellyCached)
def unjellyLCache(unjellier, unjellyList):
luid = unjellyList[1]
obj = unjellier.invoker.remotelyCachedForLUID(luid)
return obj
setUnjellyableForClass("lcache", unjellyLCache)
def unjellyLocal(unjellier, unjellyList):
obj = unjellier.invoker.localObjectForID(unjellyList[1])
return obj
setUnjellyableForClass("local", unjellyLocal)
class RemoteCacheMethod:
"""A method on a reference to a L{RemoteCache}.
"""
def __init__(self, name, broker, cached, perspective):
"""(internal) initialize.
"""
self.name = name
self.broker = broker
self.perspective = perspective
self.cached = cached
def __cmp__(self, other):
return cmp((self.name, self.broker, self.perspective, self.cached), other)
def __hash__(self):
return hash((self.name, self.broker, self.perspective, self.cached))
def __call__(self, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID, self.name, args, kw)
class RemoteCacheObserver:
"""I am a reverse-reference to the peer's L{RemoteCache}.
I am generated automatically when a cache is serialized. I
represent a reference to the client's L{RemoteCache} object that
will represent a particular L{Cacheable}; I am the additional
object passed to getStateToCacheAndObserveFor.
"""
def __init__(self, broker, cached, perspective):
"""(internal) Initialize me.
@param broker: a L{pb.Broker} instance.
@param cached: a L{Cacheable} instance that this L{RemoteCacheObserver}
corresponds to.
@param perspective: a reference to the perspective who is observing this.
"""
self.broker = broker
self.cached = cached
self.perspective = perspective
def __repr__(self):
return "<RemoteCacheObserver(%s, %s, %s) at %s>" % (
self.broker, self.cached, self.perspective, id(self))
def __hash__(self):
"""Generate a hash unique to all L{RemoteCacheObserver}s for this broker/perspective/cached triplet
"""
return ( (hash(self.broker) % 2**10)
+ (hash(self.perspective) % 2**10)
+ (hash(self.cached) % 2**10))
def __cmp__(self, other):
"""Compare me to another L{RemoteCacheObserver}.
"""
return cmp((self.broker, self.perspective, self.cached), other)
def callRemote(self, _name, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the "
"object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID,
_name, args, kw)
def remoteMethod(self, key):
"""Get a L{pb.RemoteMethod} for this key.
"""
return RemoteCacheMethod(key, self.broker, self.cached, self.perspective)
|
kevinmost/LiteSync
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py
|
164
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter,
cached_property, get_cache_base, read_exports)
logger = logging.getLogger(__name__)
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
ABI = 'none'
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
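    # Minimal sys.meta_path importer (PEP 302 style): names of C extensions
    # shipped in mounted wheels are recorded in self.libs, and load_module()
    # loads them on demand via imp.load_dynamic (see Wheel.mount below).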
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
return '%s-%s%s-%s-%s-%s.whl' % (self.name, self.version, buildver,
pyver, abi, arch)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
metadata_filename = posixpath.join(info_dir, METADATA_FILENAME)
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % METADATA_FILENAME)
return result
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
message = message_from_file(wf)
result = dict(message)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
data = b'#!python' + data[m.end():]
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = b'#!python' + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
p = to_posix(os.path.relpath(record_path, base))
writer.writerow((p, '', ''))
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
records = []
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
return pathname
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
        ``lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
                    # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
# Use native string to avoid issues on 2.x: see Python #20140.
result = os.path.join(get_cache_base(), str('dylib-cache'), sys.version[:3])
if not os.path.isdir(result):
os.makedirs(result)
return result
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache_base = self._get_dylib_cache()
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not is_compatible(self):
msg = 'Wheel %s not mountable in this Python.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
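# --- Illustrative sketch (not part of the original module) ---
# Parsing a (hypothetical) wheel filename with the Wheel class above; the
# attribute values come from FILENAME_RE's named groups.
def _example_parse_wheel_filename():
    w = Wheel('demo-0.1-py27-none-any.whl')
    # w.name == 'demo', w.version == '0.1', w.pyver == ['py27'],
    # w.abi == ['none'], w.arch == ['any']; w.filename rebuilds the string.
    return w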
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
# Most specific - our Python version, ABI and arch
for abi in abis:
result.append((''.join((IMP_PREFIX, versions[0])), abi, ARCH))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return result
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
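# --- Illustrative sketch (not part of the original module) ---
# is_compatible() accepts a Wheel instance or a filename and checks its tags
# against COMPATIBLE_TAGS; a pure-Python 'py2.py3-none-any' wheel (filename
# hypothetical) is typically accepted by the running interpreter.
def _example_check_compatibility():
    return is_compatible('demo-0.1-py2.py3-none-any.whl')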
|
shssoichiro/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_handshake_hybi00.py
|
466
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake.hybi00 module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake.hybi00 import Handshaker
from mod_pywebsocket.handshake.hybi00 import _validate_subprotocol
from test import mock
_TEST_KEY1 = '4 @1 46546xW%0l 1 5'
_TEST_KEY2 = '12998 5 Y3 1 .P00'
_TEST_KEY3 = '^n:ds[4U'
_TEST_CHALLENGE_RESPONSE = '8jKS\'y:G*Co,Wxa-'
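# Per the hybi-00 (draft-76) handshake, the challenge response above is the
# MD5 digest of: the digits of key1 divided by its space count (packed as a
# 32-bit big-endian integer), the same for key2, followed by the 8-byte key3.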
_GOOD_REQUEST = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'UPGRADE',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WEBSOCKET',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES = (
80,
'GET',
'/demo',
{
'hOsT': 'example.com',
'cOnNeCtIoN': 'Upgrade',
'sEc-wEbsOcKeT-kEy2': _TEST_KEY2,
'sEc-wEbsOcKeT-pRoToCoL': 'sample',
'uPgRaDe': 'WebSocket',
'sEc-wEbsOcKeT-kEy1': _TEST_KEY1,
'oRiGiN': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_DEFAULT_PORT = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_RESPONSE_SECURE = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: wss://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_REQUEST_NONDEFAULT_PORT = (
8081,
'GET',
'/demo',
{
'Host': 'example.com:8081',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_NONDEFAULT_PORT = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com:8081/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_RESPONSE_SECURE_NONDEF = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: wss://example.com:8081/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_REQUEST_NO_PROTOCOL = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_NO_PROTOCOL = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_REQUEST_WITH_OPTIONAL_HEADERS = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'EmptyValue': '',
'Sec-WebSocket-Protocol': 'sample',
'AKey': 'AValue',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
# TODO(tyoshino): Include \r \n in key3, challenge response.
_GOOD_REQUEST_WITH_NONPRINTABLE_KEY = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': 'y R2 48 Q1O4 e|BV3 i5 1 u- 65',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '36 7 74 i 92 2\'m 9 0G',
'Origin': 'http://example.com',
},
''.join(map(chr, [0x01, 0xd1, 0xdd, 0x3b, 0xd1, 0x56, 0x63, 0xff])))
_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
''.join(map(chr, [0x0b, 0x99, 0xfa, 0x55, 0xbd, 0x01, 0x23, 0x7b,
0x45, 0xa2, 0xf1, 0xd0, 0x87, 0x8a, 0xee, 0xeb])))
_GOOD_REQUEST_WITH_QUERY_PART = (
80,
'GET',
'/demo?e=mc2',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_WITH_QUERY_PART = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo?e=mc2\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_BAD_REQUESTS = (
( # HTTP request
80,
'GET',
'/demo',
{
'Host': 'www.google.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
' GTB6 GTBA',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
'*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
'Accept-Encoding': 'gzip,deflate',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': '300',
'Connection': 'keep-alive',
}),
( # Wrong method
80,
'POST',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Missing Upgrade
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Wrong Upgrade
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'NonWebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Empty WebSocket-Protocol
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': '',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Wrong port number format
80,
'GET',
'/demo',
{
'Host': 'example.com:0x50',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Header/connection port mismatch
8080,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Illegal WebSocket-Protocol
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'illegal\x09protocol',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
)
def _create_request(request_def):
data = ''
if len(request_def) > 4:
data = request_def[4]
conn = mock.MockConn(data)
conn.local_addr = ('0.0.0.0', request_def[0])
return mock.MockRequest(
method=request_def[1],
uri=request_def[2],
headers_in=request_def[3],
connection=conn)
def _create_get_memorized_lines(lines):
"""Creates a function that returns the given string."""
def get_memorized_lines():
return lines
return get_memorized_lines
def _create_requests_with_lines(request_lines_set):
requests = []
for lines in request_lines_set:
request = _create_request(_GOOD_REQUEST)
request.connection.get_memorized_lines = _create_get_memorized_lines(
lines)
requests.append(request)
return requests
class HyBi00HandshakerTest(unittest.TestCase):
def test_good_request_default_port(self):
request = _create_request(_GOOD_REQUEST)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
request.connection.written_data())
self.assertEqual('/demo', request.ws_resource)
self.assertEqual('http://example.com', request.ws_origin)
self.assertEqual('ws://example.com/demo', request.ws_location)
self.assertEqual('sample', request.ws_protocol)
def test_good_request_capitalized_header_values(self):
request = _create_request(_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
request.connection.written_data())
def test_good_request_case_mixed_header_names(self):
request = _create_request(_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
request.connection.written_data())
def test_good_request_secure_default_port(self):
request = _create_request(_GOOD_REQUEST)
request.connection.local_addr = ('0.0.0.0', 443)
request.is_https_ = True
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_SECURE,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_nondefault_port(self):
request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
handshaker = Handshaker(request,
mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_NONDEFAULT_PORT,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_secure_non_default_port(self):
request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
request.is_https_ = True
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_SECURE_NONDEF,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_default_no_protocol(self):
request = _create_request(_GOOD_REQUEST_NO_PROTOCOL)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_NO_PROTOCOL,
request.connection.written_data())
self.assertEqual(None, request.ws_protocol)
def test_good_request_optional_headers(self):
request = _create_request(_GOOD_REQUEST_WITH_OPTIONAL_HEADERS)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual('AValue',
request.headers_in['AKey'])
self.assertEqual('',
request.headers_in['EmptyValue'])
def test_good_request_with_nonprintable_key(self):
request = _create_request(_GOOD_REQUEST_WITH_NONPRINTABLE_KEY)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_with_query_part(self):
request = _create_request(_GOOD_REQUEST_WITH_QUERY_PART)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_WITH_QUERY_PART,
request.connection.written_data())
self.assertEqual('ws://example.com/demo?e=mc2', request.ws_location)
def test_bad_requests(self):
for request in map(_create_request, _BAD_REQUESTS):
handshaker = Handshaker(request, mock.MockDispatcher())
self.assertRaises(HandshakeException, handshaker.do_handshake)
class HyBi00ValidateSubprotocolTest(unittest.TestCase):
def test_validate_subprotocol(self):
# should succeed.
_validate_subprotocol('sample')
_validate_subprotocol('Sample')
_validate_subprotocol('sample\x7eprotocol')
_validate_subprotocol('sample\x20protocol')
# should fail.
self.assertRaises(HandshakeException,
_validate_subprotocol,
'')
self.assertRaises(HandshakeException,
_validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
_validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
_validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
jgraham/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/user_input/clear_test.py
|
58
|
# -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from webdriver import exceptions
class ElementClearTest(base_test.WebDriverBaseTest):
def test_writable_text_input_element_should_clear(self):
self.driver.get(self.webserver.where_is("user_input/res/element_clear_writable_input_page.html"))
e = self.driver.find_element_by_css("#writableTextInput")
e.clear()
self.assertEquals("", e.get_attribute("value"))
def test_disabled_text_input_element_should_not_clear(self):
self.driver.get(self.webserver.where_is("user_input/res/element_clear_disabled_input_page.html"))
e = self.driver.find_element_by_css("#disabledTextInput")
self.assertRaises(exceptions.InvalidElementStateException, lambda: e.clear())
def test_read_only_text_input_element_should_not_clear(self):
self.driver.get(self.webserver.where_is("user_input/res/element_clear_readonly_input_page.html"))
e = self.driver.find_element_by_css("#readOnlyTextInput")
self.assertRaises(exceptions.InvalidElementStateException, lambda: e.clear())
def test_writable_text_area_element_should_clear(self):
self.driver.get(self.webserver.where_is("user_input/res/element_clear_writable_textarea_page.html"))
e = self.driver.find_element_by_css("#writableTextArea")
e.clear()
self.assertEquals("", e.get_attribute("value"))
def test_disabled_text_area_element_should_not_clear(self):
self.driver.get(self.webserver.where_is("user_input/res/element_clear_disabled_textarea_page.html"))
e = self.driver.find_element_by_css("#disabledTextArea")
self.assertRaises(exceptions.InvalidElementStateException, lambda: e.clear())
    def test_read_only_text_area_element_should_not_clear(self):
self.driver.get(self.webserver.where_is("user_input/res/element_clear_readonly_textarea_page.html"))
e = self.driver.find_element_by_css("#readOnlyTextArea")
self.assertRaises(exceptions.InvalidElementStateException, lambda: e.clear())
def test_content_editable_area_should_clear(self):
self.driver.get(self.webserver.where_is("user_input/res/element_clear_contenteditable_page.html"))
e = self.driver.find_element_by_css("#contentEditableElement")
e.clear()
self.assertEquals("", e.text)
if __name__ == "__main__":
unittest.main()
|
pilou-/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/frr/frr_facts.py
|
34
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: frr_facts
version_added: "2.8"
author: "Nilashish Chakraborty (@NilashishC)"
short_description: Collect facts from remote devices running Free Range Routing (FRR).
description:
- Collects a base set of device facts from a remote device that
is running FRR. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
notes:
- Tested against FRR 6.0.
options:
gather_subset:
description:
- When supplied, this argument restricts the facts collected
to a given subset.
- Possible values for this argument include
C(all), C(hardware), C(config), and C(interfaces).
- Specify a list of values to include a larger subset.
- Use a value with an initial C(!) to collect all facts except that subset.
required: false
default: '!config'
"""
EXAMPLES = """
- name: Collect all facts from the device
frr_facts:
gather_subset: all
- name: Collect only the config and default facts
frr_facts:
gather_subset:
- config
- name: Collect the config and hardware facts
frr_facts:
gather_subset:
- config
- hardware
- name: Do not collect hardware facts
frr_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_version:
description: The FRR version running on the remote device
returned: always
type: str
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version that the Ansible controller is using
returned: always
type: str
# hardware
ansible_net_mem_stats:
description: The memory statistics fetched from the device
returned: when hardware is configured
type: dict
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_mpls_ldp_neighbors:
description: The list of MPLS LDP neighbors from the remote device
returned: when interfaces is configured and LDP daemon is running on the device
type: dict
"""
import platform
import re
from ansible.module_utils.network.frr.frr import run_commands, get_capabilities
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
self._capabilities = get_capabilities(self.module)
def populate(self):
self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False)
def run(self, cmd):
        return run_commands(self.module, commands=cmd, check_rc=False)
def parse_facts(self, pattern, data):
value = None
match = re.search(pattern, data, re.M)
if match:
value = match.group(1)
return value
class Default(FactsBase):
COMMANDS = ['show version']
def populate(self):
super(Default, self).populate()
self.facts.update(self.platform_facts())
def platform_facts(self):
platform_facts = {}
resp = self._capabilities
device_info = resp['device_info']
platform_facts['system'] = device_info['network_os']
for item in ('version', 'hostname'):
val = device_info.get('network_os_%s' % item)
if val:
platform_facts[item] = val
platform_facts['api'] = resp['network_api']
platform_facts['python_version'] = platform.python_version()
return platform_facts
class Hardware(FactsBase):
COMMANDS = ['show memory']
def _parse_daemons(self, data):
match = re.search(r'Memory statistics for (\w+)', data, re.M)
if match:
return match.group(1)
def gather_memory_facts(self, data):
mem_details = data.split('\n\n')
mem_stats = {}
mem_counters = {
'total_heap_allocated': r'Total heap allocated:(?:\s*)(.*)',
'holding_block_headers': r'Holding block headers:(?:\s*)(.*)',
'used_small_blocks': r'Used small blocks:(?:\s*)(.*)',
'used_ordinary_blocks': r'Used ordinary blocks:(?:\s*)(.*)',
'free_small_blocks': r'Free small blocks:(?:\s*)(.*)',
'free_ordinary_blocks': r'Free ordinary blocks:(?:\s*)(.*)',
'ordinary_blocks': r'Ordinary blocks:(?:\s*)(.*)',
'small_blocks': r'Small blocks:(?:\s*)(.*)',
'holding_blocks': r'Holding blocks:(?:\s*)(.*)'
}
for item in mem_details:
daemon = self._parse_daemons(item)
mem_stats[daemon] = {}
for fact, pattern in iteritems(mem_counters):
mem_stats[daemon][fact] = self.parse_facts(pattern, item)
return mem_stats
def populate(self):
super(Hardware, self).populate()
data = self.responses[0]
if data:
self.facts['mem_stats'] = self.gather_memory_facts(data)
class Config(FactsBase):
COMMANDS = ['show running-config']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
data = re.sub(r'^Building configuration...\s+Current configuration:', '', data, flags=re.MULTILINE)
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = ['show interface']
def populate(self):
ldp_supported = self._capabilities['supported_protocols']['ldp']
if ldp_supported:
self.COMMANDS.append('show mpls ldp discovery')
super(Interfaces, self).populate()
data = self.responses[0]
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
self.populate_ipv4_interfaces(interfaces)
self.populate_ipv6_interfaces(interfaces)
if ldp_supported:
data = self.responses[1]
if data:
self.facts['mpls_ldp_neighbors'] = self.populate_mpls_ldp_neighbors(data)
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^Interface (\S+)', line)
if match:
key = match.group(1)
parsed[key] = line
return parsed
def populate_interfaces(self, interfaces):
facts = dict()
counters = {
'description': r'Description: (.+)',
'macaddress': r'HWaddr: (\S+)',
'type': r'Type: (\S+)',
'vrf': r'vrf: (\S+)',
'mtu': r'mtu (\d+)',
'bandwidth': r'bandwidth (\d+)',
'lineprotocol': r'line protocol is (\S+)',
'operstatus': r'^(?:.+) is (.+),'
}
for key, value in iteritems(interfaces):
intf = dict()
for fact, pattern in iteritems(counters):
intf[fact] = self.parse_facts(pattern, value)
facts[key] = intf
return facts
def populate_ipv4_interfaces(self, data):
for key, value in data.items():
self.facts['interfaces'][key]['ipv4'] = list()
primary_address = addresses = []
primary_address = re.findall(r'inet (\S+) broadcast (?:\S+)(?:\s{2,})', value, re.M)
addresses = re.findall(r'inet (\S+) broadcast (?:\S+)(?:\s+)secondary', value, re.M)
if len(primary_address) == 0:
continue
addresses.append(primary_address[0])
for address in addresses:
addr, subnet = address.split("/")
ipv4 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv4')
self.facts['interfaces'][key]['ipv4'].append(ipv4)
def populate_ipv6_interfaces(self, data):
for key, value in data.items():
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'inet6 (\S+)', value, re.M)
for address in addresses:
addr, subnet = address.split("/")
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def populate_mpls_ldp_neighbors(self, data):
facts = {}
entries = data.splitlines()
for x in entries:
if x.startswith('AF'):
continue
x = x.split()
if len(x) > 0:
ldp = {}
ldp['neighbor'] = x[1]
ldp['source'] = x[3]
facts[ldp['source']] = []
facts[ldp['source']].append(ldp)
return facts
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
config=Config,
interfaces=Interfaces
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
"""main entry point for module execution
"""
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Subset must be one of [%s], got %s' % (', '.join(VALID_SUBSETS), subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts)
if __name__ == '__main__':
main()
|
mahantheshhv/ibis
|
refs/heads/master
|
ibis/expr/tests/__init__.py
|
12133432
| |
dweinstein/mitmproxy
|
refs/heads/master
|
test/scripts/loaderr.py
|
44
|
a = x
|
dbrattli/python-gearshift
|
refs/heads/master
|
gearshift/command/base.py
|
1
|
"""Commands for the TurboGears command line tool."""
import glob
import optparse
import os
import sys
import pkg_resources
import configobj
import gearshift
from gearshift.util import get_model, load_project_config, \
get_project_config, get_package_name
from gearshift.identity import SecureObject, from_any_host
from gearshift import config, database
from sacommand import sacommand
sys.path.insert(0, os.getcwd())
no_connection_param = ["help", "list"]
no_model_param = ["help"]
def silent_os_remove(fname):
"""Try to remove file 'fname' but mute any error that may happen.
Returns True if file was actually removed and False otherwise.
"""
try:
os.remove(fname)
return True
except os.error:
pass
return False
class CommandWithDB(object):
"""Base class for commands that need to use the database"""
config = None
def __init__(self, version):
pass
def find_config(self):
"""Chooses the config file, trying to guess whether this is a
development or installed project."""
load_project_config(self.config)
self.dburi = config.get("sqlobject.dburi", None)
if self.dburi and self.dburi.startswith("notrans_"):
self.dburi = self.dburi[8:]
class SQL(CommandWithDB):
"""Wrapper command for sqlobject-admin, and some sqlalchemy support.
This automatically supplies sqlobject-admin with the database that
is found in the config file.
Will also supply the model module as appropriate.
"""
desc = "Run the database provider manager"
need_project = True
def __init__(self, version):
if len(sys.argv) == 1 or sys.argv[1][0] == "-":
parser = optparse.OptionParser(
usage="%prog sql [command]\n\n" \
"hint: '%prog sql help' will list the sqlobject " \
"commands",
version="%prog " + version)
parser.add_option("-c", "--config", help="config file",
dest="config")
options, args = parser.parse_args(sys.argv[1:3])
if not options.config:
parser.error("Please provide a valid option or command.")
self.config = options.config
# get rid of our config option
if args:
del sys.argv[1]
else:
del sys.argv[1:3]
self.find_config()
def run(self):
"""Run the sqlobject-admin tool or functions from the sacommand module."""
if not "--egg" in sys.argv and not gearshift.util.get_project_name():
print "This doesn't look like a GearShift project."
return
else:
command = sys.argv[1]
if config.get("sqlalchemy.dburi"):
try:
sacommand(command, sys.argv)
except Exception: # NoApplicableMethods:
# Anonymous except to avoid making entire
# gearshift dependent on peak.rules just to get
# this ONE case of NoApplicableMethods...
sacommand("help", [])
return
try:
from sqlobject.manager import command
except ImportError:
from gearshift.util import missing_dependency_error
print missing_dependency_error('SQLObject')
return
sqlobjcommand = command
if sqlobjcommand not in no_connection_param:
if self.dburi:
print "Using database URI %s" % self.dburi
sys.argv.insert(2, self.dburi)
sys.argv.insert(2, "-c")
else:
print ("Database URI not specified in the config file"
" (%s).\nPlease be sure it's on the command line."
% (self.config or get_project_config()))
if sqlobjcommand not in no_model_param:
if not "--egg" in sys.argv:
eggname = glob.glob("*.egg-info")
if not eggname or not os.path.exists(
os.path.join(eggname[0], "sqlobject.txt")):
eggname = self.fix_egginfo(eggname)
eggname = eggname[0].replace(".egg-info", "")
if not "." in sys.path:
sys.path.append(".")
pkg_resources.working_set.add_entry(".")
sys.argv.insert(2, eggname)
sys.argv.insert(2, "--egg")
command.the_runner.run(sys.argv)
def fix_egginfo(self, eggname):
"""Add egg-info directory if necessary."""
print """
This project seems incomplete. In order to use the sqlobject commands
without manually specifying a model, there needs to be an
egg-info directory with an appropriate sqlobject.txt file.
I can fix this automatically. Would you like me to?
"""
dofix = raw_input("Enter [y] or n: ")
if not dofix or dofix.lower()[0] == 'y':
oldargs = sys.argv
sys.argv = ["setup.py", "egg_info"]
import imp
imp.load_module("setup", *imp.find_module("setup", ["."]))
sys.argv = oldargs
import setuptools
package = setuptools.find_packages()[0]
eggname = glob.glob("*.egg-info")
sqlobjectmeta = open(os.path.join(eggname[0], "sqlobject.txt"), "w")
sqlobjectmeta.write("""db_module=%(package)s.model
history_dir=$base/%(package)s/sqlobject-history
""" % dict(package=package))
else:
sys.exit(0)
return eggname
class Shell(CommandWithDB):
"""Convenient version of the Python interactive shell.
This shell attempts to locate your configuration file and model module
so that it can import everything from your model and make it available
in the Python shell namespace.
"""
desc = "Start a Python prompt with your database available"
need_project = True
def run(self):
"""Run the shell"""
self.find_config()
locals = dict(__name__="tg-admin")
try:
mod = get_model()
if mod:
locals.update(mod.__dict__)
except (pkg_resources.DistributionNotFound, ImportError), e:
mod = None
print "Warning: Failed to import your data model: %s" % e
print "You will not have access to your data model objects."
print
if config.get("sqlalchemy.dburi"):
using_sqlalchemy = True
database.bind_metadata()
locals.update(session=database.session,
metadata=database.metadata)
else:
using_sqlalchemy = False
class CustomShellMixin(object):
def commit_changes(self):
if mod:
# XXX Can we check somehow, if there are actually any
                    # database changes to be committed?
r = raw_input("Do you wish to commit"
" your database changes? [yes]")
if not r.startswith("n"):
if using_sqlalchemy:
self.push("session.flush()")
else:
self.push("hub.commit()")
try:
# try to use IPython if possible
from IPython import iplib, Shell
class CustomIPShell(iplib.InteractiveShell, CustomShellMixin):
def raw_input(self, *args, **kw):
try:
# needs decoding (see below)?
return iplib.InteractiveShell.raw_input(self, *args,
**kw)
except EOFError:
self.commit_changes()
raise EOFError
shell = Shell.IPShell(user_ns=locals, shell_class=CustomIPShell)
shell.mainloop()
except ImportError:
import code
class CustomShell(code.InteractiveConsole, CustomShellMixin):
def raw_input(self, *args, **kw):
try:
import readline
except ImportError:
pass
try:
r = code.InteractiveConsole.raw_input(self,
*args, **kw)
for encoding in (getattr(sys.stdin, 'encoding', None),
sys.getdefaultencoding(), 'utf-8', 'latin-1'):
if encoding:
try:
return r.decode(encoding)
except UnicodeError:
pass
return r
except EOFError:
self.commit_changes()
raise EOFError
shell = CustomShell(locals=locals)
shell.interact()
class ToolboxCommand(CommandWithDB):
desc = "Launch the TurboGears Toolbox"
def __init__(self, version):
self.hostlist = ['127.0.0.1','::1']
parser = optparse.OptionParser(
usage="%prog toolbox [options]",
version="%prog " + version)
parser.add_option("-n", "--no-open",
help="don't open browser automatically",
dest="noopen", action="store_true", default=False)
parser.add_option("-c", "--add-client",
help="allow client ip address specified to connect to toolbox"
" (can be specified more than once)",
dest="host", action="append", default=None)
parser.add_option("-p", "--port",
help="port to run the Toolbox on",
dest="port", default=7654)
parser.add_option("--config", help="config file to use",
dest="config", default=self.config or get_project_config())
options, args = parser.parse_args(sys.argv[1:])
self.port = int(options.port)
self.noopen = options.noopen
self.config = options.config
if options.host:
self.hostlist = self.hostlist + options.host
gearshift.widgets.load_widgets()
def openbrowser(self):
import webbrowser
webbrowser.open("http://localhost:%d" % self.port)
def run(self):
import cherrypy
from gearshift import toolbox
# TODO: remove this check once we convert the whole toolbox to genshi
try:
import turbokid
except ImportError:
# we could not import turbokid, the toolbox will crash with
# horrible tracebacks...
print "Please easy_install turbokid, toolbox cannot run without it"
# sys exit with different than zero error code in case someone
# is using the error code to know if it worked...
sys.exit(2)
# Make sure we have full configuration with every option
# in it so other plugins or whatever find what they need
        # when starting even inside the toolbox
conf = get_package_name()
conf = conf and "%s.config" % conf or None
conf = config.config_obj(configfile=self.config, modulename=conf)
if 'global' in conf:
config.update({'global': conf['global']})
root = SecureObject(toolbox.Toolbox(), from_any_host(self.hostlist),
exclude=['noaccess'])
cherrypy.tree.mount(root, '/', config=gearshift.config.app)
# amend some parameters since we are running from the command
# line in order to change port, log methods...
config.update({'global': {
'server.socket_port': self.port,
'server.webpath': '/',
'server.environment': 'development',
'server.log_to_screen': True,
'autoreload.on': False,
'server.package': 'gearshift.toolbox',
'log_debug_info_filter.on': False,
'tools.identity.failure_url': '/noaccess',
'tools.identity.force_external_redirect': False,
'tg.defaultview': 'kid',
'tg.strict_parameters': False,
'kid.outputformat': 'html default',
'kid.encoding': 'utf-8'
}})
gearshift.view.load_engines()
if self.noopen:
cherrypy.engine.start()
else:
cherrypy.engine.start_with_callback(self.openbrowser)
cherrypy.engine.block()
commands = None
def main():
"""Main command runner. Manages the primary command line arguments."""
# add commands defined by entrypoints
commands = {}
for entrypoint in pkg_resources.iter_entry_points("gearshift.command"):
command = entrypoint.load()
commands[entrypoint.name] = (command.desc, entrypoint)
def _help():
"""Custom help text for tg-admin."""
print """
GearShift %s command line interface
Usage: %s [options] <command>
Options:
-c CONFIG --config=CONFIG Config file to use
-e EGG_SPEC --egg=EGG_SPEC Run command on given Egg
Commands:""" % (gearshift.__version__, sys.argv[0])
longest = max([len(key) for key in commands.keys()])
format = "%" + str(longest) + "s %s"
commandlist = commands.keys()
commandlist.sort()
for key in commandlist:
print format % (key, commands[key][0])
parser = optparse.OptionParser()
parser.allow_interspersed_args = False
parser.add_option("-c", "--config", dest="config")
parser.add_option("-e", "--egg", dest="egg")
parser.print_help = _help
options, args = parser.parse_args(sys.argv[1:])
# if command is not found display help
if not args or not commands.has_key(args[0]):
_help()
sys.exit()
commandname = args[0]
# strip command and any global options from the sys.argv
sys.argv = [sys.argv[0]] + args[1:]
command = commands[commandname][1]
command = command.load()
if options.egg:
egg = pkg_resources.get_distribution(options.egg)
os.chdir(egg.location)
if hasattr(command,"need_project"):
if not gearshift.util.get_project_name():
print "This command needs to be run from inside a project directory"
return
elif not options.config and not os.path.isfile(get_project_config()):
print """No default config file was found.
If it has been renamed use:
tg-admin --config=<FILE> %s""" % commandname
return
command.config = options.config
command = command(gearshift.__version__)
command.run()
__all__ = ["main"]
|
lqhuang/SAXS-tools
|
refs/heads/master
|
RAW/SASCalib.py
|
1
|
'''
Created on Jul 11, 2010
@author: Soren S. Nielsen
#******************************************************************************
# This file is part of RAW.
#
# RAW is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAW is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAW. If not, see <http://www.gnu.org/licenses/>.
#
#******************************************************************************
'''
from __future__ import print_function, division
import os, sys
from math import pi, asin, tan, atan, cos
import numpy as np
RAW_DIR = os.path.dirname(os.path.abspath(__file__))
if RAW_DIR not in sys.path:
sys.path.append(RAW_DIR)
import RAWGlobals
try:
import pyFAI, pyFAI.geometryRefinement
RAWGlobals.usepyFAI = True
except ImportError:  # ModuleNotFoundError is Python 3.6+ only; ImportError covers both cases
RAWGlobals.usepyFAI = False
def calcAbsScaleConstWater(water_sasm, start_idx, end_idx):
'''
Calculates the absolute scaling constant using water (with empty cell subtracted).
(This constant is multiplied to the background subtracted samples to obtain
it on absolute scale.)
Currently only precise for 25 deg and 9.47 keV
Should be extended to include other temperatures and energies
see http://www.ncnr.nist.gov/resources/sldcalc.html to calc for other temperatures
'''
avg_water = np.average(water_sasm.i[start_idx:end_idx])
abs_scale_constant = 0.0162 / avg_water
return abs_scale_constant
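# Editor-added illustrative sketch (not part of the original RAW code; the names are
# hypothetical): how the constant returned above is typically applied to put a
# background-subtracted intensity array on absolute scale.
def _example_apply_abs_scale(water_minus_empty_sasm, subtracted_intensity, start_idx, end_idx):
    '''Multiply background-subtracted sample intensities by the water-derived constant.'''
    abs_const = calcAbsScaleConstWater(water_minus_empty_sasm, start_idx, end_idx)
    return abs_const * subtracted_intensity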
def calcTheta(sd_distance, pixel_size, q_length_pixels):
'''
Calculates theta for a sample-detector distance,
the detector pixel size and the length of the pixels.
sd_distance = sample detector distance
pixel_size = Detector pixel size
    q_length_pixels = length of q-vector in pixels.
'''
if q_length_pixels == 0:
return 0
else:
theta = .5 * atan( (q_length_pixels * pixel_size) / sd_distance )
return theta
def calcSolidAngleCorrection(sasm, sd_distance, pixel_size):
'''
returns an array that should be multiplied to the intensity values
calculated to apply the solid angle correction.
This compensates for the fact that the detector face is assumed to be planar.
Thus, as you move out on the detector, each pixel subtends a smaller solid angle,
and so absorbs fewer pixels. This results in artificially low intensities at high
q. This can be compensated for by dividing by the ratio of the solid angles,
which is proportional to cos(2*theta)^3.
Inputs:
pixel_size = Detector Pixel Size in millimeters
sd_distance = Sample-Detector distance
sasm, with the q vector still in pixel units, rather than calibrated to A^-1.
'''
q_list = sasm.q
iac = np.ones(len(q_list))
for idx in range(0,len(iac)):
iac[idx] = np.power( cos( 2 * calcTheta(sd_distance, pixel_size, q_list[idx]) ),3 ) #cos^3(2*theta)
return iac
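# Editor-added illustrative sketch: a vectorised equivalent of the per-pixel loop in
# calcSolidAngleCorrection above. It assumes q_pixels is an array of q-vector lengths
# in pixels (like sasm.q before calibration) and is not used elsewhere in this module.
def _example_solid_angle_correction_vectorised(q_pixels, sd_distance, pixel_size):
    '''Return cos^3(2*theta) for every pixel-space q value at once.'''
    two_theta = np.arctan((np.asarray(q_pixels, dtype=float) * pixel_size) / sd_distance)
    return np.cos(two_theta) ** 3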
def calcDistanceFromAgBeh(first_ring_dist, pixel_size, wavelength):
''' Calculates sample detector distance from the rings
of Silver Behenate.
first_ring_dist = Distance to 1st circle in AgBe measurement in pixels
q = ( 4 * pi * sin(theta)) / wavelength
    tan(2*theta) = opposite / adjacent
    pixel_size : detector pixel size in mm
    Output:
sd_distance = Sample Detector Distance in mm
'''
    q = 0.107625 # Q for 1st circle in AgBeh
sin_theta = (q * wavelength) / (4 * pi)
theta = asin(sin_theta)
opposite = first_ring_dist * pixel_size
adjacent = opposite / tan(2*theta)
sd_distance = adjacent
return sd_distance
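# Editor-added worked example with assumed round numbers (not measured values): for
# wavelength = 1.0 A, pixel_size = 0.172 mm and the first AgBeh ring 100 pixels from
# the beam center, sin(theta) = 0.107625 / (4*pi) ~ 0.00856, so 2*theta ~ 0.0171 rad
# and sd_distance ~ 17.2 mm / tan(0.0171) ~ 1.0e3 mm (about one metre).
def _example_agbeh_distance():
    '''Sketch showing a call to calcDistanceFromAgBeh with the numbers above.'''
    return calcDistanceFromAgBeh(first_ring_dist=100, pixel_size=0.172, wavelength=1.0)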
#########################################
#Methods adapted from pyFAI methods of the same or similar name to automatically get points in calibrant rings and fit them
def new_grp(img, loc, gpt, defaultNbPoints, ring):
massif = pyFAI.massif.Massif(img)
points = massif.find_peaks([loc[1], loc[0]], defaultNbPoints)
if points:
gpt.append(points, ring=ring)
return points, gpt
class RAWCalibration():
# A mash up of the pyFAI.calibration AbstractCalibration and Calibration classes
PARAMETERS = ["dist", "poni1", "poni2", "rot1", "rot2", "rot3", "wavelength"]
def __init__(self, img, wavelength = None, detector = None, calibrant = None, pixelSize = None, gaussianWidth = None):
self.gaussianWidth = gaussianWidth
self.detector = detector
self.calibrant = calibrant
self.pixelSize = pixelSize
self.wavelength = wavelength
self.img = img
self.fixed = pyFAI.utils.FixedParameters()
self.fixed.add_or_discard("wavelength", True)
self.fixed.add_or_discard("rot1", True)
self.fixed.add_or_discard("rot2", True)
self.fixed.add_or_discard("rot3", True)
self.max_iter = 1000
self.interactive = False
self.weighted = False
def initgeoRef(self):
# Modified initgeoRef from the pyFAI.calibration.Calibration class
"""
Tries to initialise the GeometryRefinement (dist, poni, rot)
Returns a dictionary of key value pairs
"""
defaults = {"dist": 0.1, "poni1": 0.0, "poni2": 0.0,
"rot1": 0.0, "rot2": 0.0, "rot3": 0.0}
if self.detector:
try:
p1, p2, _p3 = self.detector.calc_cartesian_positions()
defaults["poni1"] = p1.max() / 2.
defaults["poni2"] = p2.max() / 2.
except Exception as err:
print(err)
if self.ai:
for key in defaults.keys(): # not PARAMETERS which holds wavelength
val = getattr(self.ai, key, None)
if val is not None:
defaults[key] = val
return defaults
def refine(self):
# Modified refine from the pyFAI.calibration.Calibration class
"""
Contains the geometry refinement part specific to Calibration
Sets up the initial guess when starting pyFAI-calib
"""
# First attempt
defaults = self.initgeoRef()
self.geoRef = pyFAI.geometryRefinement.GeometryRefinement(self.data,
detector=self.detector,
wavelength=self.wavelength,
calibrant=self.calibrant,
**defaults)
self.geoRef.refine2(1000000, fix=self.fixed)
scor = self.geoRef.chi2()
pars = [getattr(self.geoRef, p) for p in self.PARAMETERS]
scores = [(scor, pars), ]
# Second attempt
defaults = self.initgeoRef()
self.geoRef = pyFAI.geometryRefinement.GeometryRefinement(self.data,
detector=self.detector,
wavelength=self.wavelength,
calibrant=self.calibrant,
**defaults)
self.geoRef.guess_poni()
self.geoRef.refine2(1000000, fix=self.fixed)
scor = self.geoRef.chi2()
pars = [getattr(self.geoRef, p) for p in self.PARAMETERS]
scores.append((scor, pars))
# Choose the best scoring method: At this point we might also ask
# a user to just type the numbers in?
scores.sort()
scor, pars = scores[0]
for parval, parname in zip(pars, self.PARAMETERS):
setattr(self.geoRef, parname, parval)
# Now continue as before
self.refine2()
def refine2(self):
# Modified refine from the pyFAI.calibration.AbstractCalibration class
"""
Contains the common geometry refinement part
"""
        previous = sys.maxsize
finished = False
while not finished:
count = 0
if "wavelength" in self.fixed:
while (previous > self.geoRef.chi2()) and (count < self.max_iter):
if (count == 0):
previous = sys.maxsize
else:
previous = self.geoRef.chi2()
self.geoRef.refine2(1000000, fix=self.fixed)
count += 1
else:
while previous > self.geoRef.chi2_wavelength() and (count < self.max_iter):
if (count == 0):
previous = sys.maxsize
else:
previous = self.geoRef.chi2()
self.geoRef.refine2_wavelength(1000000, fix=self.fixed)
count += 1
self.points.setWavelength_change2th(self.geoRef.wavelength)
# self.geoRef.save(self.basename + ".poni")
self.geoRef.del_ttha()
self.geoRef.del_dssa()
self.geoRef.del_chia()
tth = self.geoRef.twoThetaArray(self.img.shape)
dsa = self.geoRef.solidAngleArray(self.img.shape)
# self.geoRef.chiArray(self.peakPicker.shape)
# self.geoRef.cornerArray(self.peakPicker.shape)
if self.interactive:
finished = self.prompt()
else:
finished = True
if not finished:
previous = sys.maxsize
|
8191/ansible
|
refs/heads/devel
|
lib/ansible/runner/action_plugins/pause.py
|
9
|
# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.callbacks import vv
from ansible.errors import AnsibleError as ae
from ansible.runner.return_data import ReturnData
from ansible.utils import getch, parse_kv
import ansible.utils.template as template
from termios import tcflush, TCIFLUSH
import datetime
import sys
import time
class ActionModule(object):
    ''' pauses execution for a length of time, or until input is received '''
PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
BYPASS_HOST_LOOP = True
def __init__(self, runner):
self.runner = runner
# Set defaults
self.duration_unit = 'minutes'
self.prompt = None
self.seconds = None
self.result = {'changed': False,
'rc': 0,
'stderr': '',
'stdout': '',
'start': None,
'stop': None,
'delta': None,
}
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' run the pause action module '''
# note: this module does not need to pay attention to the 'check'
# flag, it always runs
hosts = ', '.join(self.runner.host_set)
args = {}
if complex_args:
args.update(complex_args)
# extra template call unneeded?
args.update(parse_kv(template.template(self.runner.basedir, module_args, inject)))
        # Do 'minutes' or 'seconds' keys exist in 'args'?
if 'minutes' in args or 'seconds' in args:
try:
if 'minutes' in args:
self.pause_type = 'minutes'
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
self.seconds = int(args['minutes']) * 60
else:
self.pause_type = 'seconds'
self.seconds = int(args['seconds'])
self.duration_unit = 'seconds'
except ValueError, e:
raise ae("non-integer value given for prompt duration:\n%s" % str(e))
# Is 'prompt' a key in 'args'?
elif 'prompt' in args:
self.pause_type = 'prompt'
self.prompt = "[%s]\n%s:\n" % (hosts, args['prompt'])
        # If 'args' is empty, then this is the default prompted pause
elif len(args.keys()) == 0:
self.pause_type = 'prompt'
self.prompt = "[%s]\nPress enter to continue:\n" % hosts
# I have no idea what you're trying to do. But it's so wrong.
else:
raise ae("invalid pause type given. must be one of: %s" % \
", ".join(self.PAUSE_TYPES))
vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
(self.pause_type, self.duration_unit, self.seconds, self.prompt))
########################################################################
# Begin the hard work!
try:
self._start()
if not self.pause_type == 'prompt':
print "[%s]\nPausing for %s seconds" % (hosts, self.seconds)
time.sleep(self.seconds)
else:
# Clear out any unflushed buffered input which would
# otherwise be consumed by raw_input() prematurely.
tcflush(sys.stdin, TCIFLUSH)
self.result['user_input'] = raw_input(self.prompt)
except KeyboardInterrupt:
while True:
print '\nAction? (a)bort/(c)ontinue: '
c = getch()
if c == 'c':
# continue playbook evaluation
break
elif c == 'a':
# abort further playbook evaluation
raise ae('user requested abort!')
finally:
self._stop()
return ReturnData(conn=conn, result=self.result)
def _start(self):
''' mark the time of execution for duration calculations later '''
self.start = time.time()
self.result['start'] = str(datetime.datetime.now())
if not self.pause_type == 'prompt':
print "(^C-c = continue early, ^C-a = abort)"
def _stop(self):
''' calculate the duration we actually paused for and then
finish building the task result string '''
duration = time.time() - self.start
self.result['stop'] = str(datetime.datetime.now())
self.result['delta'] = int(duration)
if self.duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
self.result['stdout'] = "Paused for %s %s" % (duration, self.duration_unit)
|
ordinary-developer/lin_education
|
refs/heads/master
|
books/techno/python/fluent_python_l_ramalho/code/ch_2-an_array_of_sequences/04-cartesian_products/main.py
|
2
|
if __name__ == '__main__':
colors = ['black', 'white']
sizes = ['S', 'M', 'L']
tshirts = [(color, size) for color in colors for size in sizes]
print(tshirts)
for color in colors:
for size in sizes:
print((color, size))
tshirts = [(color, size) for size in sizes
for color in colors]
print(tshirts)
|
ryanneufeld/OctoPrint
|
refs/heads/master
|
src/octoprint/plugins/softwareupdate/updaters/pip.py
|
17
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
import pkg_resources
from octoprint.util.pip import PipCaller, UnknownPip
from .. import exceptions
logger = logging.getLogger("octoprint.plugins.softwareupdate.updaters.pip")
console_logger = logging.getLogger("octoprint.plugins.softwareupdate.updaters.pip.console")
_pip_callers = dict()
_pip_version_dependency_links = pkg_resources.parse_version("1.5")
def can_perform_update(target, check):
pip_caller = _get_pip_caller(command=check["pip_command"] if "pip_command" in check else None)
return "pip" in check and pip_caller is not None and pip_caller.available
def _get_pip_caller(command=None):
key = command
if command is None:
key = "__default"
if not key in _pip_callers:
try:
_pip_callers[key] = PipCaller(configured=command)
_pip_callers[key].on_log_call = _log_call
_pip_callers[key].on_log_stdout = _log_stdout
_pip_callers[key].on_log_stderr = _log_stderr
except UnknownPip:
_pip_callers[key] = None
return _pip_callers[key]
def perform_update(target, check, target_version):
pip_command = None
if "pip_command" in check:
pip_command = check["pip_command"]
pip_caller = _get_pip_caller(command=pip_command)
if pip_caller is None:
raise exceptions.UpdateError("Can't run pip", None)
install_arg = check["pip"].format(target_version=target_version)
logger.debug(u"Target: %s, executing pip install %s" % (target, install_arg))
pip_args = ["install", check["pip"].format(target_version=target_version, target=target_version)]
if "dependency_links" in check and check["dependency_links"] and pip_caller >= _pip_version_dependency_links:
pip_args += ["--process-dependency-links"]
returncode, stdout, stderr = pip_caller.execute(*pip_args)
if returncode != 0:
raise exceptions.UpdateError("Error while executing pip install", (stdout, stderr))
logger.debug(u"Target: %s, executing pip install %s --ignore-reinstalled --force-reinstall --no-deps" % (target, install_arg))
pip_args += ["--ignore-installed", "--force-reinstall", "--no-deps"]
returncode, stdout, stderr = pip_caller.execute(*pip_args)
if returncode != 0:
raise exceptions.UpdateError("Error while executing pip install --force-reinstall", (stdout, stderr))
return "ok"
def _log_call(*lines):
_log(lines, prefix=u" ")
def _log_stdout(*lines):
_log(lines, prefix=u">")
def _log_stderr(*lines):
_log(lines, prefix=u"!")
def _log(lines, prefix=None):
lines = map(lambda x: x.strip(), lines)
for line in lines:
console_logger.debug(u"{prefix} {line}".format(**locals()))
|
windyuuy/opera
|
refs/heads/master
|
chromium/src/chrome/test/functional/chromeos_vpn.py
|
52
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import pyauto_functional
import pyauto
import chromeos_network
class PrivateNetworkTest(chromeos_network.PyNetworkUITest):
"""Tests for VPN.
Expected to be run with access to the lab setup as defined in
vpn_testbed_config.
"""
def _PingTest(self, hostname, timeout=10):
"""Attempt to ping a remote host.
Returns:
True if the ping succeeds.
False otherwise.
"""
return subprocess.call(['ping', '-c', '1', '-W',
str(timeout), hostname]) == 0
def testCanAddNetwork(self):
"""Test to add a VPN network, connect and disconnect."""
# Load VPN config data from file.
vpn_info_file = os.path.join(pyauto.PyUITest.DataDir(),
'pyauto_private/chromeos/network',
'vpn_testbed_config')
self.assertTrue(os.path.exists(vpn_info_file))
vpn = self.EvalDataFrom(vpn_info_file)
# Connect to wifi.
self.NetworkScan()
self.WaitUntilWifiNetworkAvailable(vpn['wifi'])
wifi_vpn = self.GetServicePath(vpn['wifi'])
self.assertTrue(wifi_vpn)
self.assertTrue(self.ConnectToWifiNetwork(wifi_vpn) is None)
self.assertFalse(self._PingTest(vpn['ping']),
msg='VPN ping succeeded when not connected.')
# Connect to the VPN.
self.AddPrivateNetwork(hostname=vpn['hostname'],
service_name=vpn['service_name'],
provider_type=vpn['provider_type'],
username=vpn['username'],
password=vpn['password'],
key=vpn['key'])
# Get private network info.
result = self.GetPrivateNetworkInfo()
self.assertTrue('connected' in result, msg='Could not connect to VPN')
connected = result['connected']
self.assertTrue(self._PingTest(vpn['ping']), msg='VPN ping failed.')
self.DisconnectFromPrivateNetwork()
self.assertFalse(self._PingTest(vpn['ping']),
msg='VPN ping succeeded when not connected.')
# Connect to the remembered private network.
self.ConnectToPrivateNetwork(connected)
self.assertTrue(self._PingTest(vpn['ping']), msg='VPN ping failed.')
self.DisconnectFromPrivateNetwork()
self.assertFalse(self._PingTest(vpn['ping']),
msg='VPN ping succeeded when not connected.')
if __name__ == '__main__':
pyauto_functional.Main()
|
d33tah/npyscreen
|
refs/heads/master
|
npyscreen/compatibility_code/oldtreeclasses.py
|
15
|
import curses
import weakref
from .. import wgmultiline as multiline
from .. import wgcheckbox as checkbox
from ..wgmultilinetree import TreeLine, TreeLineAnnotated
# The classes below also reference NPSTreeData, weakref proxies and curses.beep();
# the relative import path here is an editor assumption (npyscreen keeps the tree
# data structures in npysTree).
from .. import npysTree as NPSTree
#####################################################################################
# The Following are maintained here for compatibility only.
# All new Applications should use new classes.
#####################################################################################
# OLD TREE WIDGET
class MultiLineTree(multiline.MultiLine):
def _setMyValues(self, tree):
if tree == [] or tree == None:
self._myFullValues = NPSTree.NPSTreeData()
elif not isinstance(tree, NPSTree.NPSTreeData):
raise TypeError("MultiLineTree widget can only contain a NPSTreeData object in its values attribute")
else:
self._myFullValues = tree
def _getApparentValues(self):
return self._myFullValues.getTreeAsList()
def _walkMyValues(self):
return self._myFullValues.walkTree()
def _delMyValues(self):
self._myFullValues = None
values = property(_getApparentValues, _setMyValues, _delMyValues)
def get_tree_display(self, vl):
dp = vl.findDepth()
if dp > 0:
control_chars = "| " * (dp-1) + "|-"
else:
control_chars = ""
if vl.hasChildren():
if vl.expanded:
control_chars = control_chars + "+"
else:
control_chars = control_chars + ">"
else:
control_chars = control_chars + ""
return control_chars
def _set_line_values(self, line, value_indexer):
try:
line.value = self.get_tree_display(self.values[value_indexer]) + " " + self.display_value(self.values[value_indexer]) + " "
line.hidden = False
except IndexError:
self._set_line_blank(line)
except TypeError:
self._set_line_blank(line)
def display_value(self, vl):
return str(vl.getContentForDisplay())
class SelectOneTree(MultiLineTree):
_contained_widgets = checkbox.RoundCheckBox
def _print_line(self, line, value_indexer):
try:
line.value = self.values[value_indexer]
line.hidden = False
if (self.values[value_indexer] in self.value and (self.value is not None)):
line.show_bold = True
line.name = self.display_value(self.values[value_indexer])
line.value = True
else:
line.show_bold = False
line.name = self.display_value(self.values[value_indexer])
line.value = False
if value_indexer in self._filtered_values_cache:
line.important = True
else:
line.important = False
except IndexError:
line.name = None
line.hidden = True
line.highlight= False
def update(self, clear=True):
if self.hidden and clear:
self.clear()
return False
elif clear:
return False
# Make sure that self.value is a list
if not hasattr(self.value, "append"):
if self.value is not None:
self.value = [self.value, ]
else:
self.value = []
super(SelectOneTree, self).update(clear=clear)
def h_select(self, ch):
try:
self.value = [weakref.proxy(self.values[self.cursor_line]),]
except TypeError:
# Actually, this is inefficient, since with the NPSTree class (default) we will always be here - since by default we will
# try to create a weakref to a weakref, and that will fail with a type-error. BUT we are only doing it on a keypress, so
# it shouldn't create a huge performance hit, and is future-proof. Code replicated in h_select_exit
self.value = [self.values[self.cursor_line],]
def h_select_exit(self, ch):
try:
self.value = [weakref.proxy(self.values[self.cursor_line]),]
except TypeError:
# Actually, this is inefficient, since with the NPSTree class (default) we will always be here - since by default we will
# try to create a weakref to a weakref, and that will fail with a type-error. BUT we are only doing it on a keypress, so
# it shouldn't create a huge performance hit, and is future-proof.
self.value = [self.values[self.cursor_line],]
if self.return_exit:
self.editing = False
self.how_exited=True
def h_set_filtered_to_selected(self, ch):
if len(self._filtered_values_cache) < 2:
self.value = self.get_filtered_values()
else:
# There is an error - trying to select too many things.
curses.beep()
#####################################################################################
# The Following are maintained here for compatibility only.
# All new Applications should use classes above this comment.
#####################################################################################
class MultiLineTreeNew(multiline.MultiLine):
# Experimental
_contained_widgets = TreeLineAnnotated
#_contained_widgets = TreeLine
def _setMyValues(self, tree):
if tree == [] or tree == None:
self._myFullValues = NPSTree.NPSTreeData()
elif not isinstance(tree, NPSTree.NPSTreeData):
tree = self.convertToTree(tree)
self._myFullValues = tree
if not isinstance(tree, NPSTree.NPSTreeData):
raise TypeError("MultiLineTree widget can only contain a NPSTreeData object in its values attribute")
else:
self._myFullValues = tree
    def convertToTree(self, tree):
"Override this function to convert a set of values to a tree."
return None
def clearDisplayCache(self):
self._cached_tree = None
self._cached_sort = None
self._cached_tree_as_list = None
def _getApparentValues(self):
try:
if self._cached_tree is weakref.proxy(self._myFullValues) and \
(self._cached_sort == (self._myFullValues.sort, self._myFullValues.sort_function)):
return self._cached_tree_as_list
except:
pass
self._cached_tree = weakref.proxy(self._myFullValues)
self._cached_sort = (self._myFullValues.sort, self._myFullValues.sort_function)
self._cached_tree_as_list = self._myFullValues.getTreeAsList()
return self._cached_tree_as_list
def _walkMyValues(self):
return self._myFullValues.walkTree()
def _delMyValues(self):
self._myFullValues = None
values = property(_getApparentValues, _setMyValues, _delMyValues)
def filter_value(self, index):
if self._filter in self.display_value(self.values[index].getContent()):
return True
else:
return False
#def display_value(self, vl):
# return vl
def set_up_handlers(self):
super(MultiLineTreeNew, self).set_up_handlers()
self.handlers.update({
ord('<'): self.h_collapse_tree,
ord('>'): self.h_expand_tree,
ord('['): self.h_collapse_tree,
ord(']'): self.h_expand_tree,
ord('{'): self.h_collapse_all,
ord('}'): self.h_expand_all,
ord('h'): self.h_collapse_tree,
ord('l'): self.h_expand_tree,
})
#def display_value(self, vl):
# return vl
def _before_print_lines(self):
pass
def _set_line_values(self, line, value_indexer):
line._tree_real_value = None
line._tree_depth = False
line._tree_sibling_next = False
line._tree_has_children = False
line._tree_expanded = False
line._tree_last_line = False
line._tree_depth_next = False
line._tree_ignore_root = None
try:
line.value = self.display_value(self.values[value_indexer])
line._tree_real_value = self.values[value_indexer]
line._tree_ignore_root = self._myFullValues.ignoreRoot
try:
line._tree_depth = self.values[value_indexer].findDepth()
line._tree_has_children = self.values[value_indexer].hasChildren()
line._tree_expanded = self.values[value_indexer].expanded
except:
line._tree_depth = False
line._tree_has_children = False
line._tree_expanded = False
try:
if line._tree_depth == self.values[value_indexer+1].findDepth():
line._tree_sibling_next = True
else:
line._tree_sibling_next = False
except:
                line._tree_sibling_next = False
line._tree_last_line = True
try:
line._tree_depth_next = self.values[value_indexer+1].findDepth()
except:
line._tree_depth_next = False
line.hidden = False
except IndexError:
self._set_line_blank(line)
except TypeError:
self._set_line_blank(line)
def h_collapse_tree(self, ch):
if self.values[self.cursor_line].expanded and self.values[self.cursor_line].hasChildren():
self.values[self.cursor_line].expanded = False
else:
look_for_depth = self.values[self.cursor_line].findDepth() - 1
cursor_line = self.cursor_line - 1
while cursor_line >= 0:
if look_for_depth == self.values[cursor_line].findDepth():
self.cursor_line = cursor_line
self.values[cursor_line].expanded = False
break
else:
cursor_line -= 1
self._cached_tree = None
self.display()
def h_expand_tree(self, ch):
if not self.values[self.cursor_line].expanded:
self.values[self.cursor_line].expanded = True
else:
for v in self.values[self.cursor_line].walkTree(onlyExpanded=False):
v.expanded = True
self._cached_tree = None
self.display()
def h_collapse_all(self, ch):
for v in self._myFullValues.walkTree(onlyExpanded=True):
v.expanded = False
self._cached_tree = None
self.cursor_line = 0
self.display()
def h_expand_all(self, ch):
for v in self._myFullValues.walkTree(onlyExpanded=False):
v.expanded = True
self._cached_tree = None
self.cursor_line = 0
self.display()
class MultiLineTreeNewAnnotated(MultiLineTreeNew):
_contained_widgets = TreeLineAnnotated
class MultiLineTreeNewAction(MultiLineTreeNew, multiline.MultiLineAction):
pass
class MultiLineTreeNewAnnotatedAction(MultiLineTreeNew, multiline.MultiLineAction):
_contained_widgets = TreeLineAnnotated
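# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes this file
# still lives inside the npyscreen package so that `import npyscreen` works,
# and that NPSTreeData.newChild() accepts the same keywords as the NPSTreeData
# constructor; treat it as an illustration rather than a supported example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import npyscreen

    class _TreeDemoApp(npyscreen.NPSAppManaged):
        def onStart(self):
            form = self.addForm("MAIN", npyscreen.Form, name="Tree widget demo")
            # Build a small tree: the root node is ignored by default
            # (ignoreRoot), so only the branches and the nested leaf show up.
            tree = NPSTree.NPSTreeData(content="root")
            first = tree.newChild(content="first branch")
            first.newChild(content="nested leaf")
            tree.newChild(content="second branch")
            form.add(MultiLineTreeNewAnnotated, values=tree, max_height=10)

    _TreeDemoApp().run()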
|
loechel/lmu.contenttypes.polls
|
refs/heads/master
|
src/lmu/contenttypes/polls/browser/__init__.py
|
12133432
| |
iyer-arvind/PyFR
|
refs/heads/develop
|
pyfr/scripts/__init__.py
|
12133432
| |
san-mate/python-social-auth
|
refs/heads/master
|
social/tests/backends/__init__.py
|
12133432
| |
eemirtekin/edx-platform
|
refs/heads/master
|
common/djangoapps/request_cache/__init__.py
|
12133432
| |
mainakibui/kobocat
|
refs/heads/master
|
onadata/libs/mixins/view_permission_mixin.py
|
10
|
from django.core.exceptions import ImproperlyConfigured
from guardian.shortcuts import get_objects_for_user
class ViewPermissionMixin(object):
def get_queryset(self):
"""
Get the list of items for this view
based on user's view_%(model_name)s permissions.
"""
self.model = self.model if self.model is not None else \
self.queryset.model if self.queryset is not None else None
if self.request is not None and self.model is not None:
kwargs = {
'app_label': self.model._meta.app_label,
'model_name': self.model._meta.module_name
}
perms = ['%(app_label)s.view_%(model_name)s' % kwargs]
return get_objects_for_user(self.request.user, perms, self.model)
if self.model is not None:
return self.model._default_manager.all()
raise ImproperlyConfigured("'%s' must define 'queryset' or 'model'"
% self.__class__.__name__)
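# Usage sketch (illustrative only; the model, serializer and viewset names below
# are hypothetical, not part of this codebase). Mixed into a DRF viewset, the
# get_queryset() above limits list/detail responses to the objects on which the
# requesting user holds the object-level "view_<model>" permission via
# django-guardian:
#
#     from rest_framework import viewsets
#     from myapp.models import Report                  # hypothetical model
#     from myapp.serializers import ReportSerializer   # hypothetical serializer
#
#     class ReportViewSet(ViewPermissionMixin, viewsets.ReadOnlyModelViewSet):
#         model = Report
#         serializer_class = ReportSerializer
#         # No explicit queryset: the mixin builds it from `model` plus the
#         # user's guardian permissions.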
|
snahelou/awx
|
refs/heads/devel
|
awx/main/south_migrations/0053_v210_changes.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'JobTemplate.survey_enabled'
db.add_column(u'main_jobtemplate', 'survey_enabled',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'JobTemplate.survey_spec'
db.add_column(u'main_jobtemplate', 'survey_spec',
self.gf('jsonfield.fields.JSONField')(default={}, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'JobTemplate.survey_enabled'
db.delete_column(u'main_jobtemplate', 'survey_enabled')
# Deleting field 'JobTemplate.survey_spec'
db.delete_column(u'main_jobtemplate', 'survey_spec')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.activitystream': {
'Meta': {'object_name': 'ActivityStream'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_stream'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'changes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'credential': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Credential']", 'symmetrical': 'False', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Host']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Inventory']", 'symmetrical': 'False', 'blank': 'True'}),
'inventory_source': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventorySource']", 'symmetrical': 'False', 'blank': 'True'}),
'inventory_update': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventoryUpdate']", 'symmetrical': 'False', 'blank': 'True'}),
'job': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Job']", 'symmetrical': 'False', 'blank': 'True'}),
'job_template': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.JobTemplate']", 'symmetrical': 'False', 'blank': 'True'}),
'object1': ('django.db.models.fields.TextField', [], {}),
'object2': ('django.db.models.fields.TextField', [], {}),
'object_relationship_type': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '13'}),
'organization': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Organization']", 'symmetrical': 'False', 'blank': 'True'}),
'permission': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Project']", 'symmetrical': 'False', 'blank': 'True'}),
'project_update': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ProjectUpdate']", 'symmetrical': 'False', 'blank': 'True'}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Schedule']", 'symmetrical': 'False', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'unified_job': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'activity_stream_as_unified_job+'", 'blank': 'True', 'to': "orm['main.UnifiedJob']"}),
'unified_job_template': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'activity_stream_as_unified_job_template+'", 'blank': 'True', 'to': "orm['main.UnifiedJobTemplate']"}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'main.authtoken': {
'Meta': {'object_name': 'AuthToken'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'request_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_tokens'", 'to': u"orm['auth.User']"})
},
'main.credential': {
'Meta': {'ordering': "('kind', 'name')", 'unique_together': "[('user', 'team', 'kind', 'name')]", 'object_name': 'Credential'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cloud': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'host': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'default': "'ssh'", 'max_length': '32'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'project': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'ssh_key_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'ssh_key_unlock': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'sudo_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'sudo_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': "orm['main.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'vault_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'})
},
'main.group': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('name', 'inventory'),)", 'object_name': 'Group'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': "orm['main.Host']"}),
'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['main.Inventory']"}),
'inventory_sources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['main.InventorySource']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'children'", 'blank': 'True', 'to': "orm['main.Group']"}),
'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.host': {
'Meta': {'ordering': "('inventory', 'name')", 'unique_together': "(('name', 'inventory'),)", 'object_name': 'Host'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts'", 'to': "orm['main.Inventory']"}),
'inventory_sources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'hosts'", 'symmetrical': 'False', 'to': "orm['main.InventorySource']"}),
'last_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'hosts_as_last_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Job']"}),
'last_job_host_summary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts_as_last_job_summary+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobHostSummary']", 'blank': 'True', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.inventory': {
'Meta': {'ordering': "('name',)", 'unique_together': "[('name', 'organization')]", 'object_name': 'Inventory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_sources_with_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventories'", 'to': "orm['main.Organization']"}),
'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_inventory_sources': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.inventorysource': {
'Meta': {'object_name': 'InventorySource', '_ormbases': ['main.UnifiedJobTemplate']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventorysources'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'group': ('awx.main.fields.AutoOneToOneField', [], {'default': 'None', 'related_name': "'inventory_source'", 'unique': 'True', 'null': 'True', 'to': "orm['main.Group']"}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'inventory_sources'", 'null': 'True', 'to': "orm['main.Inventory']"}),
'overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'overwrite_vars': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'source_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_regions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}),
'update_cache_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'main.inventoryupdate': {
'Meta': {'object_name': 'InventoryUpdate', '_ormbases': ['main.UnifiedJob']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventoryupdates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'inventory_source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_updates'", 'to': "orm['main.InventorySource']"}),
'license_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'overwrite_vars': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'source_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_regions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'})
},
'main.job': {
'Meta': {'ordering': "('id',)", 'object_name': 'Job', '_ormbases': ['main.UnifiedJob']},
'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'jobs'", 'symmetrical': 'False', 'through': "orm['main.JobHostSummary']", 'to': "orm['main.Host']"}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobTemplate']", 'blank': 'True', 'null': 'True'}),
'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'playbook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Project']"}),
u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'}),
'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'})
},
'main.jobevent': {
'Meta': {'ordering': "('pk',)", 'object_name': 'JobEvent'},
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'counter': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'event_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'job_events_as_primary_host'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Host']"}),
'host_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'job_events'", 'symmetrical': 'False', 'to': "orm['main.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_events'", 'to': "orm['main.Job']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.JobEvent']"}),
'play': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'role': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'task': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'})
},
'main.jobhostsummary': {
'Meta': {'ordering': "('-pk',)", 'unique_together': "[('job', 'host_name')]", 'object_name': 'JobHostSummary'},
'changed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'dark': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'job_host_summaries'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Host']"}),
'host_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_host_summaries'", 'to': "orm['main.Job']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'ok': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'skipped': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'main.jobtemplate': {
'Meta': {'ordering': "('name',)", 'object_name': 'JobTemplate', '_ormbases': ['main.UnifiedJobTemplate']},
'ask_variables_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'host_config_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'playbook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplates'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Project']"}),
'survey_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey_spec': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}),
'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'})
},
'main.organization': {
'Meta': {'ordering': "('name',)", 'object_name': 'Organization'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_of_organizations'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['main.Project']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
'main.permission': {
'Meta': {'object_name': 'Permission'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'permission_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"})
},
'main.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ldap_dn': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('awx.main.fields.AutoOneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'main.project': {
'Meta': {'ordering': "('id',)", 'object_name': 'Project', '_ormbases': ['main.UnifiedJobTemplate']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'local_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'scm_branch': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'scm_clean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_next_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'blank': 'True'}),
'scm_update_cache_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'scm_update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'})
},
'main.projectupdate': {
'Meta': {'object_name': 'ProjectUpdate', '_ormbases': ['main.UnifiedJob']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projectupdates'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'local_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_updates'", 'to': "orm['main.Project']"}),
'scm_branch': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'scm_clean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'blank': 'True'}),
'scm_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'})
},
'main.schedule': {
'Meta': {'ordering': "['-next_run']", 'object_name': 'Schedule'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'schedule\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'dtend': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'dtstart': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'schedule\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'next_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'rrule': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unified_job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'schedules'", 'to': "orm['main.UnifiedJobTemplate']"})
},
'main.team': {
'Meta': {'ordering': "('organization__name', 'name')", 'unique_together': "[('organization', 'name')]", 'object_name': 'Team'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'teams'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': "orm['main.Project']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
'main.unifiedjob': {
'Meta': {'object_name': 'UnifiedJob'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cancel_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjob\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'dependent_jobs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'dependent_jobs_rel_+'", 'to': "orm['main.UnifiedJob']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'elapsed': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '3'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finished': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'job_cwd': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_env': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'job_explanation': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'launch_type': ('django.db.models.fields.CharField', [], {'default': "'manual'", 'max_length': '20'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjob\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'old_pk': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_main.unifiedjob_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'result_stdout_file': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_stdout_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_traceback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'schedule': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['main.Schedule']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'start_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'started': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'}),
'unified_job_template': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjob_unified_jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJobTemplate']"})
},
'main.unifiedjobtemplate': {
'Meta': {'unique_together': "[('polymorphic_ctype', 'name')]", 'object_name': 'UnifiedJobTemplate'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjobtemplate\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'current_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_current_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJob']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'has_schedules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_last_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJob']"}),
'last_job_failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_job_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjobtemplate\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'next_job_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'next_schedule': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_next_schedule+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Schedule']"}),
'old_pk': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_main.unifiedjobtemplate_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'ok'", 'max_length': '32'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['main']
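    # For reference, the forwards() migration above corresponds roughly to adding
    # the following two fields to the JobTemplate model (sketch only; the actual
    # model definition lives elsewhere in awx.main.models):
    #
    #     survey_enabled = models.BooleanField(default=False)
    #     survey_spec = jsonfield.fields.JSONField(default={}, blank=True)
    #
    # backwards() simply drops the same two columns from main_jobtemplate.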
|
ssebastianj/ia2013-tpi-rl
|
refs/heads/master
|
src/tests/entrenar_tests.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import csv
import decimal
import Queue
import multiprocessing
import numpy
import os
import sys
import threading
import time
sys.path.append(os.path.abspath(os.path.join(os.pardir)))
from core.qlearning.qlearning import QLearning
from core.gridworld.gridworld import GridWorld
from core.estado.estado import Estado, TipoEstado, TIPOESTADO
from core.tecnicas.aleatorio import Aleatorio
from core.tecnicas.egreedy import EGreedy, Greedy
from core.tecnicas.softmax import Softmax
from graphs.avgrwds.worker import GraphRecompensasPromedioWorker
from graphs.sucessfuleps.worker import GraphSucessfulEpisodesWorker
from graphs.matdiffs.worker import GraphMatrizDiffsWorker
from graphs.itersep.worker import GraphIteracionesXEpisodioWorker
TESTS_DIR = os.path.abspath(os.path.join(os.pardir, '..', 'pruebas'))
tecnicas = {0: "Greedy",
1: "ε-Greedy",
2: "Softmax",
3: "Aleatorio"
}
gw_dimensiones = ["3 x 3", "4 x 4", "5 x 5",
"6 x 6", "7 x 7", "8 x 8", "9 x 9", "10 x 10"]
window_config = {"item":
{"show_tooltip": True,
"menu_estado":
{"ocultar_tipos":
[TIPOESTADO.AGENTE],
"enabled": True
},
"size": 40},
"gw":
{"entrenamiento": {"actual_state": {"show": True, "color": "#000000", "icono": None},
"recompfinalauto": True},
"recorrido": {"actual_state": {"show": True, "color": "#000000", "icono": None}}
},
"tipos_estados":
{0: TipoEstado(0, None, "Inicial", "I", "#FF5500", None),
1: TipoEstado(1, 1000, "Final", "F", "#00AB00", None),
2: TipoEstado(2, None, "Agente", "A", "#474747", None),
3: TipoEstado(3, 0, "Neutro", "N", "#FFFFFF", None),
4: TipoEstado(4, 100, "Excelente", "E", "#BB0011", None),
5: TipoEstado(5, 50, "Bueno", "B", "#4F0ACC", None),
6: TipoEstado(6, -50, "Malo", "M", "#EB00A1", None),
7: TipoEstado(7, None, "Pared", "P", "#000000", None),
},
"opt_path":
{"color": "#55FF00",
"pintar_inicial": False,
"pintar_final": False,
"delay": 0,
"show_icon": False
},
"exponentes_final": {6: 12,
7: 17,
8: 19,
9: 28,
10: 31
}
}
def main():
lista_archivos_pruebas = []
sys.stdout.write("Indexando archivos de pruebas...\n")
for root, folder, archivos in os.walk(TESTS_DIR):
for archivo in archivos:
if archivo.endswith(".csv") and archivo != "info.csv":
lista_archivos_pruebas.append(os.path.join(root, archivo))
for archivo_pruebas in lista_archivos_pruebas:
sys.stdout.write("Usando archivo '{0}'...\n".format(archivo_pruebas))
nombre_archivo = os.path.splitext(os.path.basename(archivo_pruebas))[0]
test1_dir = os.path.abspath(os.path.join(os.path.dirname(archivo_pruebas),
'resultados'))
if not os.path.exists(test1_dir):
os.mkdir(test1_dir)
test2_dir = os.path.abspath(os.path.join(test1_dir, nombre_archivo))
if not os.path.exists(test2_dir):
os.mkdir(test2_dir)
with open(archivo_pruebas, 'rb') as apf:
# dialecto = csv.Sniffer().sniff(apf.read(), delimiters=';')
inp_csv = csv.reader(apf, dialect='excel', delimiter=';')
contador_pruebas = 1
for linea_prueba in inp_csv:
sys.stdout.write("Ejecutando prueba {0}... ".format(contador_pruebas))
try:
ejecutar_prueba(linea_prueba[0],
linea_prueba[1],
linea_prueba[2],
linea_prueba[3],
linea_prueba[4],
linea_prueba[5],
linea_prueba[6],
linea_prueba[7],
linea_prueba[8],
linea_prueba[9],
linea_prueba[10],
linea_prueba[11],
linea_prueba[12],
contador_pruebas,
test2_dir)
sys.stdout.write("Prueba {0} OK\n".format(contador_pruebas))
contador_pruebas += 1
except decimal.Overflow:
sys.stdout.write("Prueba {0} ERROR: Overflow\n".format(contador_pruebas))
contador_pruebas += 1
continue
except TypeError as te:
sys.stdout.write("Prueba {0} ERROR: TypeError\n".format(contador_pruebas))
sys.stdout.write(str(te))
contador_pruebas += 1
continue
except ValueError:
sys.stdout.write("Prueba {0} ERROR: ValueError\n".format(contador_pruebas))
contador_pruebas += 1
continue
except AttributeError:
sys.stdout.write("Prueba {0} ERROR: AttributeError\n".format(contador_pruebas))
contador_pruebas += 1
continue
except multiprocessing.ProcessError:
sys.stdout.write("Prueba {0} ERROR: ProcessError\n".format(contador_pruebas))
contador_pruebas += 1
continue
sys.stdout.write("Fin de pruebas\n\n")
def ejecutar_prueba(estados, gamma, tecnica_idx, parametro, cant_episodios,
paso_decremento, intervalo_decremento, limitar_iteraciones,
cant_max_iteraciones, valor_inicial, detener_por_diff,
diff_minima, interv_calculo_diff, nro_prueba, output_dir):
gamma = float(gamma)
tecnica_idx = int(tecnica_idx)
if tecnica_idx == 0:
tecnica = Greedy
elif tecnica_idx == 1:
tecnica = EGreedy
elif tecnica_idx == 2:
tecnica = Softmax
elif tecnica_idx == 3:
tecnica = Aleatorio
cant_episodios = int(cant_episodios)
parametro = float(parametro)
paso_decremento = float(paso_decremento)
intervalo_decremento = int(intervalo_decremento)
if isinstance(limitar_iteraciones, bool):
limitar_nro_iteraciones = limitar_iteraciones
else:
if limitar_iteraciones.strip().lower() == 'false':
limitar_nro_iteraciones = False
elif limitar_iteraciones.strip().lower() == 'true':
limitar_nro_iteraciones = True
cant_max_iter = int(cant_max_iteraciones)
estado_excelente = window_config["tipos_estados"][TIPOESTADO.EXCELENTE]
estado_final = window_config["tipos_estados"][TIPOESTADO.FINAL]
val_inicial = float(valor_inicial)
if val_inicial == 0:
init_value_fn = 0
elif val_inicial > 0:
init_value_fn = estado_final.recompensa + val_inicial
else:
init_value_fn = 0
if isinstance(detener_por_diff, bool):
matdiff_status = detener_por_diff
else:
if detener_por_diff.strip().lower() == 'false':
matdiff_status = False
elif detener_por_diff.strip().lower() == 'true':
matdiff_status = True
matriz_min_diff = float(diff_minima)
intervalo_diff_calc = int(interv_calculo_diff)
estados_num = eval(estados)
ancho, alto = len(estados_num), len(estados_num[0])
recomp_excelente = estado_excelente.recompensa
exponente = window_config["exponentes_final"][ancho]
calc_recomp_final = int(recomp_excelente / (gamma ** exponente))
estado_final_cfg = estado_final
estado_final_cfg.recompensa = calc_recomp_final
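    # Worked example (illustrative numbers only): for a 6-wide grid the exponent
    # is 12, so with gamma = 0.9 the final-state reward becomes
    # int(100 / 0.9 ** 12) = int(354.06...) = 354. Scaling the final reward up
    # with the grid size appears intended to keep the final state worth more
    # than an "Excelente" state (100) even after discounting over many steps.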
    # Create the GridWorld of states
estados = numpy.empty((alto, ancho), Estado)
coordenadas = []
tipos_estados = window_config["tipos_estados"]
    # Build the grid as a list of lists
for i in xrange(1, alto + 1):
fila = numpy.empty((1, ancho), Estado)
for j in xrange(1, ancho + 1):
fila[0][j - 1] = Estado(i, j, tipos_estados[estados_num[i - 1][j - 1]])
coordenadas.append((i, j))
estados[i - 1] = fila
gridworld = GridWorld(ancho, alto, tipos_estados, estados, [TIPOESTADO.PARED])
estado_final_gw = gridworld.tipos_estados[TIPOESTADO.FINAL]
estado_final_gw.recompensa = calc_recomp_final
    # Create a new Q-Learning instance
qlearning = QLearning(gridworld,
gamma,
(tecnica, parametro, paso_decremento, intervalo_decremento),
cant_episodios,
(limitar_nro_iteraciones, cant_max_iter),
init_value_fn,
(matdiff_status, matriz_min_diff, intervalo_diff_calc),
None)
out_queue = multiprocessing.Queue()
error_queue = multiprocessing.Queue()
try:
entrenar_worker = qlearning.entrenar(out_queue, error_queue)
except TypeError:
return
except ValueError:
return
except AttributeError:
return
except decimal.Overflow:
return
# logging.debug(entrenar_worker)
matriz_q_inp = None
graph_recompensas_promedio = None
graph_episodios_finalizados = None
graph_mat_diff = None
graph_iters_por_episodio = None
    # Poll the training worker's output queue until its child processes finish,
    # keeping the most recent statistics (Q matrix, reward/diff/episode data).
    while 1:
try:
data_entrenar = get_all_from_queue(out_queue)
for ql_ent_info in data_entrenar:
# estado_actual_ent = ql_ent_info.get('EstadoActual', None)
# nro_episodio = ql_ent_info.get('NroEpisodio', None)
# cant_iteraciones = ql_ent_info.get('NroIteracion', None)
# episode_exec_time = ql_ent_info.get('EpisodiosExecTime', 0.0)
# iter_exec_time = ql_ent_info.get('IteracionesExecTime', 0.0)
worker_joined = ql_ent_info.get('ProcesoJoined', False)
# loop_alarm = ql_ent_info.get('LoopAlarm', False)
matriz_q_inp = ql_ent_info.get('MatrizQ', None)
# valor_parametro = ql_ent_info.get('ValorParametro', None)
# running_exec_time_ent = ql_ent_info.get('RunningExecTime', 0.0)
# tmp_mat_diff = ql_ent_info.get('MatDiff', None)
# corte_iteracion = ql_ent_info.get('CorteIteracion', None)
graph_recompensas_promedio = ql_ent_info.get('MatRecompProm', None)
graph_episodios_finalizados = ql_ent_info.get('EpFinalizados', None)
graph_mat_diff = ql_ent_info.get('MatDiffStat', None)
graph_iters_por_episodio = ql_ent_info.get('ItersXEpisodio', None)
time.sleep(0.01)
except Queue.Empty:
pass
except AttributeError:
pass
active_children = multiprocessing.active_children()
for proceso in active_children:
if not proceso.is_alive():
proceso.join(0.01)
if not active_children:
entrenar_worker.join(0.01)
break
# logging.debug(graph_episodios_finalizados)
# logging.debug(graph_recompensas_promedio)
# logging.debug(matriz_q_inp)
parametros = (gamma,
(tecnica_idx, parametro, paso_decremento, intervalo_decremento),
cant_episodios,
(limitar_nro_iteraciones, cant_max_iter),
init_value_fn
)
test_dir = os.path.abspath(os.path.join(output_dir, "Prueba_{0}".format(nro_prueba)))
if not os.path.exists(test_dir):
os.mkdir(test_dir)
csv_path = os.path.abspath(os.path.join(test_dir, 'info.csv'))
with open(csv_path, 'wb') as csvf:
csv_writer = csv.writer(csvf, dialect='excel', delimiter=';')
csv_writer.writerow(['Gamma', gamma])
csv_writer.writerow(['Tecnica', tecnicas[tecnica_idx]])
csv_writer.writerow(['Parametro', parametro])
csv_writer.writerow(['Paso decremento', paso_decremento])
csv_writer.writerow(['Intervalo decremento', intervalo_decremento])
csv_writer.writerow(['Episodios', cant_episodios])
csv_writer.writerow(['Limitar iteraciones', limitar_iteraciones])
csv_writer.writerow(['Cant. Max. Iteraciones', cant_max_iter])
csv_writer.writerow(['Valor Inicial', init_value_fn])
csv_writer.writerow([])
q_1 = multiprocessing.Queue()
q_1.put(((parametros, graph_recompensas_promedio), nro_prueba, output_dir))
worker_1 = multiprocessing.Process(None,
g_r_p_w,
"GraficarRecompensasPromedio",
(q_1,),
{}
)
q_2 = multiprocessing.Queue()
q_2.put(((parametros, graph_mat_diff), nro_prueba, output_dir))
worker_2 = multiprocessing.Process(None,
g_d_m_w,
"GraficarDiferenciaMatrices",
(q_2,),
{}
)
#===============================================================================
# worker_3 = threading.Thread(None,
# graficar_episodios_exitosos,
# "GraficarRecompensasPromedio",
# ((parametros, graph_episodios_finalizados), nro_prueba, output_dir),
# {},
# None)
#
# worker_4 = threading.Thread(None,
# graficar_iters_por_episodio,
# "GraficarRecompensasPromedio",
# ((parametros, graph_iters_por_episodio), nro_prueba, output_dir),
# {},
# None)
#===============================================================================
worker_1.start()
worker_2.start()
# worker_3.start()
# worker_4.start()
worker_1.join(0.1)
worker_2.join(0.1)
time.sleep(0.1)
if not worker_1.is_alive():
worker_1.terminate()
if not worker_2.is_alive():
worker_2.terminate()
# graficar_recompensas_promedio((parametros, graph_recompensas_promedio), nro_prueba, output_dir)
# graficar_episodios_exitosos((parametros, graph_episodios_finalizados), nro_prueba, output_dir)
# graficar_iters_por_episodio((parametros, graph_iters_por_episodio), nro_prueba, output_dir)
# graficar_diferencias_matrizq((parametros, graph_mat_diff), nro_prueba, output_dir)
def get_all_from_queue(cola):
try:
while 1:
yield cola.get_nowait()
except Queue.Empty:
raise StopIteration
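# Illustrative only (not part of the original script): a minimal sketch of how
# get_all_from_queue() can drain a multiprocessing.Queue without blocking. The
# queue contents are hypothetical; the short sleep gives the queue's feeder
# thread time to flush the items before get_nowait() is called.
def _ejemplo_get_all_from_queue():
    cola = multiprocessing.Queue()
    for valor in (1, 2, 3):
        cola.put(valor)
    time.sleep(0.1)
    return [item for item in get_all_from_queue(cola)]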
def g_r_p_w(inp_queue):
try:
datos = inp_queue.get()
graficar_recompensas_promedio(datos[0], datos[1], datos[2])
except Queue.Empty:
pass
def g_d_m_w(inp_queue):
try:
datos = inp_queue.get()
graficar_diferencias_matrizq(datos[0], datos[1], datos[2])
except Queue.Empty:
pass
def graficar_episodios_exitosos(tupla, nro_prueba, output_dir):
worker = GraphSucessfulEpisodesWorker(tupla)
test_dir = os.path.abspath(os.path.join(output_dir, "Prueba_{0}".format(nro_prueba)))
if not os.path.exists(test_dir):
os.mkdir(test_dir)
image_path = os.path.abspath(os.path.join(test_dir, 'episodios_exitosos.png'))
worker.guardar_dibujo(image_path)
csv_path = os.path.abspath(os.path.join(test_dir, 'info.csv'))
worker.exportar_info(csv_path, True)
def graficar_recompensas_promedio(tupla, nro_prueba, output_dir):
worker = GraphRecompensasPromedioWorker(tupla)
test_dir = os.path.abspath(os.path.join(output_dir, "Prueba_{0}".format(nro_prueba)))
if not os.path.exists(test_dir):
os.mkdir(test_dir)
image_path = os.path.abspath(os.path.join(test_dir, 'recompensas_promedio.png'))
worker.guardar_dibujo(image_path)
csv_path = os.path.abspath(os.path.join(test_dir, 'info.csv'))
worker.exportar_info(csv_path, True)
def graficar_iters_por_episodio(tupla, nro_prueba, output_dir):
worker = GraphIteracionesXEpisodioWorker(tupla)
test_dir = os.path.abspath(os.path.join(output_dir, "Prueba_{0}".format(nro_prueba)))
if not os.path.exists(test_dir):
os.mkdir(test_dir)
image_path = os.path.abspath(os.path.join(test_dir, 'iters_por_ep.png'))
worker.guardar_dibujo(image_path)
csv_path = os.path.abspath(os.path.join(test_dir, 'info.csv'))
worker.exportar_info(csv_path, True)
def graficar_diferencias_matrizq(tupla, nro_prueba, output_dir):
worker = GraphMatrizDiffsWorker(tupla)
test_dir = os.path.abspath(os.path.join(output_dir, "Prueba_{0}".format(nro_prueba)))
if not os.path.exists(test_dir):
os.mkdir(test_dir)
image_path = os.path.abspath(os.path.join(test_dir, 'difs_mat_q.png'))
worker.guardar_dibujo(image_path)
csv_path = os.path.abspath(os.path.join(test_dir, 'info.csv'))
worker.exportar_info(csv_path, True)
if __name__ == '__main__':
main()
|
artemutin/beets
|
refs/heads/master
|
test/test_mediafile_edge.py
|
10
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Specific, edge-case tests for the MediaFile metadata layer.
"""
from __future__ import division, absolute_import, print_function
import os
import shutil
import unittest
import mutagen.id3
from test import _common
from beets import mediafile
import six
_sc = mediafile._safe_cast
class EdgeTest(unittest.TestCase):
def test_emptylist(self):
# Some files have an ID3 frame that has a list with no elements.
# This is very hard to produce, so this is just the first 8192
# bytes of a file found "in the wild".
emptylist = mediafile.MediaFile(
os.path.join(_common.RSRC, b'emptylist.mp3')
)
genre = emptylist.genre
self.assertEqual(genre, None)
def test_release_time_with_space(self):
# Ensures that release times delimited by spaces are ignored.
# Amie Street produces such files.
space_time = mediafile.MediaFile(
os.path.join(_common.RSRC, b'space_time.mp3')
)
self.assertEqual(space_time.year, 2009)
self.assertEqual(space_time.month, 9)
self.assertEqual(space_time.day, 4)
def test_release_time_with_t(self):
# Ensures that release times delimited by Ts are ignored.
# The iTunes Store produces such files.
t_time = mediafile.MediaFile(
os.path.join(_common.RSRC, b't_time.m4a')
)
self.assertEqual(t_time.year, 1987)
self.assertEqual(t_time.month, 3)
self.assertEqual(t_time.day, 31)
def test_tempo_with_bpm(self):
# Some files have a string like "128 BPM" in the tempo field
# rather than just a number.
f = mediafile.MediaFile(os.path.join(_common.RSRC, b'bpm.mp3'))
self.assertEqual(f.bpm, 128)
def test_discc_alternate_field(self):
# Different taggers use different vorbis comments to reflect
# the disc and disc count fields: ensure that the alternative
# style works.
f = mediafile.MediaFile(os.path.join(_common.RSRC, b'discc.ogg'))
self.assertEqual(f.disc, 4)
self.assertEqual(f.disctotal, 5)
def test_old_ape_version_bitrate(self):
media_file = os.path.join(_common.RSRC, b'oldape.ape')
f = mediafile.MediaFile(media_file)
self.assertEqual(f.bitrate, 0)
def test_only_magic_bytes_jpeg(self):
# Some jpeg files can only be recognized by their magic bytes and as
# such aren't recognized by imghdr. Ensure that this still works thanks
# to our own follow up mimetype detection based on
# https://github.com/file/file/blob/master/magic/Magdir/jpeg#L12
magic_bytes_file = os.path.join(_common.RSRC, b'only-magic-bytes.jpg')
with open(magic_bytes_file, 'rb') as f:
jpg_data = f.read()
self.assertEqual(
mediafile._imghdr_what_wrapper(jpg_data), 'jpeg')
def test_soundcheck_non_ascii(self):
# Make sure we don't crash when the iTunes SoundCheck field contains
# non-ASCII binary data.
f = mediafile.MediaFile(os.path.join(_common.RSRC,
b'soundcheck-nonascii.m4a'))
self.assertEqual(f.rg_track_gain, 0.0)
class InvalidValueToleranceTest(unittest.TestCase):
def test_safe_cast_string_to_int(self):
self.assertEqual(_sc(int, u'something'), 0)
def test_safe_cast_int_string_to_int(self):
self.assertEqual(_sc(int, u'20'), 20)
def test_safe_cast_string_to_bool(self):
self.assertEqual(_sc(bool, u'whatever'), False)
def test_safe_cast_intstring_to_bool(self):
self.assertEqual(_sc(bool, u'5'), True)
def test_safe_cast_string_to_float(self):
self.assertAlmostEqual(_sc(float, u'1.234'), 1.234)
def test_safe_cast_int_to_float(self):
self.assertAlmostEqual(_sc(float, 2), 2.0)
def test_safe_cast_string_with_cruft_to_float(self):
self.assertAlmostEqual(_sc(float, u'1.234stuff'), 1.234)
def test_safe_cast_negative_string_to_float(self):
self.assertAlmostEqual(_sc(float, u'-1.234'), -1.234)
def test_safe_cast_special_chars_to_unicode(self):
us = _sc(six.text_type, 'caf\xc3\xa9')
self.assertTrue(isinstance(us, six.text_type))
self.assertTrue(us.startswith(u'caf'))
def test_safe_cast_float_with_no_numbers(self):
v = _sc(float, u'+')
self.assertEqual(v, 0.0)
def test_safe_cast_float_with_dot_only(self):
v = _sc(float, u'.')
self.assertEqual(v, 0.0)
def test_safe_cast_float_with_multiple_dots(self):
v = _sc(float, u'1.0.0')
self.assertEqual(v, 1.0)
class SafetyTest(unittest.TestCase, _common.TempDirMixin):
def setUp(self):
self.create_temp_dir()
def tearDown(self):
self.remove_temp_dir()
def _exccheck(self, fn, exc, data=''):
fn = os.path.join(self.temp_dir, fn)
with open(fn, 'w') as f:
f.write(data)
try:
self.assertRaises(exc, mediafile.MediaFile, fn)
finally:
os.unlink(fn) # delete the temporary file
def test_corrupt_mp3_raises_unreadablefileerror(self):
# Make sure we catch Mutagen reading errors appropriately.
self._exccheck(b'corrupt.mp3', mediafile.UnreadableFileError)
def test_corrupt_mp4_raises_unreadablefileerror(self):
self._exccheck(b'corrupt.m4a', mediafile.UnreadableFileError)
def test_corrupt_flac_raises_unreadablefileerror(self):
self._exccheck(b'corrupt.flac', mediafile.UnreadableFileError)
def test_corrupt_ogg_raises_unreadablefileerror(self):
self._exccheck(b'corrupt.ogg', mediafile.UnreadableFileError)
def test_invalid_ogg_header_raises_unreadablefileerror(self):
self._exccheck(b'corrupt.ogg', mediafile.UnreadableFileError,
'OggS\x01vorbis')
def test_corrupt_monkeys_raises_unreadablefileerror(self):
self._exccheck(b'corrupt.ape', mediafile.UnreadableFileError)
def test_invalid_extension_raises_filetypeerror(self):
self._exccheck(b'something.unknown', mediafile.FileTypeError)
def test_magic_xml_raises_unreadablefileerror(self):
self._exccheck(b'nothing.xml', mediafile.UnreadableFileError,
"ftyp")
@unittest.skipUnless(_common.HAVE_SYMLINK, u'platform lacks symlink')
def test_broken_symlink(self):
fn = os.path.join(_common.RSRC, b'brokenlink')
os.symlink('does_not_exist', fn)
try:
self.assertRaises(mediafile.UnreadableFileError,
mediafile.MediaFile, fn)
finally:
os.unlink(fn)
class SideEffectsTest(unittest.TestCase):
def setUp(self):
self.empty = os.path.join(_common.RSRC, b'empty.mp3')
def test_opening_tagless_file_leaves_untouched(self):
old_mtime = os.stat(self.empty).st_mtime
mediafile.MediaFile(self.empty)
new_mtime = os.stat(self.empty).st_mtime
self.assertEqual(old_mtime, new_mtime)
class MP4EncodingTest(unittest.TestCase, _common.TempDirMixin):
def setUp(self):
self.create_temp_dir()
src = os.path.join(_common.RSRC, b'full.m4a')
self.path = os.path.join(self.temp_dir, b'test.m4a')
shutil.copy(src, self.path)
self.mf = mediafile.MediaFile(self.path)
def tearDown(self):
self.remove_temp_dir()
def test_unicode_label_in_m4a(self):
self.mf.label = u'foo\xe8bar'
self.mf.save()
new_mf = mediafile.MediaFile(self.path)
self.assertEqual(new_mf.label, u'foo\xe8bar')
class MP3EncodingTest(unittest.TestCase, _common.TempDirMixin):
def setUp(self):
self.create_temp_dir()
src = os.path.join(_common.RSRC, b'full.mp3')
self.path = os.path.join(self.temp_dir, b'test.mp3')
shutil.copy(src, self.path)
self.mf = mediafile.MediaFile(self.path)
def test_comment_with_latin1_encoding(self):
# Set up the test file with a Latin1-encoded COMM frame. The encoding
# indices defined by MP3 are listed here:
# http://id3.org/id3v2.4.0-structure
self.mf.mgfile['COMM::eng'].encoding = 0
# Try to store non-Latin1 text.
self.mf.comments = u'\u2028'
self.mf.save()
class ZeroLengthMediaFile(mediafile.MediaFile):
@property
def length(self):
return 0.0
class MissingAudioDataTest(unittest.TestCase):
def setUp(self):
super(MissingAudioDataTest, self).setUp()
path = os.path.join(_common.RSRC, b'full.mp3')
self.mf = ZeroLengthMediaFile(path)
def test_bitrate_with_zero_length(self):
del self.mf.mgfile.info.bitrate # Not available directly.
self.assertEqual(self.mf.bitrate, 0)
class TypeTest(unittest.TestCase):
def setUp(self):
super(TypeTest, self).setUp()
path = os.path.join(_common.RSRC, b'full.mp3')
self.mf = mediafile.MediaFile(path)
def test_year_integer_in_string(self):
self.mf.year = u'2009'
self.assertEqual(self.mf.year, 2009)
def test_set_replaygain_gain_to_none(self):
self.mf.rg_track_gain = None
self.assertEqual(self.mf.rg_track_gain, 0.0)
def test_set_replaygain_peak_to_none(self):
self.mf.rg_track_peak = None
self.assertEqual(self.mf.rg_track_peak, 0.0)
def test_set_year_to_none(self):
self.mf.year = None
self.assertIsNone(self.mf.year)
def test_set_track_to_none(self):
self.mf.track = None
self.assertEqual(self.mf.track, 0)
def test_set_date_to_none(self):
self.mf.date = None
self.assertIsNone(self.mf.date)
self.assertIsNone(self.mf.year)
self.assertIsNone(self.mf.month)
self.assertIsNone(self.mf.day)
class SoundCheckTest(unittest.TestCase):
def test_round_trip(self):
data = mediafile._sc_encode(1.0, 1.0)
gain, peak = mediafile._sc_decode(data)
self.assertEqual(gain, 1.0)
self.assertEqual(peak, 1.0)
def test_decode_zero(self):
data = b' 80000000 80000000 00000000 00000000 00000000 00000000 ' \
b'00000000 00000000 00000000 00000000'
gain, peak = mediafile._sc_decode(data)
self.assertEqual(gain, 0.0)
self.assertEqual(peak, 0.0)
def test_malformatted(self):
gain, peak = mediafile._sc_decode(b'foo')
self.assertEqual(gain, 0.0)
self.assertEqual(peak, 0.0)
def test_special_characters(self):
gain, peak = mediafile._sc_decode(u'caf\xe9'.encode('utf-8'))
self.assertEqual(gain, 0.0)
self.assertEqual(peak, 0.0)
def test_decode_handles_unicode(self):
# Most of the time, we expect to decode the raw bytes. But some formats
# might give us text strings, which we need to handle.
gain, peak = mediafile._sc_decode(u'caf\xe9')
self.assertEqual(gain, 0.0)
self.assertEqual(peak, 0.0)
class ID3v23Test(unittest.TestCase, _common.TempDirMixin):
def _make_test(self, ext=b'mp3', id3v23=False):
self.create_temp_dir()
src = os.path.join(_common.RSRC,
b'full.' + ext)
self.path = os.path.join(self.temp_dir,
b'test.' + ext)
shutil.copy(src, self.path)
return mediafile.MediaFile(self.path, id3v23=id3v23)
def _delete_test(self):
self.remove_temp_dir()
def test_v24_year_tag(self):
mf = self._make_test(id3v23=False)
try:
mf.year = 2013
mf.save()
frame = mf.mgfile['TDRC']
self.assertTrue('2013' in six.text_type(frame))
self.assertTrue('TYER' not in mf.mgfile)
finally:
self._delete_test()
def test_v23_year_tag(self):
mf = self._make_test(id3v23=True)
try:
mf.year = 2013
mf.save()
frame = mf.mgfile['TYER']
self.assertTrue('2013' in six.text_type(frame))
self.assertTrue('TDRC' not in mf.mgfile)
finally:
self._delete_test()
def test_v23_on_non_mp3_is_noop(self):
mf = self._make_test(b'm4a', id3v23=True)
try:
mf.year = 2013
mf.save()
finally:
self._delete_test()
def test_image_encoding(self):
"""For compatibility with OS X/iTunes.
See https://github.com/beetbox/beets/issues/899#issuecomment-62437773
"""
for v23 in [True, False]:
mf = self._make_test(id3v23=v23)
try:
mf.images = [
mediafile.Image(b'data', desc=u""),
mediafile.Image(b'data', desc=u"foo"),
mediafile.Image(b'data', desc=u"\u0185"),
]
mf.save()
apic_frames = mf.mgfile.tags.getall('APIC')
encodings = dict([(f.desc, f.encoding) for f in apic_frames])
self.assertEqual(encodings, {
u"": mutagen.id3.Encoding.LATIN1,
u"foo": mutagen.id3.Encoding.LATIN1,
u"\u0185": mutagen.id3.Encoding.UTF16,
})
finally:
self._delete_test()
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
XperiAMM/android_kernel_lge_msm8226
|
refs/heads/maxi-6.0
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
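	# Note: find_time_slice() above is a binary search over the time-ordered
	# slices; it returns the index of the slice containing ts, or -1 when ts
	# falls outside every recorded slice.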
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
libsmelt/Simulator
|
refs/heads/master
|
contrib/python-graph/core/pygraph/algorithms/filters/find.py
|
26
|
# Copyright (c) 2008-2009 Pedro Matiello <pmatiello@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Search filter for finding a specific node.
"""
class find(object):
"""
Search filter for finding a specific node.
"""
def __init__(self, target):
"""
Initialize the filter.
@type target: node
@param target: Target node.
"""
self.graph = None
self.spanning_tree = None
self.target = target
self.done = False
def configure(self, graph, spanning_tree):
"""
Configure the filter.
@type graph: graph
@param graph: Graph.
@type spanning_tree: dictionary
@param spanning_tree: Spanning tree.
"""
self.graph = graph
self.spanning_tree = spanning_tree
def __call__(self, node, parent):
"""
Decide if the given node should be included in the search process.
@type node: node
@param node: Given node.
@type parent: node
@param parent: Given node's parent in the spanning tree.
@rtype: boolean
@return: Whether the given node should be included in the search process.
"""
if (not self.done):
if (node == self.target):
self.done = True
return True
else:
return False
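def _example_usage():
    """
    Illustrative only: a minimal sketch of how this filter is meant to be used,
    assuming the graph class and breadth_first_search() shipped with the same
    python-graph package. The node names are hypothetical.
    """
    from pygraph.classes.graph import graph
    from pygraph.algorithms.searching import breadth_first_search
    gr = graph()
    gr.add_nodes(["a", "b", "c", "d"])
    gr.add_edge(("a", "b"))
    gr.add_edge(("b", "c"))
    gr.add_edge(("c", "d"))
    # Once "c" is visited the filter rejects every further node, so "d" does
    # not appear in the returned spanning tree.
    spanning_tree, ordering = breadth_first_search(gr, root="a", filter=find("c"))
    return spanning_tree, ordering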
|
AbdealiJK/coala
|
refs/heads/master
|
tests/parsing/StringProcessing/EscapeTest.py
|
17
|
from coalib.parsing.StringProcessing import escape
from tests.parsing.StringProcessing.StringProcessingTestBase import (
StringProcessingTestBase)
class EscapeTest(StringProcessingTestBase):
# Test escape() using a single character to escape and default parameters.
def test_normal_behaviour(self):
expected_results = [
r"out1 \'escaped-escape: \\ \' out2",
r"out1 \'escaped-quote: \\' \' out2",
r"out1 \'escaped-anything: \X \' out2",
r"out1 \'two escaped escapes: \\\\ \' out2",
r"out1 \'escaped-quote at end: \\'\' out2",
r"out1 \'escaped-escape at end: \\\' out2",
r"out1 \'str1\' out2 \'str2\' out2",
r"out1 \\' \'str1\' out2 \'str2\' out2",
r"out1 \\\\' \'str1\' out2 \'str2\' out2",
r"out1 \\ \'str1\' out2 \'str2\' out2",
r"out1 \\\\ \'str1\' out2 \'str2\' out2",
r"out1 \\\'str1\' out2 \'str2\' out2",
r"out1 \\\\\'str1\' out2 \'str2\' out2",
r"out1 \'str1\'\'str2\'\'str3\' out2",
r"",
r"out1 out2 out3",
self.bs,
2 * self.bs]
self.assertResultsEqual(
escape,
{(test_string, "'"): result
for test_string, result in zip(self.test_strings,
expected_results)})
# Tests escape() with more than one char to escape and an escape sequence
# that consists of more than one char.
def test_advanced(self):
expected_results = [
r"out()1 'e()scaped-e()scape: \\ ' out2",
r"out()1 'e()scaped-quote: \' ' out2",
r"out()1 'e()scaped-anything: \X ' out2",
r"out()1 'two e()scaped e()scape()s: \\\\ ' out2",
r"out()1 'e()scaped-quote at end: \'' out2",
r"out()1 'e()scaped-e()scape at end: \\' out2",
r"out()1 '()str()1' out2 '()str2' out2",
r"out()1 \' '()str()1' out2 '()str2' out2",
r"out()1 \\\' '()str()1' out2 '()str2' out2",
r"out()1 \\ '()str()1' out2 '()str2' out2",
r"out()1 \\\\ '()str()1' out2 '()str2' out2",
r"out()1 \\'()str()1' out2 '()str2' out2",
r"out()1 \\\\'()str()1' out2 '()str2' out2",
r"out()1 '()str()1''()str2''()str()()3' out2",
r"",
r"out()1 out2 out()()3",
self.bs,
2 * self.bs]
self.assertResultsEqual(
escape,
{(test_string, "1s33", "()"): result
for test_string, result in zip(self.test_strings,
expected_results)})
# Tests the realistic case when needing to escape spaces inside a shell
# with carets.
def test_windows_shell_space_escape(self):
expected_results = [
r"out1^ 'escaped-escape:^ ^ ^ ^ ^ ^ ^ ^ \\^ '^ out2",
r"out1^ 'escaped-quote:^ ^ ^ ^ ^ ^ ^ ^ ^ \'^ '^ out2",
r"out1^ 'escaped-anything:^ ^ ^ ^ ^ ^ \X^ '^ out2",
r"out1^ 'two^ escaped^ escapes:^ \\\\^ '^ out2",
r"out1^ 'escaped-quote^ at^ end:^ ^ ^ \''^ out2",
r"out1^ 'escaped-escape^ at^ end:^ ^ \\'^ out2",
r"out1^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ 'str1'^ out2^ 'str2'^ out2",
r"out1^ \'^ ^ ^ ^ ^ ^ ^ ^ 'str1'^ out2^ 'str2'^ out2",
r"out1^ \\\'^ ^ ^ ^ ^ ^ 'str1'^ out2^ 'str2'^ out2",
r"out1^ \\^ ^ ^ ^ ^ ^ ^ ^ 'str1'^ out2^ 'str2'^ out2",
r"out1^ \\\\^ ^ ^ ^ ^ ^ 'str1'^ out2^ 'str2'^ out2",
r"out1^ ^ ^ ^ ^ ^ ^ ^ ^ \\'str1'^ out2^ 'str2'^ out2",
r"out1^ ^ ^ ^ ^ ^ ^ \\\\'str1'^ out2^ 'str2'^ out2",
r"out1^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ 'str1''str2''str3'^ out2",
r"",
r"out1^ out2^ out3",
self.bs,
2 * self.bs]
self.assertResultsEqual(
escape,
{(test_string, " ", "^"): result
for test_string, result in zip(self.test_strings,
expected_results)})
# Tests using iterators instead of strings for the chars to escape. This
    # allows escaping complete strings and not only single chars.
def test_iterators_not_strings(self):
expected_results = [
r"\out1 'escaped-escape: \\ ' out2",
r"\out1 'escaped-quote: \' ' out2",
r"\out1 'escaped-anything: \X ' out2",
r"\out1 'two escaped escapes: \\\\ ' out2",
r"\out1 'escaped-quote at end: \'' out2",
r"\out1 'escaped-escape at end: \\' out2",
r"\out1 'str1' out2 '\str2' out2",
r"\out1 \' 'str1' out2 '\str2' out2",
r"\out1 \\\' 'str1' out2 '\str2' out2",
r"\out1 \\ 'str1' out2 '\str2' out2",
r"\out1 \\\\ 'str1' out2 '\str2' out2",
r"\out1 \\'str1' out2 '\str2' out2",
r"\out1 \\\\'str1' out2 '\str2' out2",
r"\out1 'str1''\str2''str3' out2",
r"",
r"\out1 out2 out3",
self.bs,
2 * self.bs]
self.assertResultsEqual(
escape,
{(test_string, ("out1", "str2")): result
for test_string, result in zip(self.test_strings,
expected_results)})
|
ol-loginov/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/ja/__init__.py
|
12133432
| |
roaet/python-neutronclient
|
refs/heads/master
|
neutronclient/v2_0/__init__.py
|
12133432
| |
aligoren/NameParser
|
refs/heads/master
|
setup.py
|
1
|
from distutils.core import setup
setup(
name = 'NameParser',
version = '0.1',
url = 'https://github.com/aligoren/NameParser',
download_url = 'https://github.com/aligoren/NameParser/archive/master.zip',
author = 'Ali GOREN <goren.ali@yandex.com>',
author_email = 'goren.ali@yandex.com',
license = 'Apache v2.0 License',
packages = ['NameParser'],
description = 'Name Parser Library',
keywords = ['parse', 'parsing', 'parser', 'parsed'],
)
|
dturner-tw/pants
|
refs/heads/master
|
contrib/node/src/python/pants/contrib/node/__init__.py
|
12133432
| |
anushbmx/kitsune
|
refs/heads/master
|
kitsune/lib/__init__.py
|
12133432
| |
bobc/Marlin
|
refs/heads/RC
|
buildroot/share/scripts/g29_auto.py
|
184
|
#!/usr/bin/python3
# This file preprocesses gcode for the new G29 auto bed leveling in Marlin.
# It analyses the first 2 layers and determines the maximum size of the part.
# After this it replaces the line matching g29_keyword (';MarlinG29Script') with the new G29 LRFB command.
# The new file will be created in the same folder.
# your gcode-file/folder
folder = './'
my_file = 'test.gcode'
# this is the minimum of G1 instructions which should be between 2 different heights
min_g1 = 3
# maximum number of lines to parse; we don't want to parse the complete file,
# only the first plane is what we are interested in
max_g1 = 100000000
# g29 keyword
g29_keyword = 'g29'
g29_keyword = g29_keyword.upper()
# output filename
output_file = folder + 'g29_' + my_file
# input filename
input_file = folder + my_file
# minimum scan size
min_size = 40
probing_points = 3 # points x points
# other stuff
min_x = 500
min_y = min_x
max_x = -500
max_y = max_x
last_z = 0.001
layer = 0
lines_of_g1 = 0
gcode = []
# return only g1-lines
def has_g1(line):
return line[:2].upper() == "G1"
# find position in g1 (x,y,z)
def find_axis(line, axis):
found = False
number = ""
for char in line:
if found:
if char == ".":
number += char
elif char == "-":
number += char
else:
try:
int(char)
number += char
except ValueError:
break
else:
found = char.upper() == axis.upper()
try:
return float(number)
except ValueError:
return None
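# Illustrative only (not part of the original script): what find_axis() returns
# for a typical G1 line; the coordinates below are hypothetical.
def _example_find_axis():
    line = "G1 X10.25 Y-3.0 Z0.3 F1500"
    assert find_axis(line, 'x') == 10.25
    assert find_axis(line, 'y') == -3.0
    assert find_axis(line, 'z') == 0.3
    assert find_axis("G1 F1500", 'x') is None  # no X word -> None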
# save the min or max-values for each axis
def set_mima(line):
global min_x, max_x, min_y, max_y, last_z
current_x = find_axis(line, 'x')
current_y = find_axis(line, 'y')
if current_x is not None:
min_x = min(current_x, min_x)
max_x = max(current_x, max_x)
if current_y is not None:
min_y = min(current_y, min_y)
max_y = max(current_y, max_y)
return min_x, max_x, min_y, max_y
# find z in the code and return it
def find_z(gcode, start_at_line=0):
for i in range(start_at_line, len(gcode)):
my_z = find_axis(gcode[i], 'Z')
if my_z is not None:
return my_z, i
def z_parse(gcode, start_at_line=0, end_at_line=0):
i = start_at_line
all_z = []
line_between_z = []
z_at_line = []
# last_z = 0
last_i = -1
while len(gcode) > i:
try:
z, i = find_z(gcode, i + 1)
except TypeError:
break
all_z.append(z)
z_at_line.append(i)
        temp_line = i - last_i - 1
line_between_z.append(i - last_i - 1)
# last_z = z
last_i = i
if 0 < end_at_line <= i or temp_line >= min_g1:
# print('break at line {} at heigth {}'.format(i, z))
break
line_between_z = line_between_z[1:]
return all_z, line_between_z, z_at_line
# get the lines which should be the first layer
def get_lines(gcode, minimum):
i = 0
all_z, line_between_z, z_at_line = z_parse(gcode, end_at_line=max_g1)
for count in line_between_z:
i += 1
if count > minimum:
# print('layer: {}:{}'.format(z_at_line[i-1], z_at_line[i]))
return z_at_line[i - 1], z_at_line[i]
with open(input_file, 'r') as file:
lines = 0
for line in file:
lines += 1
if lines > 1000:
break
if has_g1(line):
gcode.append(line)
file.close()
start, end = get_lines(gcode, min_g1)
for i in range(start, end):
set_mima(gcode[i])
print('x_min:{} x_max:{}\ny_min:{} y_max:{}'.format(min_x, max_x, min_y, max_y))
# resize min/max - values for minimum scan
if max_x - min_x < min_size:
offset_x = int((min_size - (max_x - min_x)) / 2 + 0.5) # int round up
# print('min_x! with {}'.format(int(max_x - min_x)))
min_x = int(min_x) - offset_x
max_x = int(max_x) + offset_x
if max_y - min_y < min_size:
offset_y = int((min_size - (max_y - min_y)) / 2 + 0.5) # int round up
# print('min_y! with {}'.format(int(max_y - min_y)))
min_y = int(min_y) - offset_y
max_y = int(max_y) + offset_y
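# Illustrative only: with min_size = 40, a part spanning 20 mm on an axis gets
# padded by int((40 - 20) / 2 + 0.5) = 10 mm on each side of that axis.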
new_command = 'G29 L{0} R{1} F{2} B{3} P{4}\n'.format(min_x,
max_x,
min_y,
max_y,
probing_points)
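# Illustrative only: with e.g. min_x=10, max_x=60, min_y=5, max_y=55 and
# probing_points=3 this yields the command "G29 L10 R60 F5 B55 P3".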
out_file = open(output_file, 'w')
in_file = open(input_file, 'r')
for line in in_file:
if line[:len(g29_keyword)].upper() == g29_keyword:
out_file.write(new_command)
print('write G29')
else:
out_file.write(line)
in_file.close()
out_file.close()
print('auto G29 finished')
|
mikec964/gships
|
refs/heads/master
|
spacewarish/__init__.py
|
12133432
| |
aronsky/home-assistant
|
refs/heads/dev
|
homeassistant/components/openuv/config_flow.py
|
1
|
"""Config flow to configure the OpenUV component."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.const import (
CONF_API_KEY, CONF_ELEVATION, CONF_LATITUDE, CONF_LONGITUDE,
CONF_SCAN_INTERVAL)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN
@callback
def configured_instances(hass):
"""Return a set of configured OpenUV instances."""
return set(
'{0}, {1}'.format(
entry.data[CONF_LATITUDE], entry.data[CONF_LONGITUDE])
for entry in hass.config_entries.async_entries(DOMAIN))
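# Illustrative only: each entry of the returned set is a "latitude, longitude"
# string, e.g. "32.87336, -117.22743" (coordinates hypothetical).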
@config_entries.HANDLERS.register(DOMAIN)
class OpenUvFlowHandler(config_entries.ConfigFlow):
"""Handle an OpenUV config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize the config flow."""
pass
async def _show_form(self, errors=None):
"""Show the form to the user."""
data_schema = vol.Schema({
vol.Required(CONF_API_KEY): str,
vol.Optional(CONF_LATITUDE, default=self.hass.config.latitude):
cv.latitude,
vol.Optional(CONF_LONGITUDE, default=self.hass.config.longitude):
cv.longitude,
vol.Optional(CONF_ELEVATION, default=self.hass.config.elevation):
vol.Coerce(float),
})
return self.async_show_form(
step_id='user',
data_schema=data_schema,
errors=errors if errors else {},
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
from pyopenuv.util import validate_api_key
if not user_input:
return await self._show_form()
latitude = user_input[CONF_LATITUDE]
longitude = user_input[CONF_LONGITUDE]
elevation = user_input[CONF_ELEVATION]
identifier = '{0}, {1}'.format(latitude, longitude)
if identifier in configured_instances(self.hass):
return await self._show_form({CONF_LATITUDE: 'identifier_exists'})
websession = aiohttp_client.async_get_clientsession(self.hass)
api_key_validation = await validate_api_key(
user_input[CONF_API_KEY], websession)
if not api_key_validation:
return await self._show_form({CONF_API_KEY: 'invalid_api_key'})
scan_interval = user_input.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
user_input.update({
CONF_LATITUDE: latitude,
CONF_LONGITUDE: longitude,
CONF_ELEVATION: elevation,
CONF_SCAN_INTERVAL: scan_interval.seconds,
})
return self.async_create_entry(title=identifier, data=user_input)
|
orchidproject/ar2mav
|
refs/heads/master
|
scripts/calibrate.py
|
1
|
#!/usr/bin/python
import rospy
from sensor_msgs.msg import CameraInfo
from sensor_msgs.srv import SetCameraInfo
class CalibrateHelper:
def __init__(self, width=640, height=360):
self.info = CameraInfo()
self.info.width = width
self.info.height = height
self.info_pub = None
def run(self):
rospy.init_node("calibrate_helper")
self.info_pub = rospy.Publisher("/camera/camera_info", CameraInfo, queue_size=100)
rospy.Service("camera/set_camera_info", SetCameraInfo, self.set_info_cb)
while not rospy.is_shutdown():
self.info.header.stamp = rospy.Time.now()
self.info_pub.publish(self.info)
rospy.sleep(2)
def set_info_cb(self, data):
print data.camera_info
self.info = data.camera_info
return True, "Hi"
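    # The (bool, str) tuple returned by set_info_cb() is mapped by rospy onto
    # the SetCameraInfo response fields (success, status_message).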
if __name__ == "__main__":
try:
cal = CalibrateHelper()
cal.run()
except KeyboardInterrupt:
print cal.info
pass
|
Weuxel/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/variables/variable-in-path/gyptest-variable-in-path.py
|
342
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure <(CONFIGURATION_NAME) variable is correctly expanded.
"""
import TestGyp
import sys
test = TestGyp.TestGyp()
test.set_configuration('C1')
test.run_gyp('variable-in-path.gyp')
test.build('variable-in-path.gyp', 'hello1')
test.build('variable-in-path.gyp', 'hello2')
test.pass_test()
|