repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
tillrohrmann/flink
|
refs/heads/master
|
flink-python/pyflink/common/restart_strategy.py
|
11
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABCMeta
from datetime import timedelta
from py4j.java_gateway import get_java_class
from pyflink.java_gateway import get_gateway
from pyflink.util.utils import to_j_flink_time, from_j_flink_time
__all__ = ['RestartStrategies', 'RestartStrategyConfiguration']
class RestartStrategyConfiguration(object):
    """
    Abstract base class of restart strategy configurations.

    Wraps the corresponding Java ``RestartStrategyConfiguration`` object and
    delegates every operation to it.
    """
    __metaclass__ = ABCMeta

    def __init__(self, j_restart_strategy_configuration):
        # Handle to the Java-side configuration object (via py4j).
        self._j_restart_strategy_configuration = j_restart_strategy_configuration

    def get_description(self):
        """
        Returns a description which is shown in the web interface.

        :return: Description of the restart strategy.
        """
        return self._j_restart_strategy_configuration.getDescription()

    def __eq__(self, other):
        # Equal iff same Python wrapper class and equal Java-side objects.
        if not isinstance(other, self.__class__):
            return False
        return (self._j_restart_strategy_configuration ==
                other._j_restart_strategy_configuration)

    def __hash__(self):
        # Delegate to the Java object's hashCode so __eq__ and __hash__ agree.
        return self._j_restart_strategy_configuration.hashCode()
class RestartStrategies(object):
    """
    This class defines methods to generate RestartStrategyConfigurations. These configurations are
    used to create RestartStrategies at runtime.

    The RestartStrategyConfigurations are used to decouple the core module from the runtime module.
    """

    @staticmethod
    def _check_time_param(name, value):
        # Shared validation for time-like parameters: both millisecond
        # integers and datetime.timedelta objects are accepted.
        # Fix: the original messages were copy-pasted and referred to the
        # wrong parameter name; the name is now passed in explicitly.
        if not isinstance(value, (timedelta, int)):
            raise TypeError("The parameter '%s' "
                            "only supports integer and datetime.timedelta, current input "
                            "type is %s." % (name, type(value)))

    class NoRestartStrategyConfiguration(RestartStrategyConfiguration):
        """
        Configuration representing no restart strategy.
        """

        def __init__(self, j_restart_strategy=None):
            # If an existing Java object is given (e.g. when converting from a
            # Java-side configuration), wrap it directly; otherwise create a
            # fresh one through the py4j gateway.
            if j_restart_strategy is None:
                gateway = get_gateway()
                self._j_restart_strategy_configuration = \
                    gateway.jvm.RestartStrategies.NoRestartStrategyConfiguration()
                super(RestartStrategies.NoRestartStrategyConfiguration, self)\
                    .__init__(self._j_restart_strategy_configuration)
            else:
                super(RestartStrategies.NoRestartStrategyConfiguration, self)\
                    .__init__(j_restart_strategy)

    class FixedDelayRestartStrategyConfiguration(RestartStrategyConfiguration):
        """
        Configuration representing a fixed delay restart strategy.
        """

        def __init__(self, restart_attempts=None, delay_between_attempts_interval=None,
                     j_restart_strategy=None):
            if j_restart_strategy is None:
                RestartStrategies._check_time_param(
                    'delay_between_attempts_interval', delay_between_attempts_interval)
                gateway = get_gateway()
                self._j_restart_strategy_configuration = \
                    gateway.jvm.RestartStrategies.fixedDelayRestart(
                        restart_attempts,
                        to_j_flink_time(delay_between_attempts_interval))
                super(RestartStrategies.FixedDelayRestartStrategyConfiguration, self)\
                    .__init__(self._j_restart_strategy_configuration)
            else:
                super(RestartStrategies.FixedDelayRestartStrategyConfiguration, self)\
                    .__init__(j_restart_strategy)

        def get_restart_attempts(self):
            """
            :return: Number of restart attempts allowed before the job fails.
            """
            return self._j_restart_strategy_configuration.getRestartAttempts()

        def get_delay_between_attempts_interval(self):
            """
            :return: Delay between restart attempts as ``datetime.timedelta``.
            """
            return from_j_flink_time(
                self._j_restart_strategy_configuration.getDelayBetweenAttemptsInterval())

    class FailureRateRestartStrategyConfiguration(RestartStrategyConfiguration):
        """
        Configuration representing a failure rate restart strategy.
        """

        def __init__(self,
                     max_failure_rate=None,
                     failure_interval=None,
                     delay_between_attempts_interval=None,
                     j_restart_strategy=None):
            if j_restart_strategy is None:
                RestartStrategies._check_time_param(
                    'failure_interval', failure_interval)
                RestartStrategies._check_time_param(
                    'delay_between_attempts_interval', delay_between_attempts_interval)
                gateway = get_gateway()
                self._j_restart_strategy_configuration = \
                    gateway.jvm.RestartStrategies\
                    .FailureRateRestartStrategyConfiguration(
                        max_failure_rate,
                        to_j_flink_time(failure_interval),
                        to_j_flink_time(delay_between_attempts_interval))
                super(RestartStrategies.FailureRateRestartStrategyConfiguration, self)\
                    .__init__(self._j_restart_strategy_configuration)
            else:
                super(RestartStrategies.FailureRateRestartStrategyConfiguration, self)\
                    .__init__(j_restart_strategy)

        def get_max_failure_rate(self):
            """
            :return: Maximum number of restarts in the failure interval.
            """
            return self._j_restart_strategy_configuration.getMaxFailureRate()

        def get_failure_interval(self):
            """
            :return: Failure measurement interval as ``datetime.timedelta``.
            """
            return from_j_flink_time(self._j_restart_strategy_configuration.getFailureInterval())

        def get_delay_between_attempts_interval(self):
            """
            :return: Delay between restart attempts as ``datetime.timedelta``.
            """
            return from_j_flink_time(self._j_restart_strategy_configuration
                                     .getDelayBetweenAttemptsInterval())

    class FallbackRestartStrategyConfiguration(RestartStrategyConfiguration):
        """
        Restart strategy configuration that could be used by jobs to use cluster level restart
        strategy. Useful especially when one has a custom implementation of restart strategy set via
        flink-conf.yaml.
        """

        def __init__(self, j_restart_strategy=None):
            if j_restart_strategy is None:
                gateway = get_gateway()
                self._j_restart_strategy_configuration = \
                    gateway.jvm.RestartStrategies.FallbackRestartStrategyConfiguration()
                super(RestartStrategies.FallbackRestartStrategyConfiguration, self)\
                    .__init__(self._j_restart_strategy_configuration)
            else:
                super(RestartStrategies.FallbackRestartStrategyConfiguration, self)\
                    .__init__(j_restart_strategy)

    @staticmethod
    def _from_j_restart_strategy(j_restart_strategy):
        """
        Wraps a Java RestartStrategyConfiguration in the matching Python class.

        :param j_restart_strategy: the Java configuration object, or None.
        :return: the corresponding Python wrapper, or None if input was None.
        :raise Exception: if the Java class is not one of the known strategies.
        """
        if j_restart_strategy is None:
            return None
        gateway = get_gateway()
        NoRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
            .NoRestartStrategyConfiguration
        FixedDelayRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
            .FixedDelayRestartStrategyConfiguration
        FailureRateRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
            .FailureRateRestartStrategyConfiguration
        FallbackRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
            .FallbackRestartStrategyConfiguration
        # Dispatch on the fully-qualified Java class name.
        clz = j_restart_strategy.getClass()
        if clz.getName() == get_java_class(NoRestartStrategyConfiguration).getName():
            return RestartStrategies.NoRestartStrategyConfiguration(
                j_restart_strategy=j_restart_strategy)
        elif clz.getName() == get_java_class(FixedDelayRestartStrategyConfiguration).getName():
            return RestartStrategies.FixedDelayRestartStrategyConfiguration(
                j_restart_strategy=j_restart_strategy)
        elif clz.getName() == get_java_class(FailureRateRestartStrategyConfiguration).getName():
            return RestartStrategies.FailureRateRestartStrategyConfiguration(
                j_restart_strategy=j_restart_strategy)
        elif clz.getName() == get_java_class(FallbackRestartStrategyConfiguration).getName():
            return RestartStrategies.FallbackRestartStrategyConfiguration(
                j_restart_strategy=j_restart_strategy)
        else:
            raise Exception("Unsupported java RestartStrategyConfiguration: %s" % clz.getName())

    @staticmethod
    def no_restart():
        """
        Generates NoRestartStrategyConfiguration.

        :return: The :class:`NoRestartStrategyConfiguration`.
        """
        return RestartStrategies.NoRestartStrategyConfiguration()

    @staticmethod
    def fall_back_restart():
        """
        Generates FallbackRestartStrategyConfiguration, deferring to the
        cluster-level restart strategy.

        :return: The :class:`FallbackRestartStrategyConfiguration`.
        """
        return RestartStrategies.FallbackRestartStrategyConfiguration()

    @staticmethod
    def fixed_delay_restart(restart_attempts, delay_between_attempts):
        """
        Generates a FixedDelayRestartStrategyConfiguration.

        :param restart_attempts: Number of restart attempts for the FixedDelayRestartStrategy.
        :param delay_between_attempts: Delay in-between restart attempts for the
                                       FixedDelayRestartStrategy, the input could be integer value
                                       in milliseconds or datetime.timedelta object.
        :return: The :class:`FixedDelayRestartStrategyConfiguration`.
        """
        return RestartStrategies.FixedDelayRestartStrategyConfiguration(
            restart_attempts, delay_between_attempts)

    @staticmethod
    def failure_rate_restart(failure_rate, failure_interval, delay_interval):
        """
        Generates a FailureRateRestartStrategyConfiguration.

        :param failure_rate: Maximum number of restarts in given interval ``failure_interval``
                             before failing a job.
        :param failure_interval: Time interval for failures, the input could be integer value
                                 in milliseconds or datetime.timedelta object.
        :param delay_interval: Delay in-between restart attempts, the input could be integer value
                               in milliseconds or datetime.timedelta object.
        :return: The :class:`FailureRateRestartStrategyConfiguration`.
        """
        return RestartStrategies.FailureRateRestartStrategyConfiguration(
            failure_rate, failure_interval, delay_interval)
|
StrellaGroup/erpnext
|
refs/heads/develop
|
erpnext/patches/v12_0/set_default_for_add_taxes_from_item_tax_template.py
|
1
|
import frappe
def execute():
    """Patch: enable 'add_taxes_from_item_tax_template' in Accounts Settings.

    Sets the flag on the (single) Accounts Settings document and also stores
    the same value via ``frappe.db.set_default``.
    """
    frappe.db.set_value("Accounts Settings", None, "add_taxes_from_item_tax_template", 1)
    frappe.db.set_default("add_taxes_from_item_tax_template", 1)
|
grpc/grpc
|
refs/heads/master
|
src/python/grpcio/grpc/framework/foundation/stream_util.py
|
26
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpful utilities related to the stream module."""
import logging
import threading
from grpc.framework.foundation import stream
_NO_VALUE = object()
_LOGGER = logging.getLogger(__name__)
class TransformingConsumer(stream.Consumer):
    """A stream.Consumer that passes a transformation of its input to another."""

    def __init__(self, transformation, downstream):
        # Callable applied to every value before forwarding it downstream.
        self._apply = transformation
        self._target = downstream

    def consume(self, value):
        self._target.consume(self._apply(value))

    def terminate(self):
        # Termination carries no value, so nothing to transform.
        self._target.terminate()

    def consume_and_terminate(self, value):
        self._target.consume_and_terminate(self._apply(value))
class IterableConsumer(stream.Consumer):
    """A Consumer that when iterated over emits the values it has consumed."""

    def __init__(self):
        # Guards _values and _active, and signals iterators blocked in next().
        self._condition = threading.Condition()
        # FIFO buffer of values not yet handed out by next().
        self._values = []
        # False once the stream has been terminated; no further values accepted.
        self._active = True

    def consume(self, value):
        """Buffer a value and wake one waiting iterator; no-op if terminated."""
        with self._condition:
            if self._active:
                self._values.append(value)
                self._condition.notify()

    def terminate(self):
        """Mark the stream finished and wake any iterator blocked in next()."""
        with self._condition:
            self._active = False
            self._condition.notify()

    def consume_and_terminate(self, value):
        """Buffer one final value and mark the stream finished."""
        with self._condition:
            if self._active:
                self._values.append(value)
                self._active = False
                self._condition.notify()

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iterator protocol; delegates to the Python 2-style next().
        return self.next()

    def next(self):
        # Block until a value is available or the stream is terminated.
        # Buffered values are still drained after termination; StopIteration
        # is raised only once the buffer is empty and the stream is inactive.
        with self._condition:
            while self._active and not self._values:
                self._condition.wait()
            if self._values:
                return self._values.pop(0)
            else:
                raise StopIteration()
class ThreadSwitchingConsumer(stream.Consumer):
    """A Consumer decorator that affords serialization and asynchrony."""

    def __init__(self, sink, pool):
        # Guards _spinning, _values and _active.
        self._lock = threading.Lock()
        self._sink = sink
        self._pool = pool
        # True if self._spin has been submitted to the pool to be called once and
        # that call has not yet returned, False otherwise.
        self._spinning = False
        # Values consumed while a spin is in flight; drained by _spin.
        self._values = []
        # False once terminate/consume_and_terminate has been called.
        self._active = True

    def _spin(self, sink, value, terminate):
        # Runs on a pool thread: delivers one value (or termination) to the
        # sink, then loops to drain anything queued meanwhile. Exactly one
        # _spin call is in flight at a time, which serializes sink calls.
        while True:
            try:
                if value is _NO_VALUE:
                    # Sentinel: termination without a final value.
                    sink.terminate()
                elif terminate:
                    sink.consume_and_terminate(value)
                else:
                    sink.consume(value)
            except Exception as e:  # pylint:disable=broad-except
                # Sink failures are logged but must not kill the spin loop.
                _LOGGER.exception(e)

            with self._lock:
                if terminate:
                    # Termination delivered; this spin is done for good.
                    self._spinning = False
                    return
                elif self._values:
                    # More queued work: take the next value; if it is the last
                    # one and the stream is closed, deliver it as terminal.
                    value = self._values.pop(0)
                    terminate = not self._values and not self._active
                elif not self._active:
                    # Queue drained but stream was closed: deliver termination.
                    value = _NO_VALUE
                    terminate = True
                else:
                    # Nothing left to do; stop spinning.
                    self._spinning = False
                    return

    def consume(self, value):
        with self._lock:
            if self._active:
                if self._spinning:
                    # A pool thread is already delivering; queue for it.
                    self._values.append(value)
                else:
                    self._pool.submit(self._spin, self._sink, value, False)
                    self._spinning = True

    def terminate(self):
        with self._lock:
            if self._active:
                self._active = False
                # If a spin is running it will notice _active is False and
                # deliver termination itself; otherwise start one to do so.
                if not self._spinning:
                    self._pool.submit(self._spin, self._sink, _NO_VALUE, True)
                    self._spinning = True

    def consume_and_terminate(self, value):
        with self._lock:
            if self._active:
                self._active = False
                if self._spinning:
                    # The running spin will deliver this as the terminal value.
                    self._values.append(value)
                else:
                    self._pool.submit(self._spin, self._sink, value, True)
                    self._spinning = True
|
BMJHayward/django
|
refs/heads/master
|
django/contrib/admindocs/middleware.py
|
477
|
from django import http
from django.conf import settings
class XViewMiddleware(object):
    """
    Adds an X-View header to internal HEAD requests -- used by the documentation system.
    """

    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        If the request method is HEAD and either the IP is internal or the
        user is a logged-in staff member, quickly return with an x-header
        indicating the view function. This is used by the documentation module
        to lookup the view function for an arbitrary page.
        """
        # AuthenticationMiddleware must run first so request.user exists.
        assert hasattr(request, 'user'), (
            "The XView middleware requires authentication middleware to be "
            "installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'.")
        if request.method == 'HEAD' and (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS or
                                         (request.user.is_active and request.user.is_staff)):
            response = http.HttpResponse()
            response['X-View'] = "%s.%s" % (view_func.__module__, view_func.__name__)
            return response
        # Implicit None return: normal view processing continues.
|
TheLady/python-livereload
|
refs/heads/master
|
livereload/server.py
|
3
|
# -*- coding: utf-8 -*-
"""
livereload.server
~~~~~~~~~~~~~~~~~
WSGI app server for livereload.
:copyright: (c) 2013 - 2015 by Hsiaoming Yang
:license: BSD, see LICENSE for more details.
"""
import errno
import logging
import os
import shlex
import threading
import time
import webbrowser
from subprocess import Popen, PIPE

from tornado import escape
from tornado import httputil
from tornado import web
from tornado.ioloop import IOLoop
from tornado.log import LogFormatter
from tornado.wsgi import WSGIContainer

from six import string_types, PY3

from .handlers import LiveReloadHandler, LiveReloadJSHandler
from .handlers import ForceReloadHandler, StaticFileHandler
from .watcher import get_watcher_class
logger = logging.getLogger('livereload')
HEAD_END = b'</head>'
def shell(cmd, output=None, mode='w', cwd=None, shell=False):
    """Execute a shell command.

    You can add a shell command::

        server.watch(
            'style.less', shell('lessc style.less', output='style.css')
        )

    :param cmd: a shell command, string or list
    :param output: output stdout to the given file
    :param mode: only works with output, mode ``w`` means write,
                 mode ``a`` means append
    :param cwd: set working directory before command is executed.
    :param shell: if true, on Unix the executable argument specifies a
                  replacement shell for the default ``/bin/sh``.
    :return: a zero-argument callable that runs the command when invoked.
    """
    if not output:
        output = os.devnull
    else:
        folder = os.path.dirname(output)
        if folder and not os.path.isdir(folder):
            os.makedirs(folder)
    # A plain string command must be tokenized unless it runs through a
    # shell, in which case the shell itself does the parsing.
    if not isinstance(cmd, (list, tuple)) and not shell:
        cmd = shlex.split(cmd)

    def run_shell():
        try:
            p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd,
                      shell=shell)
        except OSError as e:
            logger.error(e)
            # Fix: ``os.errno`` was removed in Python 3.7 (it only worked by
            # accident before); use the errno module directly.
            if e.errno == errno.ENOENT:  # file (command) not found
                logger.error("maybe you haven't installed %s", cmd[0])
            return e
        stdout, stderr = p.communicate()
        if stderr:
            logger.error(stderr)
            return stderr
        #: stdout is bytes, decode for python3
        if PY3:
            stdout = stdout.decode()
        with open(output, mode) as f:
            f.write(stdout)

    return run_shell
class LiveScriptInjector(web.OutputTransform):
    """Tornado output transform that injects the livereload script into HTML.

    NOTE(review): ``self.script`` is never set here; a subclass is expected to
    provide it as a class attribute (see the ``ConfiguredTransform`` subclass
    created in ``Server.application``) -- confirm before using standalone.
    """

    def __init__(self, request):
        super(LiveScriptInjector, self).__init__(request)

    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # Insert the script right before </head> (bytes comparison) and keep
        # Content-Length consistent with the enlarged body.
        if HEAD_END in chunk:
            chunk = chunk.replace(HEAD_END, self.script + HEAD_END)
            if 'Content-Length' in headers:
                length = int(headers['Content-Length']) + len(self.script)
                headers['Content-Length'] = str(length)
        return status_code, headers, chunk
class LiveScriptContainer(WSGIContainer):
    """WSGI container that injects the livereload script into HTML responses.

    Mirrors tornado's WSGIContainer.__call__ but rewrites the body before
    writing it out.
    """

    def __init__(self, wsgi_app, script=''):
        self.wsgi_app = wsgi_app
        # Script bytes/str inserted before </head> in every HTML response.
        self.script = script

    def __call__(self, request):
        data = {}
        response = []

        def start_response(status, response_headers, exc_info=None):
            # WSGI callback: capture status/headers for use after the app runs.
            data["status"] = status
            data["headers"] = response_headers
            return response.append

        app_response = self.wsgi_app(
            WSGIContainer.environ(request), start_response)
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            # Per the WSGI spec, close() must be called if present.
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")

        # Status is e.g. "200 OK" -- split into the code and reason phrase.
        status_code, reason = data["status"].split(' ', 1)
        status_code = int(status_code)
        headers = data["headers"]
        header_set = set(k.lower() for (k, v) in headers)
        body = escape.utf8(body)
        # Inject the livereload script before </head>, if the body has one.
        if HEAD_END in body:
            body = body.replace(HEAD_END, self.script + HEAD_END)
        # 304 responses must not carry entity headers.
        if status_code != 304:
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
        if "server" not in header_set:
            headers.append(("Server", "LiveServer"))

        start_line = httputil.ResponseStartLine(
            "HTTP/1.1", status_code, reason
        )
        header_obj = httputil.HTTPHeaders()
        for key, value in headers:
            # Re-sync Content-Length with the possibly-enlarged body.
            if key == 'Content-Length':
                value = str(len(body))
            header_obj.add(key, value)
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)
class Server(object):
    """Livereload server interface.

    Initialize a server and watch file changes::

        server = Server(wsgi_app)
        server.serve()

    :param app: a wsgi application instance
    :param watcher: A Watcher instance, you don't have to initialize
                    it by yourself. Under Linux, you will want to install
                    pyinotify and use INotifyWatcher() to avoid wasted
                    CPU usage.
    """

    def __init__(self, app=None, watcher=None):
        # Root directory for static file serving; set later by serve(root=...).
        self.root = None
        self.app = app
        # Pick the best available watcher implementation when none is given.
        if not watcher:
            watcher_cls = get_watcher_class()
            watcher = watcher_cls()
        self.watcher = watcher

    def watch(self, filepath, func=None, delay=None):
        """Add the given filepath for watcher list.

        Once you have intialized a server, watch file changes before
        serve the server::

            server.watch('static/*.stylus', 'make static')
            def alert():
                print('foo')
            server.watch('foo.txt', alert)
            server.serve()

        :param filepath: files to be watched, it can be a filepath,
                         a directory, or a glob pattern
        :param func: the function to be called, it can be a string of
                     shell command, or any callable object without
                     parameters
        :param delay: Delay sending the reload message. Use 'forever' to
                      not send it. This is useful to compile sass files to
                      css, but reload on changed css files then only.
        """
        # A string func is interpreted as a shell command to run on change.
        if isinstance(func, string_types):
            func = shell(func)
        self.watcher.watch(filepath, func, delay)

    def application(self, port, host, liveport=None, debug=None):
        """Build and bind the tornado application(s) for web + livereload."""
        LiveReloadHandler.watcher = self.watcher
        # By default the livereload websocket shares the web port.
        if liveport is None:
            liveport = port
        # Debug (tornado autoreload) defaults to on when wrapping a WSGI app.
        if debug is None and self.app:
            debug = True

        live_handlers = [
            (r'/livereload', LiveReloadHandler),
            (r'/forcereload', ForceReloadHandler),
            (r'/livereload.js', LiveReloadJSHandler)
        ]

        # The livereload.js snippet.
        # Uses JavaScript to dynamically inject the client's hostname.
        # This allows for serving on 0.0.0.0.
        # NOTE(review): the adjacent '' pairs concatenate to nothing, so the
        # generated src attribute ends up unquoted -- this looks like a
        # mangled \' escape; verify the emitted HTML against upstream.
        live_script = escape.utf8((
            '<script type="text/javascript">'
            'document.write("<script src=''http://"'
            ' + window.location.hostname + ":{port}/livereload.js''>'
            ' </"+"script>");'
            '</script>'
        ).format(port=liveport))

        web_handlers = self.get_web_handlers(live_script)

        # Subclass carrying the script so LiveScriptInjector can inject it.
        class ConfiguredTransform(LiveScriptInjector):
            script = live_script

        if liveport == port:
            # Single app serves both web content and the livereload endpoints.
            handlers = live_handlers + web_handlers
            app = web.Application(
                handlers=handlers,
                debug=debug,
                transforms=[ConfiguredTransform]
            )
            app.listen(port, address=host)
        else:
            # Separate apps: web content on `port`, livereload on `liveport`.
            app = web.Application(
                handlers=web_handlers,
                debug=debug,
                transforms=[ConfiguredTransform]
            )
            app.listen(port, address=host)
            live = web.Application(handlers=live_handlers, debug=False)
            live.listen(liveport, address=host)

    def get_web_handlers(self, script):
        """Return handlers serving either the wrapped WSGI app or static files."""
        if self.app:
            # Wrap the WSGI app so the livereload script gets injected.
            fallback = LiveScriptContainer(self.app, script)
            return [(r'.*', web.FallbackHandler, {'fallback': fallback})]
        return [
            (r'/(.*)', StaticFileHandler, {
                'path': self.root or '.',
                'default_filename': 'index.html',
            }),
        ]

    def serve(self, port=5500, liveport=None, host=None, root=None, debug=None,
              open_url=False, restart_delay=2, open_url_delay=None):
        """Start serve the server with the given port.

        :param port: serve on this port, default is 5500
        :param liveport: live reload on this port
        :param host: serve on this hostname, default is 127.0.0.1
        :param root: serve static on this root directory
        :param debug: set debug mode, which autoreloads the app on code changes
                      via Tornado (and causes polling). Defaults to True when
                      ``self.app`` is set, otherwise False.
        :param open_url: deprecated; use ``open_url_delay`` instead
        :param restart_delay: delay appended with the initial reload marker
        :param open_url_delay: open webbrowser after the delay seconds
        """
        host = host or '127.0.0.1'
        if root is not None:
            self.root = root
        self._setup_logging()
        logger.info('Serving on http://%s:%s' % (host, port))
        self.application(port, host, liveport=liveport, debug=debug)

        # Async open web browser after 5 sec timeout
        if open_url or open_url_delay:
            if open_url:
                logger.warn('Use `open_url_delay` instead of `open_url`')
            sleep = open_url_delay or 5

            def opener():
                time.sleep(sleep)
                webbrowser.open('http://%s:%s' % (host, port))
            threading.Thread(target=opener).start()

        try:
            # Seed the watcher with a marker change, then run the IO loop
            # until interrupted.
            self.watcher._changes.append(('__livereload__', restart_delay))
            LiveReloadHandler.start_tasks()
            IOLoop.instance().start()
        except KeyboardInterrupt:
            logger.info('Shutting down...')

    def _setup_logging(self):
        logger.setLevel(logging.INFO)
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
        # need a tornado logging handler to prevent IOLoop._setup_logging
        logging.getLogger('tornado').addHandler(channel)
|
dhennes/pykep
|
refs/heads/master
|
PyKEP/examples/_ex6.py
|
5
|
def run_example6(n_seg=5):
    """
    This example demonstrates the optimization of a multiple rendezvous mission
    (low-thrust). Such a mission (including more asteroids) is also called
    asteroid hopping.

    The spacecraft performances, as well as the asteroids visited, are taken
    from the GTOC7 problem description.

    :param n_seg: number of segments used to discretize each low-thrust leg.
    :return: tuple (problem, champion decision vector, plot axes or None).
    """
    from PyGMO import algorithm, population
    from PyKEP.trajopt import mr_lt_nep
    from PyKEP.planet import gtoc7

    # Local SLSQP optimizer; screen_output shows convergence progress.
    algo = algorithm.scipy_slsqp(max_iter=500, acc=1e-5, screen_output=True)

    # Multiple-rendezvous low-thrust problem over four GTOC7 asteroids.
    prob = mr_lt_nep(
        t0=[9600., 9700.],
        seq=[gtoc7(5318), gtoc7(14254), gtoc7(7422), gtoc7(5028)],
        n_seg=n_seg,
        mass=[800., 2000.],
        leg_tof=[100., 365.25],
        rest=[30., 365.25],
        Tmax=0.3,
        Isp=3000.,
        traj_tof=365.25 * 3.,
        objective='mass',
        c_tol=1e-05
    )

    # Single-individual population: SLSQP refines one initial guess.
    pop = population(prob, 1)
    pop = algo.evolve(pop)
    solution = pop.champion.x

    # Plot only if the champion satisfies all constraints.
    if prob.feasibility_x(solution):
        print("FEASIBILE!!!")
        ax = prob.plot(solution)
    else:
        print("INFEASIBLE :(")
        ax = None

    return prob, solution, ax
|
ruslanloman/nova
|
refs/heads/master
|
nova/tests/functional/v3/test_flavor_manage.py
|
31
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class FlavorManageSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the flavor-manage extension (create/delete)."""
    ADMIN_API = True
    extension_name = 'flavor-manage'
    # TODO(park): Overriding '_api_version' till all functional tests
    # are merged between v2 and v2.1. After that base class variable
    # itself can be changed to 'v2'
    _api_version = 'v2'

    def _get_flags(self):
        # Enable the legacy v2 extensions that the sample responses rely on.
        f = super(FlavorManageSampleJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavormanage.'
            'Flavormanage')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavor_disabled.'
            'Flavor_disabled')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavor_access.'
            'Flavor_access')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavorextradata.'
            'Flavorextradata')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavor_swap.'
            'Flavor_swap')
        return f

    def _create_flavor(self):
        """Create a flavor."""
        subs = {
            'flavor_id': 10,
            'flavor_name': "test_flavor"
        }
        response = self._do_post("flavors",
                                 "flavor-create-post-req",
                                 subs)
        subs.update(self._get_regexes())
        self._verify_response("flavor-create-post-resp", subs, response, 200)

    def test_create_flavor(self):
        # Get api sample to create a flavor.
        self._create_flavor()

    def test_delete_flavor(self):
        # Get api sample to delete a flavor.
        self._create_flavor()
        response = self._do_delete("flavors/10")
        # DELETE returns 202 Accepted with an empty body.
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, '')
|
ManjiriBirajdar/coala
|
refs/heads/master
|
coalib/collecting/Collectors.py
|
4
|
import functools
import os
import pkg_resources
import itertools
from pyprint.NullPrinter import NullPrinter
from coalib.bears.BEAR_KIND import BEAR_KIND
from coalib.collecting.Importers import iimport_objects
from coala_utils.decorators import yield_once
from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
from coalib.parsing.Globbing import fnmatch, iglob, glob_escape
from coalib.output.printers.LogPrinter import LogPrinter
def _get_kind(bear_class):
try:
return bear_class.kind()
except NotImplementedError:
return None
def _import_bears(file_path, kinds):
    """Yield bear classes found in *file_path* whose kind is in *kinds*."""
    # Bears advertised via a module-level ``__additional_bears__`` list
    # (recursive imports).
    for bear_list in iimport_objects(file_path,
                                     names='__additional_bears__',
                                     types=list):
        yield from (bear for bear in bear_list
                    if _get_kind(bear) in kinds)
    # Bears defined directly in the module: anything exposing a ``kind``
    # attribute (normal import).
    for candidate in iimport_objects(file_path,
                                     attributes='kind',
                                     local=True):
        if _get_kind(candidate) in kinds:
            yield candidate
@yield_once
def icollect(file_paths, ignored_globs=None):
    """
    Evaluate globs in file paths and return all matching files.

    :param file_paths:    file path or list of such that can include globs
    :param ignored_globs: list of globs to ignore when matching files
    :return:              iterator that yields tuple of path of a matching
                          file, the glob where it was found
    """
    # Accept a bare string as shorthand for a one-element list.
    if isinstance(file_paths, str):
        file_paths = [file_paths]

    for pattern in file_paths:
        for match in iglob(pattern):
            ignored = ignored_globs and fnmatch(match, ignored_globs)
            if not ignored:
                yield match, pattern
def collect_files(file_paths, log_printer, ignored_file_paths=None,
                  limit_file_paths=None):
    """
    Evaluate globs in file paths and return all matching files.

    :param file_paths:         file path or list of such that can include globs
    :param log_printer:        log_printer to handle logging
    :param ignored_file_paths: list of globs that match to-be-ignored files
    :param limit_file_paths:   list of globs that the files are limited to
    :return:                   list of paths of all matching files
    """
    if limit_file_paths:
        limit_fnmatch = functools.partial(fnmatch, globs=limit_file_paths)
    else:
        limit_fnmatch = lambda fname: True

    valid_files = [entry
                   for entry in icollect(file_paths, ignored_file_paths)
                   if os.path.isfile(entry[0])]

    if valid_files:
        collected_files, file_globs_with_files = zip(*valid_files)
    else:
        collected_files, file_globs_with_files = [], []

    # Find globs that gave no files and warn the user
    _warn_if_unused_glob(log_printer, file_paths, file_globs_with_files,
                         "No files matching '{}' were found.")
    return [fname for fname in collected_files if limit_fnmatch(fname)]
def collect_dirs(dir_paths, ignored_dir_paths=None):
    """
    Evaluate globs in directory paths and return all matching directories.

    :param dir_paths:         file path or list of such that can include globs
    :param ignored_dir_paths: list of globs that match to-be-ignored dirs
    :return:                  list of paths of all matching directories
    """
    matching = [entry
                for entry in icollect(dir_paths, ignored_dir_paths)
                if os.path.isdir(entry[0])]
    if not matching:
        return []
    collected_dirs, _ = zip(*matching)
    return list(collected_dirs)
@yield_once
def icollect_bears(bear_dirs, bear_globs, kinds, log_printer):
    """
    Collect all bears from bear directories that have a matching kind.

    :param bear_dirs:   directory name or list of such that can contain bears
    :param bear_globs:  globs of bears to collect
    :param kinds:       list of bear kinds to be collected
    :param log_printer: log_printer to handle logging
    :return:            iterator that yields a tuple with bear class and
                        which bear_glob was used to find that bear class.
    """
    for bear_dir, dir_glob in filter(lambda x: os.path.isdir(x[0]),
                                     icollect(bear_dirs)):
        # Since we get a real directory here and since we
        # pass this later to iglob, we need to escape this.
        bear_dir = glob_escape(bear_dir)
        for bear_glob in bear_globs:
            for matching_file in iglob(
                    os.path.join(bear_dir, bear_glob + '.py')):
                try:
                    for bear in _import_bears(matching_file, kinds):
                        yield bear, bear_glob
                except pkg_resources.VersionConflict as exception:
                    # A bear dependency conflicts with an installed package;
                    # warn with a hint instead of aborting collection.
                    log_printer.log_exception(
                        ("Unable to collect bears from {file} because there "
                         "is a conflict with the version of a dependency "
                         "you have installed. This may be resolved by "
                         "creating a separate virtual environment for coala "
                         "or running `pip install {pkg}`. Be aware that the "
                         "latter solution might break other python packages "
                         "that depend on the currently installed "
                         "version.").format(file=matching_file,
                                            pkg=exception.req),
                        exception, log_level=LOG_LEVEL.WARNING)
                except BaseException as exception:
                    # Any other failure in a bear file (malformed module,
                    # import-time exception) is logged and skipped so the
                    # remaining bears can still be collected.
                    log_printer.log_exception(
                        "Unable to collect bears from {file}. Probably the "
                        "file is malformed or the module code raises an "
                        "exception.".format(file=matching_file),
                        exception,
                        log_level=LOG_LEVEL.WARNING)
def collect_bears(bear_dirs, bear_globs, kinds, log_printer,
                  warn_if_unused_glob=True):
    """
    Collect all bears from bear directories that have a matching kind
    matching the given globs.

    :param bear_dirs:           Directory name or list of such that can contain
                                bears.
    :param bear_globs:          Globs of bears to collect.
    :param kinds:               List of bear kinds to be collected.
    :param log_printer:         log_printer to handle logging.
    :param warn_if_unused_glob: True if warning message should be shown if a
                                glob didn't give any bears.
    :return:                    Tuple of list of matching bear classes based on
                                kind. The lists are in the same order as kinds.
    """
    # One bucket per requested kind, positionally aligned with ``kinds``.
    bears_found = tuple([] for i in range(len(kinds)))
    bear_globs_with_bears = set()
    for bear, glob in icollect_bears(bear_dirs, bear_globs, kinds, log_printer):
        # icollect_bears only yields bears whose kind is in ``kinds``,
        # so this index lookup cannot fail.
        index = kinds.index(_get_kind(bear))
        bears_found[index].append(bear)
        bear_globs_with_bears.add(glob)

    if warn_if_unused_glob:
        _warn_if_unused_glob(log_printer, bear_globs, bear_globs_with_bears,
                             "No bears were found matching '{}'.")
    return bears_found
def filter_section_bears_by_languages(bears, languages):
    """
    Filters the bears by languages.

    :param bears:     Dictionary mapping sections to lists of bears.
    :param languages: Languages that bears are being filtered on.
    :return:          New dictionary keeping, per section, only the bears
                      that support at least one requested language (bears
                      declaring 'all' always match).
    """
    # Bears advertising "all" languages must always be kept.
    wanted = {language.lower() for language in languages} | {'all'}
    return {
        section: tuple(
            bear for bear in section_bears
            if wanted & {lang.lower() for lang in bear.LANGUAGES})
        for section, section_bears in bears.items()
    }
def filter_capabilities_by_languages(bears, languages):
    """
    Filters the bears capabilities by languages.

    :param bears:     Dictionary with sections as keys and lists of bears
                      as values.
    :param languages: Languages that bears are being filtered on.
    :return:          New dictionary with languages as keys and their bears
                      capabilities as values. Each capability entry is a
                      two-element tuple: what the bears can detect, and
                      what they can fix.
    """
    wanted = {language.lower() for language in languages}
    capabilities_map = {language: (set(), set()) for language in wanted}
    for section_bears in bears.values():
        for bear in section_bears:
            # A bear implicitly supports 'all'; keep only requested languages.
            supported = {lang.lower() for lang in bear.LANGUAGES} | {'all'}
            matches = supported & wanted
            if not matches:
                continue
            # Original behavior: an arbitrary matching language is charged
            # with this bear's capabilities (set.pop()).
            language = matches.pop()
            detectable, fixable = capabilities_map[language]
            capabilities_map[language] = (detectable | bear.can_detect,
                                          fixable | bear.CAN_FIX)
    return capabilities_map
def get_all_bears_names():
    """Return the names of all bears collectable from the default bear dirs."""
    # Imported lazily to avoid a circular import at module load time.
    from coalib.settings.Section import Section
    log_printer = LogPrinter(NullPrinter())
    local_bears, global_bears = collect_bears(
        Section("").bear_dirs(),
        ["**"],
        [BEAR_KIND.LOCAL, BEAR_KIND.GLOBAL],
        log_printer,
        warn_if_unused_glob=False)
    all_bears = itertools.chain(local_bears, global_bears)
    return [bear.name for bear in all_bears]
def collect_all_bears_from_sections(sections, log_printer):
    """
    Collect all kinds of bears from bear directories given in the sections.

    :param sections:    List of sections whose bear_dirs are searched.
    :param log_printer: log_printer to handle logging.
    :return:            Tuple of dictionaries of local and global bears.
                        The dictionary key is the section and the value a
                        list of Bear classes.
    """
    local_bears, global_bears = {}, {}
    for section in sections:
        section_local, section_global = collect_bears(
            sections[section].bear_dirs(),
            ["**"],
            [BEAR_KIND.LOCAL, BEAR_KIND.GLOBAL],
            log_printer,
            warn_if_unused_glob=False)
        local_bears[section] = section_local
        global_bears[section] = section_global
    return local_bears, global_bears
def _warn_if_unused_glob(log_printer, globs, used_globs, message):
"""
Warn if a glob has not been used.
:param log_printer: The log_printer to handle logging.
:param globs: List of globs that were expected to be used.
:param used_globs: List of globs that were actually used.
:param message: Warning message to display if a glob is unused.
The glob which was unused will be added using
.format()
"""
unused_globs = set(globs) - set(used_globs)
for glob in unused_globs:
log_printer.warn(message.format(glob))
def collect_registered_bears_dirs(entrypoint):
    """
    Searches setuptools for the entrypoint and returns the bear
    directories given by the module.

    :param entrypoint: The entrypoint to find packages with.
    :return:           List of bear directories.
    """
    collected_dirs = []
    for entry_point in pkg_resources.iter_entry_points(entrypoint):
        try:
            registered_package = entry_point.load()
        except pkg_resources.DistributionNotFound:
            # A package advertised the entrypoint but is not installed.
            continue
        collected_dirs.append(os.path.abspath(
            os.path.dirname(registered_package.__file__)))
    return collected_dirs
|
JustAkan/lge-kernel-gproj
|
refs/heads/cm-12.1
|
tools/perf/python/twatch.py
|
7370
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
# Run only when executed directly, not when imported as a module.
if __name__ == '__main__':
    main()
|
lukeis/selenium
|
refs/heads/master
|
py/selenium/webdriver/remote/errorhandler.py
|
2
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import ElementNotSelectableException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import InvalidCookieDomainException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import InvalidSelectorException
from selenium.common.exceptions import ImeNotAvailableException
from selenium.common.exceptions import ImeActivationFailedException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import UnableToSetCookieException
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import ErrorInResponseException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import MoveTargetOutOfBoundsException
# Python 2/3 shim: ``basestring`` exists only on Python 2.  On Python 3
# referencing it raises NameError, in which case ``str`` is substituted so
# the isinstance() checks in ErrorHandler work on both versions.
try:
    basestring
except NameError:  # Python 3.x
    basestring = str
class ErrorCode(object):
    """
    Error codes defined in the WebDriver wire protocol.

    Except for SUCCESS, each constant is a two-element list pairing the
    legacy numeric status code with its string identifier, so callers can
    match either representation via ``status in ErrorCode.X``.
    """
    # Keep in sync with org.openqa.selenium.remote.ErrorCodes and errorcodes.h
    SUCCESS = 0
    NO_SUCH_ELEMENT = [7, 'no such element']
    NO_SUCH_FRAME = [8, 'no such frame']
    UNKNOWN_COMMAND = [9, 'unknown command']
    STALE_ELEMENT_REFERENCE = [10, 'stale element reference']
    ELEMENT_NOT_VISIBLE = [11, 'element not visible']
    INVALID_ELEMENT_STATE = [12, 'invalid element state']
    UNKNOWN_ERROR = [13, 'unknown error']
    ELEMENT_IS_NOT_SELECTABLE = [15, 'element not selectable']
    JAVASCRIPT_ERROR = [17, 'javascript error']
    XPATH_LOOKUP_ERROR = [19, 'invalid selector']
    TIMEOUT = [21, 'timeout']
    NO_SUCH_WINDOW = [23, 'no such window']
    INVALID_COOKIE_DOMAIN = [24, 'invalid cookie domain']
    UNABLE_TO_SET_COOKIE = [25, 'unable to set cookie']
    UNEXPECTED_ALERT_OPEN = [26, 'unexpected alert open']
    NO_ALERT_OPEN = [27, 'no such alert']
    SCRIPT_TIMEOUT = [28, 'script timeout']
    INVALID_ELEMENT_COORDINATES = [29, 'invalid element coordinates']
    IME_NOT_AVAILABLE = [30, 'ime not available']
    IME_ENGINE_ACTIVATION_FAILED = [31, 'ime engine activation failed']
    INVALID_SELECTOR = [32, 'invalid selector']
    MOVE_TARGET_OUT_OF_BOUNDS = [34, 'move target out of bounds']
    INVALID_XPATH_SELECTOR = [51, 'invalid selector']
    # NOTE(review): "TYPER" looks like a typo for "TYPE", but the name is
    # public API, so it is kept for backward compatibility.
    INVALID_XPATH_SELECTOR_RETURN_TYPER = [52, 'invalid selector']
    METHOD_NOT_ALLOWED = [405, 'unsupported operation']
class ErrorHandler(object):
    """
    Handles errors returned by the WebDriver server.
    """
    def check_response(self, response):
        """
        Checks that a JSON response from the WebDriver does not have an error.

        :Args:
         - response - The JSON response from the WebDriver server as a
           dictionary object.

        :Raises: If the response contains an error message.
        """
        # No status, or an explicit SUCCESS, means nothing to raise.
        status = response.get('status', None)
        if status is None or status == ErrorCode.SUCCESS:
            return
        value = None
        message = response.get("message", "")
        screen = response.get("screen", "")
        stacktrace = None
        # Legacy numeric statuses may wrap the real error as a JSON string
        # under 'value'; unwrap it to recover the string status and message.
        if isinstance(status, int):
            value_json = response.get('value', None)
            if value_json and isinstance(value_json, basestring):
                import json
                try:
                    value = json.loads(value_json)
                    status = value.get('error', None)
                    if status is None:
                        status = value["status"]
                        message = value["value"]
                        if not isinstance(message, basestring):
                            try:
                                message = message['message']
                            except TypeError:
                                message = None
                    else:
                        message = value.get('message', None)
                except ValueError:
                    # Not valid JSON; fall through with the raw values.
                    pass
        # Map the status onto a concrete exception type.  Membership tests
        # match either the numeric code or the string identifier, since each
        # ErrorCode constant lists both.
        # NOTE(review): this default is dead code -- the if/elif chain below
        # ends in an unconditional else, so exception_class is always
        # reassigned and the ErrorInResponseException branches further down
        # can never fire.  Kept as-is to avoid behavior changes.
        exception_class = ErrorInResponseException
        if status in ErrorCode.NO_SUCH_ELEMENT:
            exception_class = NoSuchElementException
        elif status in ErrorCode.NO_SUCH_FRAME:
            exception_class = NoSuchFrameException
        elif status in ErrorCode.NO_SUCH_WINDOW:
            exception_class = NoSuchWindowException
        elif status in ErrorCode.STALE_ELEMENT_REFERENCE:
            exception_class = StaleElementReferenceException
        elif status in ErrorCode.ELEMENT_NOT_VISIBLE:
            exception_class = ElementNotVisibleException
        elif status in ErrorCode.INVALID_ELEMENT_STATE:
            exception_class = InvalidElementStateException
        elif status in ErrorCode.INVALID_SELECTOR \
                or status in ErrorCode.INVALID_XPATH_SELECTOR \
                or status in ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER:
            exception_class = InvalidSelectorException
        elif status in ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
            exception_class = ElementNotSelectableException
        elif status in ErrorCode.INVALID_COOKIE_DOMAIN:
            exception_class = WebDriverException
        elif status in ErrorCode.UNABLE_TO_SET_COOKIE:
            exception_class = WebDriverException
        elif status in ErrorCode.TIMEOUT:
            exception_class = TimeoutException
        elif status in ErrorCode.SCRIPT_TIMEOUT:
            exception_class = TimeoutException
        elif status in ErrorCode.UNKNOWN_ERROR:
            exception_class = WebDriverException
        elif status in ErrorCode.UNEXPECTED_ALERT_OPEN:
            exception_class = UnexpectedAlertPresentException
        elif status in ErrorCode.NO_ALERT_OPEN:
            exception_class = NoAlertPresentException
        elif status in ErrorCode.IME_NOT_AVAILABLE:
            exception_class = ImeNotAvailableException
        elif status in ErrorCode.IME_ENGINE_ACTIVATION_FAILED:
            exception_class = ImeActivationFailedException
        elif status in ErrorCode.MOVE_TARGET_OUT_OF_BOUNDS:
            exception_class = MoveTargetOutOfBoundsException
        else:
            exception_class = WebDriverException
        if value == '' or value is None:
            value = response['value']
        # A bare string value carries just the message.
        if isinstance(value, basestring):
            if exception_class == ErrorInResponseException:
                raise exception_class(response, value)
            raise exception_class(value)
        if message == "" and 'message' in value:
            message = value['message']
        screen = None
        if 'screen' in value:
            screen = value['screen']
        stacktrace = None
        # Format the server-side stack trace as "at method (file:line)" lines.
        if 'stackTrace' in value and value['stackTrace']:
            stacktrace = []
            try:
                for frame in value['stackTrace']:
                    line = self._value_or_default(frame, 'lineNumber', '')
                    file = self._value_or_default(frame, 'fileName', '<anonymous>')
                    if line:
                        file = "%s:%s" % (file, line)
                    meth = self._value_or_default(frame, 'methodName', '<anonymous>')
                    if 'className' in frame:
                        meth = "%s.%s" % (frame['className'], meth)
                    msg = " at %s (%s)"
                    msg = msg % (meth, file)
                    stacktrace.append(msg)
            except TypeError:
                # Malformed stackTrace entries; keep what was collected.
                pass
        if exception_class == ErrorInResponseException:
            raise exception_class(response, message)
        elif exception_class == UnexpectedAlertPresentException and 'alert' in value:
            raise exception_class(message, screen, stacktrace, value['alert'].get('text'))
        raise exception_class(message, screen, stacktrace)

    def _value_or_default(self, obj, key, default):
        # dict.get() equivalent, kept as a helper for readability above.
        return obj[key] if key in obj else default
|
acarmel/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/media/_base/matcher/__init__.py
|
81
|
from .main import Matcher
def autoload():
    # CouchPotato plugin entry point: instantiate the Matcher plugin.
    return Matcher()

# This plugin exposes no configuration options.
config = []
|
jayclassless/tidypy
|
refs/heads/master
|
test/test_finder.py
|
1
|
# -*- coding: utf-8 -*-
import os
import sys
from tidypy import Finder, get_default_config
def fix_paths(paths):
    """Translate forward-slash paths to backslashes when running on Windows."""
    if sys.platform != 'win32':
        return paths
    return [path.replace('/', '\\') for path in paths]
def test_exclude():
    # A FIFO is not a regular file and must be silently skipped by the finder.
    if hasattr(os, 'mkfifo') and not os.path.exists('test/project1/testfifo'):
        os.mkfifo('test/project1/testfifo')

    config = get_default_config()
    config['exclude'] = [
        r'invalid',
        r'\.pyc$',
        r'project1/module.+$',
    ]
    project = Finder('test/project1', config)

    expected = sorted(fix_paths([
        'data/broken.json',
        'data/broken.po',
        'data/broken.pot',
        'data/broken.rst',
        'data/broken.yaml',
        'input.yaml',
        'pyproject.toml',
        'setup.cfg',
        'setup.py',
        'project1/__init__.py',
        'project1/broken.py',
        'project1/koi8r.py',
        'project1/utf8.py',
        'project1b/__init__.py',
    ]))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.files()
    )
    assert expected == actual
def test_files_filter():
    # Only Python sources should survive a \.py$ filter.
    project = Finder('test/project1', get_default_config())

    expected = sorted(fix_paths([
        'setup.py',
        'project1/__init__.py',
        'project1/broken.py',
        'project1/koi8r.py',
        'project1/module1.py',
        'project1/module2.py',
        'project1/utf8.py',
        'project1b/__init__.py',
    ]))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.files(filters=[r'\.py$'])
    )
    assert expected == actual
def test_directories():
    # Excluded directories must not be reported.
    config = get_default_config()
    config['exclude'] = ['project1b']
    project = Finder('test/project1', config)

    expected = sorted(fix_paths([
        '.',
        'data',
        'project1',
    ]))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.directories()
    )
    assert expected == actual
def test_directories_filters():
    # Directory listing restricted by a name filter.
    project = Finder('test/project1', get_default_config())

    expected = sorted(fix_paths(['data']))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.directories(filters=[r'data'])
    )
    assert expected == actual
def test_directories_containing():
    # Only directories containing a matching file are reported.
    project = Finder('test/project1', get_default_config())

    expected = sorted(fix_paths(['project1']))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.directories(containing=[r'module1.py'])
    )
    assert expected == actual
def test_packages():
    # Both directories with an __init__.py count as packages.
    project = Finder('test/project1', get_default_config())

    expected = sorted(fix_paths([
        'project1',
        'project1b',
    ]))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.packages()
    )
    assert expected == actual
def test_packages_filters():
    # Package listing restricted by a name filter.
    project = Finder('test/project1', get_default_config())

    expected = sorted(fix_paths(['project1']))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.packages(filters=[r'1$'])
    )
    assert expected == actual
def test_modules():
    # Every .py file in the project counts as a module.
    project = Finder('test/project1', get_default_config())

    expected = sorted(fix_paths([
        'project1/__init__.py',
        'project1/broken.py',
        'project1/koi8r.py',
        'project1/module1.py',
        'project1/module2.py',
        'project1/utf8.py',
        'project1b/__init__.py',
        'setup.py',
    ]))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.modules()
    )
    assert expected == actual
def test_modules_filters():
    # Module listing restricted by a name filter.
    project = Finder('test/project1', get_default_config())

    expected = sorted(fix_paths(['project1/module1.py']))
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.modules(filters=[r'module1'])
    )
    assert expected == actual
def test_topmost_directories():
    # Nested paths collapse onto their topmost ancestor in the input.
    project = Finder('test/project1', get_default_config())

    if sys.platform == 'win32':
        roots = [
            'c:\\foo\\bar',
            'c:\\foo\\bar\\baz',
            'c:\\foo\\bar\\blah',
            'c:\\foo\\bar\\blah\\a\\b',
            'c:\\else\\where',
        ]
        expected = sorted(['c:\\foo\\bar', 'c:\\else\\where'])
    else:
        roots = [
            '/foo/bar',
            '/foo/bar/baz',
            '/foo/bar/blah',
            '/foo/bar/blah/a/b',
            '/else/where',
        ]
        expected = sorted(['/foo/bar', '/else/where'])
    actual = sorted(project.topmost_directories(roots))
    assert expected == actual

    # Degenerate inputs collapse to an empty result.
    assert project.topmost_directories([]) == []
    assert project.topmost_directories(None) == []
def test_sys_paths():
    # The project root itself is the only sys path entry.
    project = Finder('test/project1', get_default_config())

    expected = sorted(['.'])
    actual = sorted(
        os.path.relpath(found, 'test/project1')
        for found in project.sys_paths()
    )
    assert expected == actual
def test_read_file():
    # Files must be decoded according to their declared source encoding.
    project = Finder('test/project1', get_default_config())

    expected = "# -*- coding: utf-8 -*-\n\ntest = 'ҖՄڇឈ'\n\n"
    assert expected == project.read_file(
        'test/project1/project1/utf8.py').replace('\r\n', '\n')

    expected = "# -*- coding: koi8-r -*-\n\ntest = '©©© ©©©©©© ©©©©©©©©©©©'\n\n#foo = 1\n\n"
    assert expected == project.read_file(
        'test/project1/project1/koi8r.py').replace('\r\n', '\n')
|
jmesteve/saas3
|
refs/heads/master
|
openerp/addons_extra/account_vat_extend/account_vat.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_vat_declaration(osv.osv_memory):
    """Extend the VAT declaration wizard with a 'display no movement' option
    and route the report to the extended VAT declaration template."""
    _inherit = 'account.vat.declaration'
    _columns = {
        # Whether tax codes without any movement should still be printed.
        'display_no_movement': fields.boolean('Display No Movement'),
    }
    _defaults = {
        'display_no_movement':True,
        'display_detail':True,
    }
    def create_vat(self, cr, uid, ids, context=None):
        """Collect the wizard form values and launch the extended VAT report.

        Returns an ir.actions.report.xml action dictionary.
        """
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        datas['model'] = 'account.tax.code'
        datas['form'] = self.read(cr, uid, ids, context=context)[0]
        # many2one fields come back as (id, name) tuples; keep only the id.
        for field in datas['form'].keys():
            if isinstance(datas['form'][field], tuple):
                datas['form'][field] = datas['form'][field][0]
        # Resolve the company owning the selected chart of tax codes.
        datas['form']['company_id'] = self.pool.get('account.tax.code').browse(cr, uid, [datas['form']['chart_tax_id']], context=context)[0].company_id.id
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'account.vat.declaration_extend',
            'datas': datas,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
CCPorg/WWW-InternetCoin-Ver-631-Original
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Input image, output animation, and rendering parameters.
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)

im_src = Image.open(SRC)
# Mirror the source first so that the rotation applied below appears
# clockwise in the final animation.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    # Absolute path of the temporary PNG for animation frame *frame*.
    filename = TMPNAME % frame
    return path.join(TMPDIR, filename)
frame_files = []
# Render each animation frame: rotate, downscale, save to a temp file.
# (xrange marks this as a Python 2 script.)
for frame in xrange(NUMFRAMES):
    # Center the rotation angle within the frame's time slot.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
# Assemble the frames into the target MNG with ImageMagick's convert.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
ILiedAboutCake/twitchAPI
|
refs/heads/master
|
scrape.py
|
1
|
#Requires https://github.com/adoxa/ansicon to run on windows cmd (cleanly) -ILiedAboutCake 2015.
#also needs requests library on windows, get it with "python -m pip install -U requests"
import threading
import urllib2
import Queue
import time
import json
import csv
# Channels to poll each cycle.
strims = ['destiny', 'nathanias', 'nl_kripp', 'phantoml0rd', 'lethalfrag', 'totalbiscuit', 'sodapoppin', 'kaceytron','timthetatman',
          'trick2g', 'piglet', 'saintvicious', 'riotgames', 'imaqtpie', 'tsm_theoddone', 'voyboy', 'aphromoo','kaylovespie','fenn3r',
          'forsenlol', 'swiftor', 'itmejp', 'arteezy', 'summit1g', 'dendi', 'aimostfamous','twitch','trumpsc','lolpoli',
          'tayzondaygames', 'dansgaming', 'goldglove', 'uknighted', 'defrancogames', 'nvidia', 'reckful', 'reynad27','towelliee','saltyteemo',
          'dinglederper', 'itshafu', 'alinity', 'legendarylea', 'livibee', 'kaitlyn', 'tigerlily___', 'alisha12287','lirik','sheebslol',
          'wintergaming', 'naniwasc2', 'basetradetv', 'gsl', 'avilo', 'taketv', 'desrowfighting', 'egjd','kristiplays','wcs','2mgovercsquared',
          'crank', 'wcs_america', 'wcs_europe', 'eghuk', 'rotterdam08', 'rootcatz', 'incontroltv', 'dragon','lagtvmaximusblack','streamerhouse',
          'dotademon','starladder3','athenelive','forsenlol','gretorptv','bacon_donut','ellohime','cdewx','monstercat','machinima',
          'kneecoleslaw','theoriginalweed','kylelandrypiano','meclipse','taymoo','watchmeblink1','steel_tv','kolento','tarik_tv','sacriel',
          'richardlewisreports', 'twitchplayspokemon', 'day9tv', 'lycangtv', 'followgrubby', 'deadmau5', 'riotgames','riotgames2',
          'hikotv','cro_','syndicate','nightblue3','fatefalls','szyzyg','thatsportsgamer','almostfamous','ajkcsgo']
# Worker pool size; using len(strims) threads would hammer the API and risk
# an IP ban from Twitch -- keep this a small constant.
threadCount = 15
# Seconds to wait before refilling the queue with all streamers again.
sleepTime = 60
# Shared work queue fed by main() and drained by the ThreadGet workers.
queue = Queue.Queue()
class bcolors:
    # ANSI escape sequences used to color console output (requires ansicon
    # on Windows cmd, per the header note).
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
#multithreaded handler for grabbing API data
class ThreadGet(threading.Thread):
def __init__(self, queue, name):
threading.Thread.__init__(self)
self.queue = queue
self.name = name
def run(self):
while True:
streamer = self.queue.get()
skipCSV = False
#endpoints
CHATTER_ENDPOINT = "http://tmi.twitch.tv/group/user/" + streamer + "/chatters"
VIEWER_ENDPOINT = "https://api.twitch.tv/kraken/streams/" + streamer
UPTIME_ENDPOINT = "https://nightdev.com/hosted/uptime.php?channel=" + streamer
#Timestamp
timeStamp = time.strftime("%d/%m/%y %H:%M:%S", time.localtime())
#gets chatter count
try:
responseChatter = urllib2.urlopen(CHATTER_ENDPOINT)
except urllib2.HTTPError as e:
print bcolors.FAIL + "<--- " + timeStamp + " <" + self.name + "> (" + streamer + "): API Failed (chatters). Code " + str(e.code)
skipCSV = True
except urllib2.URLError as e:
print bcolors.FAIL + "<--- " + timeStamp + " <" + self.name + "> (" + streamer + "): API Failed (chatters). Code " + str(e.reason)
skipCSV = True
try:
chatterObj = json.loads(responseChatter.read())
chatters = chatterObj['chatter_count']
except (TypeError, ValueError):
chatters = 0
#get viewer count
try:
responseViewer = urllib2.urlopen(VIEWER_ENDPOINT)
except urllib2.HTTPError as e:
print bcolors.FAIL + "<--- " + timeStamp + " <" + self.name + "> (" + streamer + "): API Failed (viewers). Code " + str(e.code)
skipCSV = True
except urllib2.URLError as e:
print bcolors.FAIL + "<--- " + timeStamp + " <" + self.name + "> (" + streamer + "): API Failed (viewers). Code " + str(e.reason)
skipCSV = True
try:
viewerObj = json.loads(responseViewer.read())
viewers = viewerObj['stream']['viewers']
except (TypeError, ValueError):
viewers = 0
#get stream uptime
try:
responseUptime = urllib2.urlopen(UPTIME_ENDPOINT)
except urllib2.HTTPError as e:
print bcolors.FAIL + "<--- " + timeStamp + " <" + self.name + "> (" + streamer + "): Uptime Failed. Code " + str(e.code)
skipCSV = True
except urllib2.URLError as e:
print bcolors.FAIL + "<--- " + timeStamp + " <" + self.name + "> (" + streamer + "): Uptime Failed. Code " + str(e.reason)
skipCSV = True
try:
uptime = (responseUptime.read())
except (TypeError, ValueError):
uptime = "no"
uptime = uptime.replace("The channel is not live.", "no");
uptime = uptime.replace(" ", "").replace(",", " ");
uptime = uptime.replace("minutes", "m").replace("minute", "m");
uptime = uptime.replace("hours", "h").replace("hour", "h");
uptime = uptime.replace("days", "d").replace("day", "d");
#update console and CSV
if skipCSV == True:
print bcolors.FAIL + "<--- " + timeStamp + " <" + self.name + "> (" + streamer + "): incomplete data, skipping :("
elif uptime == "no":
with open(streamer + '.csv', 'ab') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
writer.writerow([timeStamp, chatters, viewers])
print bcolors.WARNING + "---> " + timeStamp + " <" + self.name + "> (" + streamer + "): " + str(chatters) + " c, " + str(viewers) + " v"
else:
with open(streamer + '.csv', 'ab') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
writer.writerow([timeStamp, chatters, viewers, uptime])
print bcolors.OKGREEN + "---> " + timeStamp + " <" + self.name + "> (" + streamer + "): " + str(chatters) + " c, " + str(viewers) + " v, up " + uptime
time.sleep(0.1)
self.queue.task_done()
def main():
    # Spawn the worker pool; each thread pulls streamer names off the queue.
    for i in range(threadCount):
        t = ThreadGet(queue, str(i + 1).zfill(2))
        t.setDaemon(False)
        t.start()
    # Producer loop: periodically refill the queue with every streamer.
    while True:
        print bcolors.WARNING + "<--- Reloading " + str(len(strims)) + " items into queue, " + str(threading.activeCount()-1) + " Threads currently alive!"
        for streamer in strims:
            queue.put(streamer)
        time.sleep(sleepTime)
    # NOTE(review): unreachable -- the while-loop above never exits.
    queue.join()
main()
|
jolyonb/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/catalog/migrations/0002_catalogintegration_username.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add CatalogIntegration.service_username, the worker account used for
    Course Catalog API calls."""

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='catalogintegration',
            name='service_username',
            field=models.CharField(default=b'lms_catalog_service_user', help_text='Username created for Course Catalog Integration, e.g. lms_catalog_service_user.', max_length=100),
        ),
    ]
|
adelton/django
|
refs/heads/master
|
tests/template_tests/test_engine.py
|
199
|
import os
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from .utils import ROOT, TEMPLATE_DIR
OTHER_DIR = os.path.join(ROOT, 'other_templates')
@ignore_warnings(category=RemovedInDjango110Warning)
class DeprecatedRenderToStringTest(SimpleTestCase):
    """Tests for the deprecated Engine.render_to_string keyword arguments
    (context_instance), removed in Django 1.10."""

    def setUp(self):
        self.engine = Engine(
            dirs=[TEMPLATE_DIR],
            libraries={'custom': 'template_tests.templatetags.custom'},
        )

    def test_basic_context(self):
        self.assertEqual(
            self.engine.render_to_string('test_context.html', {'obj': 'test'}),
            'obj:test\n',
        )

    def test_existing_context_kept_clean(self):
        # The dictionary argument must only shadow, not overwrite, values in
        # a supplied context_instance.
        context = Context({'obj': 'before'})
        output = self.engine.render_to_string(
            'test_context.html', {'obj': 'after'}, context_instance=context,
        )
        self.assertEqual(output, 'obj:after\n')
        self.assertEqual(context['obj'], 'before')

    def test_no_empty_dict_pushed_to_stack(self):
        """
        #21741 -- An empty dict should not be pushed to the context stack when
        render_to_string is called without a context argument.
        """
        # The stack should have a length of 1, corresponding to the builtins
        self.assertEqual(
            '1',
            self.engine.render_to_string('test_context_stack.html').strip(),
        )
        self.assertEqual(
            '1',
            self.engine.render_to_string(
                'test_context_stack.html',
                context_instance=Context()
            ).strip(),
        )
class LoaderTests(SimpleTestCase):
    """Template origin tracking and loader-ordering behavior."""

    def test_origin(self):
        engine = Engine(dirs=[TEMPLATE_DIR], debug=True)
        template = engine.get_template('index.html')
        self.assertEqual(template.origin.template_name, 'index.html')

    def test_loader_priority(self):
        """
        #21460 -- Check that the order of template loader works.
        """
        loaders = [
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        ]
        engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
        template = engine.get_template('priority/foo.html')
        self.assertEqual(template.render(Context()), 'priority\n')

    def test_cached_loader_priority(self):
        """
        Check that the order of template loader works. Refs #21460.
        """
        loaders = [
            ('django.template.loaders.cached.Loader', [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ]),
        ]
        engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
        template = engine.get_template('priority/foo.html')
        self.assertEqual(template.render(Context()), 'priority\n')
        # Fetch a second time so the cached loader serves the template.
        template = engine.get_template('priority/foo.html')
        self.assertEqual(template.render(Context()), 'priority\n')
@ignore_warnings(category=RemovedInDjango110Warning)
class TemplateDirsOverrideTests(SimpleTestCase):
    """The deprecated per-call ``dirs`` argument must accept both a tuple
    and a list of directories."""

    DIRS = ((OTHER_DIR, ), [OTHER_DIR])

    def setUp(self):
        self.engine = Engine()

    def test_render_to_string(self):
        for dirs in self.DIRS:
            self.assertEqual(
                self.engine.render_to_string('test_dirs.html', dirs=dirs),
                'spam eggs\n',
            )

    def test_get_template(self):
        for dirs in self.DIRS:
            template = self.engine.get_template('test_dirs.html', dirs=dirs)
            self.assertEqual(template.render(Context()), 'spam eggs\n')

    def test_select_template(self):
        for dirs in self.DIRS:
            template = self.engine.select_template(['test_dirs.html'], dirs=dirs)
            self.assertEqual(template.render(Context()), 'spam eggs\n')
|
betoesquivel/fil2014
|
refs/heads/master
|
filenv/lib/python2.7/site-packages/south/v2.py
|
94
|
"""
API versioning file; we can tell what kind of migrations things are
by what class they inherit from (if none, it's a v1).
"""
from south.utils import ask_for_it_by_name
class BaseMigration(object):
    """Common base for v2-style South migrations; provides field helpers."""

    def gf(self, field_name):
        "Gets a field by absolute reference."
        field = ask_for_it_by_name(field_name)
        # Attach a stand-in model so the field can render error messages.
        field.model = FakeModel
        return field
class SchemaMigration(BaseMigration):
    # Marker class identifying a v2 schema-altering migration.
    pass
class DataMigration(BaseMigration):
    # Data migrations shouldn't be dry-run
    no_dry_run = True
class FakeModel(object):
    "Fake model so error messages on fields don't explode"
    pass
|
bitcraft/pyglet
|
refs/heads/master
|
tests/interactive/window/event_button.py
|
1
|
"""Test that mouse button events work correctly.
Expected behaviour:
One window will be opened. Click within this window and check the console
output for mouse events.
- Buttons 1, 2, 4 correspond to left, middle, right, respectively.
- No events for scroll wheel
- Modifiers are correct
Close the window or press ESC to end the test.
"""
import unittest
from pyglet import window
from pyglet.window import key
class EVENT_BUTTON(unittest.TestCase):
    """Interactive test: echoes mouse press/release events to the console
    until the tester closes the window (or presses ESC)."""

    def on_mouse_press(self, x, y, button, modifiers):
        # Handler attached to the window via push_handlers(self).
        print('Mouse button %d pressed at %f,%f with %s' %
              (button, x, y, key.modifiers_string(modifiers)))

    def on_mouse_release(self, x, y, button, modifiers):
        print('Mouse button %d released at %f,%f with %s' %
              (button, x, y, key.modifiers_string(modifiers)))

    def test_button(self):
        # Pump window events until the user ends the test.
        w = window.Window(200, 200)
        w.push_handlers(self)
        while not w.has_exit:
            w.dispatch_events()
        w.close()
|
goodhacker/enjarify
|
refs/heads/master
|
enjarify/jvm/optimization/__init__.py
|
178
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
matthewelse/micropython
|
refs/heads/emscripten
|
tests/basics/class_binop.py
|
116
|
class foo(object):
    """Value wrapper that traces which rich-comparison hook is invoked."""

    def __init__(self, value):
        self.x = value

    def __eq__(self, rhs):
        print('eq')
        return self.x == rhs.x

    def __lt__(self, rhs):
        print('lt')
        return self.x < rhs.x

    def __le__(self, rhs):
        print('le')
        return self.x <= rhs.x

    def __gt__(self, rhs):
        print('gt')
        return self.x > rhs.x

    def __ge__(self, rhs):
        print('ge')
        return self.x >= rhs.x
# Exercise every comparison operator over the full 3x3 grid of values.
for lhs_val in range(3):
    for rhs_val in range(3):
        lhs, rhs = foo(lhs_val), foo(rhs_val)
        print(lhs == rhs)
        print(lhs < rhs)
        print(lhs > rhs)
        print(lhs <= rhs)
        print(lhs >= rhs)
|
AlexRobson/nilmtk
|
refs/heads/master
|
nilmtk/utils.py
|
3
|
from __future__ import print_function, division
import numpy as np
import pandas as pd
import networkx as nx
from copy import deepcopy
from os.path import isdir, dirname, abspath
from os import getcwd
from inspect import currentframe, getfile, getsourcefile
from sys import getfilesystemencoding, stdout
from IPython.core.display import HTML, display
from collections import OrderedDict
import datetime
from nilmtk.datastore import DataStore, HDFDataStore, CSVDataStore, Key
def show_versions():
    """Prints versions of various dependencies (nilmtk, numpy, pandas, ...)
    together with the date, platform and Python version."""
    output = OrderedDict()
    output["Date"] = str(datetime.datetime.now())
    import sys
    import platform
    import importlib
    output["Platform"] = str(platform.platform())
    system_information = sys.version_info
    output["System version"] = "{}.{}".format(system_information.major,
                                              system_information.minor)
    PACKAGES = ["nilmtk", "nilm_metadata", "numpy", "matplotlib", "pandas", "sklearn"]
    for package_name in PACKAGES:
        key = package_name + " version"
        try:
            # importlib.import_module avoids exec()/eval() on built strings.
            module = importlib.import_module(package_name)
        except ImportError:
            output[key] = "Not found"
        else:
            # Missing __version__ still propagates, as before.
            output[key] = module.__version__
    try:
        # pd.show_versions() prints its report itself and returns None,
        # so don't wrap it in print() (that used to emit a spurious "None").
        pd.show_versions()
    except Exception:
        # Best-effort: older/newer pandas may fail here; keep going.
        pass
    else:
        print("")
    # .items() works on both Python 2 and 3 (iteritems() is Python-2-only).
    for k, v in output.items():
        print("{}: {}".format(k, v))
def timedelta64_to_secs(timedelta):
    """Convert `timedelta` to seconds.

    Parameters
    ----------
    timedelta : np.timedelta64

    Returns
    -------
    float : seconds
    """
    if not len(timedelta):
        # Preserve the empty-input contract: an empty float array.
        return np.array([])
    # Dividing by a one-second timedelta yields a float number of seconds.
    return timedelta / np.timedelta64(1, 's')
def tree_root(graph):
    """Returns the object that is the root of the tree.

    Parameters
    ----------
    graph : networkx.Graph
    """
    assert isinstance(graph, nx.Graph)
    # The root is the unique node with no incoming edges.
    # Technique from http://stackoverflow.com/a/4123177/732596
    # NOTE(review): in_degree_iter() is the pre-2.0 networkx API — confirm
    # the pinned networkx version before upgrading.
    roots = [node for node, degree in graph.in_degree_iter() if degree == 0]
    if len(roots) > 1:
        raise RuntimeError('Tree has more than one root!')
    if not roots:
        raise RuntimeError('Tree has no root!')
    return roots[0]
def nodes_adjacent_to_root(graph):
    # Return the children of the tree's single root node.
    root = tree_root(graph)
    return graph.successors(root)
def index_of_column_name(df, name):
    """Return the positional index of column `name` in `df`.

    Raises
    ------
    KeyError
        If no column is called `name`.
    """
    for position, column in enumerate(df.columns):
        if column == name:
            return position
    raise KeyError(name)
def find_nearest(known_array, test_array):
    """Find closest value in `known_array` for each element in `test_array`.

    Parameters
    ----------
    known_array : numpy array
        consisting of scalar values only; shape: (m, 1)
    test_array : numpy array
        consisting of scalar values only; shape: (n, 1)

    Returns
    -------
    indices : numpy array; shape: (n, 1)
        For each value in `test_array` finds the index of the closest value
        in `known_array`.
    residuals : numpy array; shape: (n, 1)
        For each value in `test_array` finds the difference from the closest
        value in `known_array`.
    """
    # Technique from http://stackoverflow.com/a/20785149/732596
    order = np.argsort(known_array)
    known_sorted = known_array[order]
    insert_pos = np.searchsorted(known_sorted, test_array)
    # Candidate neighbours: the sorted entry just below and just at/above.
    below = np.clip(insert_pos - 1, 0, len(known_sorted) - 1)
    above = np.clip(insert_pos, 0, len(known_sorted) - 1)
    dist_above = known_sorted[above] - test_array
    dist_below = test_array - known_sorted[below]
    # Ties go to the upper neighbour (dist_above <= dist_below).
    indices = order[np.where(dist_above <= dist_below, above, below)]
    residuals = test_array - known_array[indices]
    return indices, residuals
def container_to_string(container, sep='_'):
    """Join the elements of `container` with `sep`.

    Strings pass through untouched; non-iterables fall back to str().
    """
    if isinstance(container, str):
        return container
    try:
        return sep.join(str(element) for element in container)
    except TypeError:
        # Not iterable: represent the scalar itself.
        return str(container)
def simplest_type_for(values):
    """Collapse `values` to its simplest representation:
    the lone element if there is exactly one, None if empty,
    otherwise a tuple of all elements."""
    count = len(values)
    if count == 0:
        return None
    if count == 1:
        return next(iter(values))
    return tuple(values)
def flatten_2d_list(list2d):
    """Flatten one level of nesting; strings and scalars pass through whole."""
    flat = []
    for element in list2d:
        # Strings are iterable but must be kept intact.
        if isinstance(element, basestring):
            flat.append(element)
            continue
        try:
            len(element)
        except TypeError:
            # Unsized scalar: keep as-is.
            flat.append(element)
        else:
            # Sized container: splice its items in.
            flat.extend(element)
    return flat
def get_index(data):
    """
    Parameters
    ----------
    data : pandas.DataFrame or Series or DatetimeIndex

    Returns
    -------
    index : the index for the DataFrame or Series
    """
    if isinstance(data, (pd.DataFrame, pd.Series)):
        return data.index
    if isinstance(data, pd.DatetimeIndex):
        # Already an index: hand it straight back.
        return data
    raise TypeError('wrong type for `data`.')
def convert_to_timestamp(t):
    """
    Parameters
    ----------
    t : str or pd.Timestamp or datetime or None

    Returns
    -------
    pd.Timestamp or None
    """
    if t is None:
        return None
    return pd.Timestamp(t)
def get_module_directory():
    """Return the directory containing this module.

    Tries several strategies in turn (technique from
    http://stackoverflow.com/a/6098238/732596), finally falling back to
    the current working directory.
    """
    path_to_this_file = dirname(getfile(currentframe()))
    if not isdir(path_to_this_file):
        encoding = getfilesystemencoding()
        path_to_this_file = dirname(unicode(__file__, encoding))
    if not isdir(path_to_this_file):
        # BUG FIX: this fallback previously computed the path but never
        # assigned it, so the branch was a no-op.  Also take dirname() so
        # the result is a directory, not the source file itself.
        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))
    if not isdir(path_to_this_file):
        path_to_this_file = getcwd()
    assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
    return path_to_this_file
def dict_to_html(dictionary):
    """Render `dictionary` as a nested HTML unordered list."""
    def format_string(value):
        # Linkify URLs; swallow values that cannot be encoded.
        try:
            if isinstance(value, basestring) and 'http' in value:
                return '<a href="{url}">{url}</a>'.format(url=value)
            return '{}'.format(value)
        except UnicodeEncodeError:
            return ''

    parts = ['<ul>']
    for key, value in dictionary.iteritems():
        parts.append('<li><strong>{}</strong>: '.format(key))
        if isinstance(value, list):
            parts.append('<ul>')
            for item in value:
                parts.append('<li>{}</li>'.format(format_string(item)))
            parts.append('</ul>')
        elif isinstance(value, dict):
            # Recurse for nested dictionaries.
            parts.append(dict_to_html(value))
        else:
            parts.append(format_string(value))
        parts.append('</li>')
    parts.append('</ul>')
    return ''.join(parts)
def print_dict(dictionary):
    # Render the dict as nested HTML and show it inline via IPython's
    # rich-display machinery (intended for notebook use).
    html = dict_to_html(dictionary)
    display(HTML(html))
def offset_alias_to_seconds(alias):
    """Seconds for each period length."""
    # Measure the span between two consecutive periods of `alias`.
    start, end = pd.date_range('00:00', periods=2, freq=alias)
    return (end - start).total_seconds()
def check_directory_exists(d):
    """Raise IOError unless `d` is an existing directory."""
    if isdir(d):
        return
    raise IOError("Directory '{}' does not exist.".format(d))
def tz_localize_naive(timestamp, tz):
    """Return `timestamp` converted to `tz`; naive stamps are first
    localised as UTC.  None/NaT inputs become NaT; tz=None is a no-op."""
    if tz is None:
        return timestamp
    if timestamp is None or pd.isnull(timestamp):
        return pd.NaT
    stamp = pd.Timestamp(timestamp)
    if timestamp_is_naive(stamp):
        stamp = stamp.tz_localize('UTC')
    return stamp.tz_convert(tz)
def get_tz(df):
    """Return the timezone of `df`'s index, or None when the index
    has no `tz` attribute (i.e. is not datetime-like)."""
    try:
        return df.index.tz
    except AttributeError:
        return None
def timestamp_is_naive(timestamp):
    """
    Parameters
    ----------
    timestamp : pd.Timestamp or datetime.datetime

    Returns
    -------
    True if `timestamp` is naive (i.e. if it does not have a
    timezone associated with it). See:
    https://docs.python.org/2/library/datetime.html#available-types
    """
    tzinfo = timestamp.tzinfo
    # Naive when there is no tzinfo, or the tzinfo has no usable UTC offset.
    return tzinfo is None or tzinfo.utcoffset(timestamp) is None
def get_datastore(filename, format, mode='a'):
    """Open a DataStore of the requested format.

    Parameters
    ----------
    filename : string
    format : 'CSV' or 'HDF'
    mode : 'a' (append) or 'w' (write), optional

    Returns
    -------
    DataStore

    Raises
    ------
    ValueError
        If `filename` is None or `format` is not recognised.
    """
    if filename is None:
        # BUG FIX: the ValueError was previously constructed but never
        # raised, so a None filename silently returned None.
        raise ValueError('filename is None')
    if format == 'HDF':
        return HDFDataStore(filename, mode)
    elif format == 'CSV':
        return CSVDataStore(filename)
    else:
        raise ValueError('format not recognised')
def normalise_timestamp(timestamp, freq):
    """Returns the nearest Timestamp to `timestamp` which would be
    in the set of timestamps returned by pd.DataFrame.resample(freq=freq)
    """
    timestamp = pd.Timestamp(timestamp)
    # Resample a throwaway single-row series just to learn the aligned bin start.
    series = pd.Series(np.NaN, index=[timestamp])
    # NOTE(review): relies on old pandas behaviour where .resample() returned
    # the resampled series directly; modern pandas returns a Resampler object
    # without a usable `.index` — confirm the pinned pandas version before
    # upgrading.
    resampled = series.resample(freq)
    return resampled.index[0]
def print_on_line(*strings):
    """Emit `strings` on the current line (space-separated, no trailing
    newline) and flush immediately."""
    stdout.write(" ".join(str(piece) for piece in strings))
    stdout.flush()
def append_or_extend_list(lst, value):
    """Append `value` to `lst` in place, splicing it in when it is itself
    a list; None is ignored."""
    if value is None:
        return
    adder = lst.extend if isinstance(value, list) else lst.append
    adder(value)
def convert_to_list(list_like):
    """Return `list_like` as a list; None becomes the empty list."""
    if list_like is None:
        return []
    return list(list_like)
def most_common(lst):
    """Returns the most common entry in lst.

    Ties are broken arbitrarily (as in the previous implementation).
    Raises IndexError for empty input, matching the old behaviour.
    """
    from collections import Counter
    # Counter.most_common replaces the previous pandas-based approach,
    # which relied on the in-place Series.sort() API removed in modern pandas.
    ranked = Counter(list(lst)).most_common(1)
    if not ranked:
        raise IndexError('most_common() arg is an empty sequence')
    return ranked[0][0]
def capitalise_first_letter(string):
    """Return `string` with its first character upper-cased.

    BUG FIX: handles the empty string (previously raised IndexError
    on `string[0]`).
    """
    return string[:1].upper() + string[1:]
def capitalise_index(index):
    """Return the labels of `index` as a list with each first letter
    capitalised."""
    return [capitalise_first_letter(label) for label in index]
def capitalise_legend(ax):
    """Re-draw `ax`'s legend with capitalised labels; returns `ax`."""
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, capitalise_index(labels))
    return ax
|
alexmorozov/django
|
refs/heads/master
|
tests/utils_tests/test_tree.py
|
429
|
import copy
import unittest
from django.utils.tree import Node
class NodeTests(unittest.TestCase):
    """Unit tests for django.utils.tree.Node."""

    def setUp(self):
        # node1 has two children; node2 is empty.
        self.node1_children = [('a', 1), ('b', 2)]
        self.node1 = Node(self.node1_children)
        self.node2 = Node()

    def test_str(self):
        self.assertEqual(str(self.node1), "(DEFAULT: ('a', 1), ('b', 2))")
        self.assertEqual(str(self.node2), "(DEFAULT: )")

    def test_repr(self):
        self.assertEqual(repr(self.node1),
                         "<Node: (DEFAULT: ('a', 1), ('b', 2))>")
        self.assertEqual(repr(self.node2), "<Node: (DEFAULT: )>")

    def test_len(self):
        # len() reports the number of direct children.
        self.assertEqual(len(self.node1), 2)
        self.assertEqual(len(self.node2), 0)

    def test_bool(self):
        # A node is truthy iff it has children.
        self.assertTrue(self.node1)
        self.assertFalse(self.node2)

    def test_contains(self):
        self.assertIn(('a', 1), self.node1)
        self.assertNotIn(('a', 1), self.node2)

    def test_add(self):
        # start with the same children of node1 then add an item
        node3 = Node(self.node1_children)
        node3_added_child = ('c', 3)
        # add() returns the added data
        self.assertEqual(node3.add(node3_added_child, Node.default),
                         node3_added_child)
        # we added exactly one item, len() should reflect that
        self.assertEqual(len(self.node1) + 1, len(node3))
        self.assertEqual(str(node3), "(DEFAULT: ('a', 1), ('b', 2), ('c', 3))")

    def test_negate(self):
        # negated is False by default
        self.assertFalse(self.node1.negated)
        self.node1.negate()
        self.assertTrue(self.node1.negated)
        self.node1.negate()
        self.assertFalse(self.node1.negated)

    def test_deepcopy(self):
        # A shallow copy shares the children list; a deep copy does not.
        node4 = copy.copy(self.node1)
        node5 = copy.deepcopy(self.node1)
        self.assertIs(self.node1.children, node4.children)
        self.assertIsNot(self.node1.children, node5.children)
|
subutai/nupic.research
|
refs/heads/master
|
nupic/research/frameworks/vernon/mixins/oml.py
|
2
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
from copy import deepcopy
import numpy as np
import pandas as pd
import torch
from scipy import stats
from tabulate import tabulate
from torch import nn
from torch.nn.init import kaiming_normal_, zeros_
from torch.optim import Adam
from torch.utils.data import DataLoader
from nupic.research.frameworks.pytorch.model_utils import (
evaluate_model,
filter_params,
train_model,
)
class OnlineMetaLearning(object):
    """
    Implements methods specific to OML, according to original implementation
    Reference: https://github.com/khurramjaved96/mrcl

    Mixin: every overridden method delegates to ``super()``, so this class
    must sit before the concrete experiment class in the MRO.
    """
    def setup_experiment(self, config):
        """
        Add following variables to config
        :param config: Dictionary containing the configuration parameters

            - run_meta_test: whether or not to run the meta-testing phase
            - reset_output_params: whether to reset (i.e. re-init) the output layer
                                   params prior to meta-test training
            - reset_task_params: whether to reset (i.e. re-init) the output layer
                                 params corresponding to a given task, prior to
                                 meta-train training on that task
            - lr_sweep_range: list of learning rates to attempt meta-test training.
                              The best one, according to the meta-test test set,
                              will be chosen and used for the meta-testing phase.
            - run_lr_sweep: whether to run grid search over lr's for meta-test
                            training; if false, the first lr in lr_sweep_range
                            will be used; defaults to True.
            - num_meta_test_classes: list of number of classes to train and test over
                                     for meta-testing
            - num_lr_search_runs: number of runs to attempt in the lr grid search;
                                  the one that achieves the highest test-test accuracy
                                  the most times, i.e. the mode, will be chosen for the
                                  meta-testing phase.
            - num_meta_testing_runs: number of meta-testing phases to run
            - test_train_sample_size: number of images per class to sample from for
                                      meta-testing training. The rest of the
                                      images will be used for meta-test testing.
            - test_train_params: list of regex patterns identifying which params to
                                 update during meta-test training
            - output_layer_params: list of names for the output layer; if this is given
                                   and `reset_output_params=True` these will be reset
                                   prior to meta-test training
        """
        super().setup_experiment(config)
        self.run_meta_test = config.get("run_meta_test", False)
        self.reset_output_params = config.get("reset_output_params", True)
        self.reset_task_params = config.get("reset_task_params", True)
        self.lr_sweep_range = config.get("lr_sweep_range", [1e-1, 1e-2, 1e-3])
        self.run_lr_sweep = config.get("run_lr_sweep", True)
        self.num_lr_search_runs = config.get("num_lr_search_runs", 5)
        self.num_meta_testing_runs = config.get("num_meta_testing_runs", 15)
        self.num_meta_test_classes = config.get("num_meta_test_classes",
                                                [10, 50, 100, 200, 600])
        assert len(self.lr_sweep_range) > 0

        # Resolve the names of the meta-test training params.
        assert "test_train_params" in config
        test_train_named_params = filter_params(
            self.model,
            include_patterns=config["test_train_params"]
        )
        self.test_train_param_names = list(test_train_named_params.keys())
        self.logger.info(f"Setup: test_train_param_names={self.test_train_param_names}")

        # Resolve the names of the output layer params.
        if self.reset_output_params or self.reset_task_params:
            assert "output_layer_params" in config
            output_named_params = filter_params(
                self.model,
                include_names=config.get("output_layer_params", [])
            )
            self.output_param_names = list(output_named_params.keys())
            self.logger.info(f"Setup: output_param_names={self.output_param_names}")

    def create_loaders(self, config):
        # Builds the three meta-test loaders on top of whatever loaders
        # the parent class creates.
        super().create_loaders(config)

        # Load eval set.
        data = self.load_dataset(config, train=False)
        inds = self.compute_class_indices(config, data)
        self.test_class_indices = inds  # used short hand for line length

        # Each loader partitions its own deep copy of the class indices.
        train_loader = self.create_test_train_dataloader(config, data, deepcopy(inds))
        test_loader = self.create_test_test_dataloader(config, data, deepcopy(inds))
        train_eval_loader = self.create_test_train_eval_dataloader(config, data,
                                                                   deepcopy(inds))

        self.test_train_loader = train_loader
        self.test_test_loader = test_loader
        self.test_train_eval_loader = train_eval_loader
        self.num_classes_eval = min(
            config.get("num_classes_eval", 50),
            self.test_train_loader.sampler.num_classes
        )

    @classmethod
    def create_test_train_sampler(cls, config, dataset, class_indices):
        """Sampler for meta-test training."""
        sample_size = config.get("test_train_sample_size", 15)
        class_indices = cls.partition_class_indices(class_indices,
                                                    mode="train",
                                                    sample_size=sample_size)
        return cls.create_sampler(config, dataset, class_indices)

    @classmethod
    def create_test_test_sampler(cls, config, dataset, class_indices):
        """Sampler for meta-test testing."""
        sample_size = config.get("test_train_sample_size", 15)
        class_indices = cls.partition_class_indices(class_indices,
                                                    mode="test",
                                                    sample_size=sample_size)
        return cls.create_sampler(config, dataset, class_indices)

    @classmethod
    def create_test_train_dataloader(cls, config, dataset, class_indices):
        # Loader over the meta-test training split.
        sampler = cls.create_test_train_sampler(config, dataset, class_indices)
        return DataLoader(
            dataset=dataset,
            batch_size=config.get("test_train_batch_size", 1),
            shuffle=False,
            num_workers=config.get("workers", 0),
            sampler=sampler,
            pin_memory=torch.cuda.is_available(),
        )

    @classmethod
    def create_test_train_eval_dataloader(cls, config, dataset, class_indices):
        """
        Exactly the same as the test-train loader, but with a potentially
        larger batch size for evaluation.
        """
        sampler = cls.create_test_train_sampler(config, dataset, class_indices)
        return DataLoader(
            dataset=dataset,
            batch_size=config.get("test_train_eval_batch_size",
                                  config.get("test_test_batch_size", 1)),
            shuffle=False,
            num_workers=config.get("workers", 0),
            sampler=sampler,
            pin_memory=torch.cuda.is_available(),
        )

    @classmethod
    def create_test_test_dataloader(cls, config, dataset, class_indices):
        # Loader over the held-out meta-test testing split.
        sampler = cls.create_test_test_sampler(config, dataset, class_indices)
        return DataLoader(
            dataset=dataset,
            batch_size=config.get("test_test_batch_size", 1),
            shuffle=False,
            num_workers=config.get("workers", 0),
            sampler=sampler,
            pin_memory=torch.cuda.is_available(),
        )

    def pre_task(self, tasks):
        """Re-initialize task params prior to meta-train training."""
        super().pre_task(tasks)
        if self.reset_task_params:
            output_params = self.get_named_output_params().values()
            for p in output_params:
                # Only 2-d weight matrices have per-task rows to re-init.
                if p.dim() == 2:
                    for t in tasks:
                        task_weights = p[t, :].unsqueeze(0)
                        nn.init.kaiming_normal_(task_weights)

    def run_epoch(self):
        # Delegate the normal epoch; the meta-testing phase only runs once,
        # after the final meta-training epoch.
        results = super().run_epoch()
        if self.current_epoch == self.epochs and self.run_meta_test:

            # Accumulate results.
            table = []  # for printout
            dataframe = []  # for saving
            headers = ["Num Classes", "Meta-test test", "Meta-test train", "LR"]

            # Meta-training phase complete, perform meta-testing phase
            for num_classes in self.num_meta_test_classes:

                # Only sample as many classes as there are in the meta-test set.
                if num_classes > self.num_classes_eval:
                    break

                meta_test_accs = self.run_meta_testing_phase(num_classes)
                test_train_accs, test_test_accs, lr = meta_test_accs

                print(f"Accuracy for meta-testing phase over {num_classes} num classes")
                mu_test = np.mean(test_test_accs)
                sd_test = np.std(test_test_accs)
                mu_train = np.mean(test_train_accs)
                sd_train = np.std(test_train_accs)
                test_acc_str = f"{mu_test:0.2f} ± {sd_test:0.2f}"
                train_acc_str = f"{mu_train:0.2f} ± {sd_train:0.2f}"

                results.update(**{
                    f"mean_test_test_acc_{num_classes}_classes": mu_test,
                    f"mean_test_train_acc_{num_classes}_classes": mu_train,
                })

                print(f" test accs: {test_acc_str}")
                print(f" train accs: {train_acc_str}")
                print()

                num_runs = len(test_train_accs)
                table.append((num_classes, test_acc_str, train_acc_str, lr))
                dataframe.extend(zip([num_classes] * num_runs,
                                     test_test_accs,
                                     test_train_accs,
                                     [lr] * num_runs))

            print("Meta-testing results:")
            print(tabulate(table, headers=headers, tablefmt="pipe"))

            # Save results to csv
            if self.logdir is not None:
                meta_test_path = os.path.join(self.logdir, "meta_test_accuracies.csv")
                meta_test_acc_df = pd.DataFrame(dataframe, columns=headers)
                meta_test_acc_df.to_csv(meta_test_path)

        # Return results. When it's not the last epoch, this is returned as is.
        return results

    def find_best_lr(self, num_classes_learned):
        """
        This is a simple hyper-parameter search for a good lr:
            1) Sample num_classes_learned classes
            2) Train over the sampled classes; once for each lr
            3) Evaluate the model on a held-out set
            4) Repeat as many times as desired and pick the lr that performs the best
               the most number times
        """
        lr_all = []

        # Grid search over lr
        for _ in range(0, self.num_lr_search_runs):

            # Choose num_classes_learned random classes to train and then test on.
            new_tasks = np.random.choice(
                self.num_classes_eval, num_classes_learned, replace=False
            )

            max_acc = -1000
            for lr in self.lr_sweep_range:

                # Reset output layer weights.
                if self.reset_output_params:
                    output_params = self.get_named_output_params()
                    self.reset_params(output_params.values())

                # Meta-test training.
                test_train_param = self.get_named_test_train_params()
                optim = Adam(test_train_param.values(), lr=lr)
                for task in new_tasks:
                    self.test_train_loader.sampler.set_active_tasks(task)
                    train_model(
                        model=self.get_model(),
                        loader=self.test_train_loader,
                        optimizer=optim,
                        device=self.device,
                        criterion=self._loss_function,
                    )

                # Meta-test testing.
                self.test_test_loader.sampler.set_active_tasks(new_tasks)
                results = evaluate_model(
                    model=self.get_model(),
                    loader=self.test_test_loader,
                    device=self.device,
                    criterion=self._loss_function,
                )
                correct = results["total_correct"]
                acc = correct / len(self.test_test_loader.sampler.indices)
                if (acc > max_acc):
                    max_acc = acc
                    max_lr = lr

            lr_all.append(max_lr)

        # Pick the lr that won the most runs (the mode).
        best_lr = float(stats.mode(lr_all)[0][0])
        return best_lr

    def run_meta_testing_phase(self, num_classes_learned):
        """
        Run the meta-testing phase: train over num_classes_learned and then test over a
        held-out set comprised of those same classes (aka the meta-test test set). This
        shows the model's ability to conduct continual learning in a way that allows
        generalization. As well, at the end of this phase, this function also evaluates
        the models performance on the meta-test training set to evaluate it's ability to
        memorize without forgetting.
        """
        # Decide on the lr to use.
        if self.run_lr_sweep:
            lr = self.find_best_lr(num_classes_learned)
        else:
            lr = self.lr_sweep_range[-1]

        meta_test_test_accuracies = []
        meta_test_train_accuracies = []
        for _ in range(0, self.num_meta_testing_runs):

            # Choose num_classes_learned random classes to train and then test on.
            new_tasks = np.random.choice(
                self.num_classes_eval, num_classes_learned, replace=False
            )

            # Reset output layer weights.
            if self.reset_output_params:
                output_params = self.get_named_output_params()
                self.reset_params(output_params.values())

            # Meta-testing training.
            test_train_param = self.get_named_test_train_params()
            optim = Adam(test_train_param.values(), lr=lr)
            for task in new_tasks:
                self.test_train_loader.sampler.set_active_tasks(task)
                train_model(
                    model=self.get_model(),
                    loader=self.test_train_loader,
                    optimizer=optim,
                    device=self.device,
                    criterion=self._loss_function,
                )

            # Meta-testing testing (using the test-test set).
            self.test_test_loader.sampler.set_active_tasks(new_tasks)
            results = evaluate_model(
                model=self.model,
                loader=self.test_test_loader,
                device=self.device,
                criterion=self._loss_function,
            )
            correct = results["total_correct"]
            acc = correct / len(self.test_test_loader.sampler.indices)
            meta_test_test_accuracies.append(acc)

            # Meta-testing testing (using the test-train set).
            self.test_train_eval_loader.sampler.set_active_tasks(new_tasks)
            results = evaluate_model(
                model=self.get_model(),
                loader=self.test_train_eval_loader,
                device=self.device,
                criterion=self._loss_function,
            )
            correct = results["total_correct"]
            acc = correct / len(self.test_train_eval_loader.sampler.indices)
            meta_test_train_accuracies.append(acc)

        return meta_test_train_accuracies, meta_test_test_accuracies, lr

    def get_named_test_train_params(self):
        """Filter out the params from test_train_param_names."""
        return self._get_params_by_names(self.test_train_param_names)

    def get_named_output_params(self):
        """Filter out the params from output_param_names."""
        return self._get_params_by_names(self.output_param_names)

    def reset_params(self, params):
        """Helper function to reinitialize params."""
        for param in params:
            # Kaiming init for weight matrices, zeros for biases/vectors.
            if len(param.shape) > 1:
                kaiming_normal_(param)
            else:
                zeros_(param)

    @classmethod
    def get_execution_order(cls):
        # Document this mixin's contributions in the framework's
        # execution-order bookkeeping.
        eo = super().get_execution_order()
        eo["setup_experiment"].append("OML meta-testing setup")
        eo["run_epoch"].append("Run meta testing phase at end of training.")
        eo["create_loaders"].append("Create loaders for the meta testing phase")
        eo["pre_task"].append("Reset the output params for upcoming tasks.")
        return eo
|
chillbu/cblib
|
refs/heads/master
|
thirdparty/gtest-1.7.0/test/gtest_xml_output_unittest.py
|
1815
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Command-line flags understood by Google Test binaries.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
# Name Google Test uses when no explicit XML output file is requested.
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
# The helper binary whose XML output these tests inspect.
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False

# Expected <failure> stack-trace suffix: a glob pattern when stack traces
# are supported, otherwise empty.
if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
  STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

# Probe the helper binary's test list to learn whether this platform was
# built with typed-test support.
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML report output.

  Each test runs a gtest helper binary with --gtest_output=xml and compares
  the produced document (after the harness normalizes run-dependent
  attributes such as 'time' and 'timestamp') against an expected template.
  """

  # This test currently breaks on platforms that do not support typed and
  # type-parameterized tests, so we don't run it under them.
  if SUPPORTS_TYPED_TESTS:

    def testNonEmptyXmlOutput(self):
      """Runs a test program that generates a non-empty XML output, and
      tests that the XML output is expected.
      """
      self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)

  def testEmptyXmlOutput(self):
    """Verifies XML output for a Google Test binary without actual tests.

    Runs a test program that generates an empty XML output, and
    tests that the XML output is expected.
    """
    self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)

  def testTimestampValue(self):
    """Checks whether the timestamp attribute in the XML output is valid.

    Runs a test program that generates an empty XML output, and checks if
    the timestamp attribute in the testsuites tag is valid.
    """
    actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
    date_time_str = actual.documentElement.getAttributeNode('timestamp').value
    # datetime.strptime() is only available in Python 2.5+ so we have to
    # parse the expected datetime manually.
    match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    # BUG FIX: the original passed the re.match *function* (always truthy)
    # to assertTrue, so a malformed timestamp could never fail here; assert
    # on the actual match result instead.
    self.assertTrue(
        match,
        'XML datetime string %s has incorrect format' % date_time_str)
    date_time_from_xml = datetime.datetime(
        year=int(match.group(1)), month=int(match.group(2)),
        day=int(match.group(3)), hour=int(match.group(4)),
        minute=int(match.group(5)), second=int(match.group(6)))
    time_delta = abs(datetime.datetime.now() - date_time_from_xml)
    # timestamp value should be near the current local time
    self.assertTrue(time_delta < datetime.timedelta(seconds=600),
                    'time_delta is %s' % time_delta)
    actual.unlink()

  def testDefaultOutputFile(self):
    """Confirms that Google Test produces an XML output file with the
    expected default name if no name is explicitly specified.
    """
    output_file = os.path.join(gtest_test_utils.GetTempDir(),
                               GTEST_DEFAULT_OUTPUT_FILE)
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
        'gtest_no_test_unittest')
    try:
      os.remove(output_file)
    except OSError as e:  # 'as' form works on Python 2.6+ and Python 3.
      # A missing file is fine; anything else is a real error.
      if e.errno != errno.ENOENT:
        raise
    p = gtest_test_utils.Subprocess(
        [gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
        working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    self.assert_(os.path.isfile(output_file))

  def testSuppressedXmlOutput(self):
    """Tests that no XML file is generated if the default XML listener is
    shut down before RUN_ALL_TESTS is invoked.
    """
    xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                            GTEST_PROGRAM_NAME + 'out.xml')
    if os.path.isfile(xml_path):
      os.remove(xml_path)
    command = [GTEST_PROGRAM_PATH,
               '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
               '--shut_down_xml']
    p = gtest_test_utils.Subprocess(command)
    if p.terminated_by_signal:
      # p.signal is available only if p.terminated_by_signal is True.
      self.assertFalse(
          p.terminated_by_signal,
          '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
    else:
      self.assert_(p.exited)
      self.assertEquals(1, p.exit_code,
                        "'%s' exited with code %s, which doesn't match "
                        'the expected exit code %s.'
                        % (command, p.exit_code, 1))
    # The whole point: no XML file may have been written.
    self.assert_(not os.path.isfile(xml_path))

  def testFilteredTestXmlOutput(self):
    """Verifies XML output when a filter is applied.

    Runs a test program that executes only some tests and verifies that
    non-selected tests do not show up in the XML output.
    """
    self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
                        extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])

  def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
    """Returns the xml output generated by running the program
    gtest_prog_name.  Furthermore, the program's exit code must be
    expected_exit_code.
    """
    xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                            gtest_prog_name + 'out.xml')
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
    command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
               extra_args)
    p = gtest_test_utils.Subprocess(command)
    if p.terminated_by_signal:
      self.assert_(False,
                   '%s was killed by signal %d' % (gtest_prog_name, p.signal))
    else:
      self.assert_(p.exited)
      self.assertEquals(expected_exit_code, p.exit_code,
                        "'%s' exited with code %s, which doesn't match "
                        'the expected exit code %s.'
                        % (command, p.exit_code, expected_exit_code))
    actual = minidom.parse(xml_path)
    return actual

  def _TestXmlOutput(self, gtest_prog_name, expected_xml,
                     expected_exit_code, extra_args=None):
    """Asserts that the XML document generated by running the program
    gtest_prog_name matches expected_xml, a string containing another
    XML document.  Furthermore, the program's exit code must be
    expected_exit_code.
    """
    actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
                                expected_exit_code)
    expected = minidom.parseString(expected_xml)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == '__main__':
  # Keep stack traces short so the expected XML (built from
  # STACK_TRACE_TEMPLATE above) matches what the helper binary emits.
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
|
yorkie/node-gyp
|
refs/heads/master
|
gyp/pylib/gyp/MSVSUtil.py
|
566
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# Maps a GYP target type to the file extension of its primary output
# on Windows (both module types produce DLLs).
_TARGET_TYPE_EXT = {
  'executable': '.exe',
  'loadable_module': '.dll',
  'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
  """Add a shard number to the end of a target.

  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  # A shard name is just the ordinary suffixed name with the shard index
  # rendered as text.
  shard_suffix = str(number)
  return _SuffixName(name, shard_suffix)
def ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linkers limits.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  # Map each shard-enabled target ('msvs_shard' set) to its shard count.
  shard_counts = {}
  for name, props in target_dicts.items():
    count = int(props.get('msvs_shard', 0))
    if count:
      shard_counts[name] = count

  # Rewrite the ordered target list, expanding sharded entries in place.
  sharded_list = []
  for name in target_list:
    if name in shard_counts:
      sharded_list.extend(_ShardName(name, i)
                          for i in range(shard_counts[name]))
    else:
      sharded_list.append(name)

  # Rewrite the property dicts: each shard gets a shallow copy with its own
  # target_name and an interleaved slice of the sources; unsharded targets
  # keep their original dict object.
  sharded_dicts = {}
  for name, props in target_dicts.items():
    if name not in shard_counts:
      sharded_dicts[name] = props
      continue
    count = shard_counts[name]
    for i in range(count):
      shard_key = _ShardName(name, i)
      shard_props = copy.copy(props)
      shard_props['target_name'] = _ShardName(shard_props['target_name'], i)
      sources = shard_props.get('sources', [])
      # Round-robin the sources so shards end up roughly equal in size.
      shard_props['sources'] = [sources[pos]
                                for pos in range(i, len(sources), count)]
      sharded_dicts[shard_key] = shard_props

  # Point dependencies on a sharded target at every one of its shards.
  for props in sharded_dicts.values():
    expanded = []
    for dep in copy.copy(props.get('dependencies', [])):
      if dep in shard_counts:
        expanded.extend(_ShardName(dep, i)
                        for i in range(shard_counts[dep]))
      else:
        expanded.append(dep)
    props['dependencies'] = expanded

  return (sharded_list, sharded_dicts)
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
  """Insert a shim target that forces the linker to use 4KB pagesize PDBs.

  This is a workaround for targets with PDBs greater than 1GB in size, the
  limit for the 1KB pagesize PDBs created by the linker by default.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    Tuple of the shimmed version of the inputs.
  """
  # Determine which targets need shimming.
  targets_to_shim = []
  for t in target_dicts:
    target_dict = target_dicts[t]

    # We only want to shim targets that have msvs_large_pdb enabled.
    if not int(target_dict.get('msvs_large_pdb', 0)):
      continue

    # This is intended for executable, shared_library and loadable_module
    # targets where every configuration is set up to produce a PDB output.
    # If any of these conditions is not true then the shim logic will fail
    # below.
    targets_to_shim.append(t)

  large_pdb_shim_cc = _GetLargePdbShimCcPath()

  for t in targets_to_shim:
    target_dict = target_dicts[t]
    target_name = target_dict.get('target_name')

    base_dict = _DeepCopySomeKeys(target_dict,
          ['configurations', 'default_configuration', 'toolset'])

    # This is the dict for copying the source file (part of the GYP tree)
    # to the intermediate directory of the project. This is necessary because
    # we can't always build a relative path to the shim source file (on Windows
    # GYP and the project may be on different drives), and Ninja hates absolute
    # paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYPs tree).
    copy_suffix = 'large_pdb_copy'
    copy_target_name = target_name + '_' + copy_suffix
    full_copy_target_name = _SuffixName(t, copy_suffix)
    shim_cc_basename = os.path.basename(large_pdb_shim_cc)
    shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
    shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
    copy_dict = copy.deepcopy(base_dict)
    copy_dict['target_name'] = copy_target_name
    copy_dict['type'] = 'none'
    copy_dict['sources'] = [ large_pdb_shim_cc ]
    copy_dict['copies'] = [{
      'destination': shim_cc_dir,
      'files': [ large_pdb_shim_cc ]
    }]

    # This is the dict for the PDB generating shim target. It depends on the
    # copy target.
    shim_suffix = 'large_pdb_shim'
    shim_target_name = target_name + '_' + shim_suffix
    full_shim_target_name = _SuffixName(t, shim_suffix)
    shim_dict = copy.deepcopy(base_dict)
    shim_dict['target_name'] = shim_target_name
    shim_dict['type'] = 'static_library'
    shim_dict['sources'] = [ shim_cc_path ]
    shim_dict['dependencies'] = [ full_copy_target_name ]

    # Set up the shim to output its PDB to the same location as the final
    # linker target.
    # COMPAT FIX: items() instead of the Python-2-only dict.iteritems() so
    # this module also runs under Python 3 (behavior is identical here).
    for config_name, config in shim_dict.get('configurations').items():
      pdb_path = _GetPdbPath(target_dict, config_name, vars)

      # A few keys that we don't want to propagate.
      for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
        config.pop(key, None)

      msvs = config.setdefault('msvs_settings', {})

      # Update the compiler directives in the shim target.
      compiler = msvs.setdefault('VCCLCompilerTool', {})
      compiler['DebugInformationFormat'] = '3'
      compiler['ProgramDataBaseFileName'] = pdb_path

      # Set the explicit PDB path in the appropriate configuration of the
      # original target.
      config = target_dict['configurations'][config_name]
      msvs = config.setdefault('msvs_settings', {})
      linker = msvs.setdefault('VCLinkerTool', {})
      linker['GenerateDebugInformation'] = 'true'
      linker['ProgramDatabaseFile'] = pdb_path

    # Add the new targets. They must go to the beginning of the list so that
    # the dependency generation works as expected in ninja.
    target_list.insert(0, full_copy_target_name)
    target_list.insert(0, full_shim_target_name)
    target_dicts[full_copy_target_name] = copy_dict
    target_dicts[full_shim_target_name] = shim_dict

    # Update the original target to depend on the shim target.
    target_dict.setdefault('dependencies', []).append(full_shim_target_name)

  return (target_list, target_dicts)
|
mlperf/inference_results_v0.7
|
refs/heads/master
|
closed/Lenovo/code/ssd-mobilenet/tensorrt/preprocess_data.py
|
12
|
#!/usr/bin/python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.getcwd())
import argparse
import numpy as np
import shutil
from code.common import logging, BENCHMARKS
from code.common.image_preprocessor import ImagePreprocessor, center_crop, resize_with_aspectratio
import cv2
import math
def preprocess_coco_for_ssdmobilenet(data_dir, preprocessed_data_dir, formats, overwrite=False, cal_only=False, val_only=False):
    """Preprocess COCO images into the input format used by SSDMobileNet.

    Images are resized to 300x300 RGB, converted to CHW float32 in [-1, 1],
    and (per requested format) quantized to int8 with a fixed scale of 127.
    """

    def load_image(path):
        # OpenCV reads BGR; convert to the RGB ordering the network expects.
        bgr = cv2.imread(path)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        resized = np.array(cv2.resize(rgb, (300, 300), interpolation=cv2.INTER_LINEAR)).astype(np.float32)
        # HWC -> CHW, then rescale pixel values from [0, 255] to [-1, 1].
        chw = resized.transpose((2, 0, 1))
        return (2.0 / 255.0) * chw - 1.0

    def quantize(image):
        # Dynamic range of image is [-1.0, 1.0], so scale by the int8 max.
        return (image * 127.0).astype(dtype=np.int8, order='C')

    preprocessor = ImagePreprocessor(load_image, quantize)
    if not val_only:
        # Preprocess calibration set. FP32 only because calibrator always
        # takes FP32 input.
        preprocessor.run(os.path.join(data_dir, "coco", "train2017"),
                         os.path.join(preprocessed_data_dir, "coco", "train2017", "SSDMobileNet"),
                         "data_maps/coco/cal_map.txt", ["fp32"], overwrite)
    if not cal_only:
        # Preprocess validation set.
        preprocessor.run(os.path.join(data_dir, "coco", "val2017"),
                         os.path.join(preprocessed_data_dir, "coco", "val2017", "SSDMobileNet"),
                         "data_maps/coco/val_map.txt", formats, overwrite)
def copy_coco_annotations(data_dir, preprocessed_data_dir):
    """Mirror the COCO annotation directory into the preprocessed tree.

    The copy is skipped entirely when the destination already exists, so
    re-running preprocessing never re-copies (or clobbers) annotations.
    """
    source = os.path.join(data_dir, "coco/annotations")
    destination = os.path.join(preprocessed_data_dir, "coco/annotations")
    if os.path.exists(destination):
        return
    shutil.copytree(source, destination)
def main():
    """Entry point: parse CLI arguments and preprocess the COCO data set.

    The data directory is assumed to have the following structure:
      <data_dir>/coco/{annotations,train2017,val2017}
    and the output directory will be populated as:
      <preprocessed_data_dir>/coco/train2017/SSDMobileNet/fp32
      <preprocessed_data_dir>/coco/val2017/SSDMobileNet/{int8_chw4,int8_linear}
      <preprocessed_data_dir>/coco/annotations   (copied verbatim)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", "-d",
                        help="Specifies the directory containing the input images.",
                        default="build/data")
    parser.add_argument("--preprocessed_data_dir", "-o",
                        help="Specifies the output directory for the preprocessed data.",
                        default="build/preprocessed_data")
    parser.add_argument("--formats", "-t",
                        help="Comma-separated list of formats. Choices: fp32, int8_linear, int8_chw4.",
                        default="default")
    parser.add_argument("--overwrite", "-f",
                        help="Overwrite existing files.",
                        action="store_true")
    parser.add_argument("--cal_only",
                        help="Only preprocess calibration set.",
                        action="store_true")
    parser.add_argument("--val_only",
                        help="Only preprocess validation set.",
                        action="store_true")
    args = parser.parse_args()

    # "default" expands to both int8 layouts.
    if args.formats == "default":
        formats = ["int8_linear", "int8_chw4"]
    else:
        formats = args.formats.split(",")

    # Now, actually preprocess the input images.
    logging.info("Loading and preprocessing images. This might take a while...")
    preprocess_coco_for_ssdmobilenet(args.data_dir, args.preprocessed_data_dir,
                                     formats, args.overwrite, args.cal_only,
                                     args.val_only)

    # Copy annotations from data_dir to preprocessed_data_dir.
    copy_coco_annotations(args.data_dir, args.preprocessed_data_dir)
    logging.info("Preprocessing done.")


if __name__ == '__main__':
    main()
|
svn2github/django
|
refs/heads/master
|
django/contrib/sitemaps/tests/base.py
|
87
|
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import TestCase
class SitemapTestsBase(TestCase):
    # Shared fixture for the sitemap test cases.
    protocol = 'http'
    # With the sites framework installed the fixture domain is example.com;
    # otherwise Django's test-client default host is used.
    domain = 'example.com' if Site._meta.installed else 'testserver'
    urls = 'django.contrib.sitemaps.tests.urls.http'

    def setUp(self):
        # Base URL every expected sitemap entry is built from.
        self.base_url = '%s://%s' % (self.protocol, self.domain)
        # Snapshot settings so tearDown can restore them (USE_L10N itself is
        # not changed here — presumably individual tests mutate it).
        self.old_USE_L10N = settings.USE_L10N
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), 'templates'),
        )
        self.old_Site_meta_installed = Site._meta.installed
        # Create a user that will double as sitemap content
        User.objects.create_user('testuser', 'test@example.com', 's3krit')

    def tearDown(self):
        # Restore everything setUp (or a test) may have changed.
        settings.USE_L10N = self.old_USE_L10N
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
        Site._meta.installed = self.old_Site_meta_installed
|
icebreaker/Templatizer-Templates
|
refs/heads/master
|
python-cmdapp/app.py
|
1
|
#!/usr/bin/env python
__author__ = '%AUTHOR%'
__description__ = '%DESCRIPTION%'
__version__ = (0, 1, 0)
__license__ = '%LICENSE%'
import sys
import os
import logging
def main(argv):
    """Command-line entry point.

    Returns -1 after printing usage when called without arguments, 0 for
    the recognized options, and None otherwise.
    """
    if len(argv) < 2:
        # No arguments: show banner and usage, then signal failure.
        prog = os.path.splitext(os.path.basename(argv[0]))[0]
        print('%NAME% v%d.%d.%d' % __version__)
        print('usage: %s [options]' % prog)
        print('')
        print('Options:')
        print('\t-v, --version\tshows the version number')
        print('\t-d, --debug\tenables debug output')
        return -1

    if '-v' in argv or '--version' in argv:
        print('%d.%d.%d' % __version__) # print version
        return 0

    if argv[1] in ('-d', '--debug'):
        argv.pop(1) # remove this item
        logging.basicConfig(level=logging.DEBUG)
        return 0


if __name__ == '__main__':
    exit(main(sys.argv))
|
andrewyoung1991/scons
|
refs/heads/master
|
test/MSVS/vs-8.0-variant_dir.py
|
5
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Test that we can generate Visual Studio 8.0 project (.vcproj) and
solution (.sln) files that look correct when using a variant_dir.
"""

import TestSConsMSVS

test = TestSConsMSVS.TestSConsMSVS()

host_arch = test.get_vs_host_arch()

# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['8.0']

# Canned expected file contents supplied by the test harness.
expected_slnfile = TestSConsMSVS.expected_slnfile_8_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_8_0
SConscript_contents = TestSConsMSVS.SConscript_contents_8_0

# Build a project under src/ with generated output redirected to build/.
test.subdir('src')

test.write('SConstruct', """\
SConscript('src/SConscript', variant_dir='build')
""")

test.write(['src', 'SConscript'], SConscript_contents%{'HOST_ARCH': host_arch})

test.run(arguments=".")

# Expected GUID for this generated project (presumably fixed by the
# harness inputs — verify against TestSConsMSVS).
project_guid = "{25F6CE89-8E22-2910-8B6E-FFE6DC1E2792}"

vcproj = test.read(['src', 'Test.vcproj'], 'r')
expect = test.msvs_substitute(expected_vcprojfile, '8.0', None, 'SConstruct',
                              project_guid=project_guid)
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)

test.must_exist(test.workpath('src', 'Test.sln'))
sln = test.read(['src', 'Test.sln'], 'r')
expect = test.msvs_substitute(expected_slnfile, '8.0', 'src',
                              project_guid=project_guid)
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)

# The variant dir must contain only placeholders pointing at the real
# generated files in src/.
test.must_match(['build', 'Test.vcproj'], """\
This is just a placeholder file.
The real project file is here:
%s
""" % test.workpath('src', 'Test.vcproj'),
                mode='r')

test.must_match(['build', 'Test.sln'], """\
This is just a placeholder file.
The real workspace file is here:
%s
""" % test.workpath('src', 'Test.sln'),
                mode='r')

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
blueprintmrk/graphy
|
refs/heads/master
|
graphy/backends/google_chart_api/__init__.py
|
205
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend which can generate charts using the Google Chart API."""
from graphy import line_chart
from graphy import bar_chart
from graphy import pie_chart
from graphy.backends.google_chart_api import encoders
def _GetChartFactory(chart_class, display_class):
"""Create a factory method for instantiating charts with displays.
Returns a method which, when called, will create & return a chart with
chart.display already populated.
"""
def Inner(*args, **kwargs):
chart = chart_class(*args, **kwargs)
chart.display = display_class(chart)
return chart
return Inner
# These helper methods make it easy to get chart objects with display
# objects already setup.  For example, this:
#   chart = google_chart_api.LineChart()
# is equivalent to:
#   chart = line_chart.LineChart()
#   chart.display = google_chart_api.encoders.LineChartEncoder(chart)
#
# (If there's some chart type for which a helper method isn't available, you
# can always just instantiate the correct encoder manually, like in the 2nd
# example above).
# TODO: fix these so they have nice docs in ipython (give them __doc__)
LineChart = _GetChartFactory(line_chart.LineChart, encoders.LineChartEncoder)
Sparkline = _GetChartFactory(line_chart.Sparkline, encoders.SparklineEncoder)
BarChart  = _GetChartFactory(bar_chart.BarChart, encoders.BarChartEncoder)
PieChart  = _GetChartFactory(pie_chart.PieChart, encoders.PieChartEncoder)
|
everestial/g2gtools
|
refs/heads/master
|
g2gtools/bed.py
|
1
|
# -*- coding: utf-8 -*-
#
# Collection of functions related to BED files
#
# 0-based
#
from collections import namedtuple
from .chain import ChainFile
from .exceptions import G2GBedError, G2GLocationError
from .g2g_utils import get_logger
import g2g_fileutils as g2g_fu
LOG = get_logger()

# Standard BED columns; 'extra' collects any columns past the sixth.
bed_fields = ["chrom", "start", "end", "name", "score", "strand", "extra"]
BEDRecord = namedtuple("BEDRecord", bed_fields)
class BED(object):
    """
    Simple BED object for parsing BED files

    Supports transparent gzip decompression.
    """
    def __init__(self, filename):
        # filename: path to a BED file; open_resource handles gzip too.
        if not filename:
            raise G2GBedError("A filename must be supplied")

        self.filename = filename
        # Raw text of the most recently read line, kept for error reporting
        # and so callers can pass non-record lines through unchanged.
        self.current_line = None
        # False for "track" header lines, True for data records.
        self.current_line_is_bed = False
        self.current_record = None
        self.reader = g2g_fu.open_resource(filename)
        # Column count; fixed by the first record and enforced thereafter.
        self.nitems = None
        self.current_line_no = 0

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; end-of-file propagates as the
        # StopIteration raised by reader.next().
        self.current_line = self.reader.next()
        self.current_line_no += 1

        # Skip blank lines.
        while self.current_line and len(self.current_line.strip()) == 0:
            self.current_line = self.reader.next()
            self.current_line_no += 1

        if self.current_line.startswith("track"):
            # "track" header: flag it and yield None so the caller can copy
            # the line through to the output verbatim.
            self.current_line = self.current_line.strip()
            self.current_line_is_bed = False
            self.current_record = None
            return None

        self.current_line_is_bed = True
        elem = self.current_line.strip().split("\t")

        # Every record must have the same column count as the first one.
        if not self.nitems:
            self.nitems = len(elem)
        else:
            if self.nitems != len(elem):
                raise G2GBedError("Improperly formatted BED file")

        try:
            # Columns beyond chrom/start/end are optional in BED.
            bed_data = {'chrom': elem[0],
                        'start': int(elem[1]),
                        'end': int(elem[2]),
                        'name': elem[3] if self.nitems > 3 else None,
                        'score': elem[4] if self.nitems > 4 else None,
                        'strand': elem[5] if self.nitems > 5 else None,
                        'extra': elem[6:] if self.nitems > 6 else None}
            self.current_record = BEDRecord(**bed_data)
            return self.current_record
        except IndexError, ie:
            LOG.debug(ie.message)
            raise G2GBedError("Improperly formatted BED file, line number: {0}, line: {1}".format(self.current_line_no, self.current_line))
        except ValueError, ve:
            LOG.debug(ve.message)
            raise G2GBedError("Improperly formatted BED file, line number: {0}, line: {1}".format(self.current_line_no, self.current_line))
def convert_bed_file(chain_file, input_file, output_file, reverse=False):
    """
    Convert BED coordinates.

    The mappings of coordinates are stored in the :class:`.chain.ChainFile` object.

    :param chain_file: chain file used for conversion
    :type chain_file: :class:`.chain.ChainFile`
    :param str file_in: the input BED file
    :type file_in: string
    :param file_out: the output BED file
    :type file_out: string
    :param reverse: reverse direction of original chain file
    :type reverse: boolean

    :return: Nothing
    """
    # When given a path rather than a parsed ChainFile, validate the path
    # here; the actual parsing happens further down.
    if not isinstance(chain_file, ChainFile):
        chain_file = g2g_fu.check_file(chain_file)

    input_file = g2g_fu.check_file(input_file)
    output_file_name = g2g_fu.check_file(output_file, 'w')
    # Records that cannot be lifted over are written here instead.
    unmapped_file_name = "{0}.unmapped".format(output_file_name)

    LOG.info("CHAIN FILE: {0}".format(chain_file))
    LOG.info("INPUT FILE: {0}".format(input_file))
    LOG.info("OUTPUT FILE: {0}".format(output_file_name))
    LOG.info("UNMAPPED FILE: {0}".format(unmapped_file_name))

    if not isinstance(chain_file, ChainFile):
        LOG.info("Parsing chain file...")
        chain_file = ChainFile(chain_file, reverse=reverse)
        LOG.info("Chain file parsed")

    bed_out = open(output_file_name, "w")
    bed_unmapped_file = open(unmapped_file_name, "w")

    LOG.info("Converting BED file")

    bed_file = BED(input_file)

    total = 0
    success = 0
    fail = 0

    # BED is 0 based, bx-python is 0 based
    try:
        for record in bed_file:
            # skip over "track" lines (BED.next() returns None for them and
            # leaves the raw text in current_line)
            if not bed_file.current_line_is_bed:
                bed_out.write(bed_file.current_line)
                bed_out.write("\n")
                continue

            total += 1

            mappings = chain_file.find_mappings(record.chrom, record.start, record.end)

            # unmapped: copy the original line to the .unmapped file instead
            if mappings:
                success += 1
            else:
                LOG.debug("Fail due to no mappings")
                bed_unmapped_file.write(bed_file.current_line)
                fail += 1
                continue

            # New interval spans from the first mapped block to the last.
            start = mappings[0].to_start
            end = mappings[-1].to_end

            LOG.debug("({0}, {1}) => ({2}, {3})".format(record.start, record.end, start, end))

            # Rewrite only the start/end columns; everything else passes
            # through from the input line.
            elems = bed_file.current_line.split()
            elems[1] = start
            elems[2] = end

            bed_out.write("\t".join(map(str, elems)))
            bed_out.write("\n")

        bed_out.close()
        bed_unmapped_file.close()
        LOG.info("Converted {0} of {1} records".format(success, total))
    except G2GLocationError, le:
        LOG.error("{0}: {1}".format(le.message, bed_file.current_line))

    LOG.info('BED file converted')
|
laiqiqi886/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/tkinter/filedialog.py
|
108
|
"""File selection dialog classes.
Classes:
- FileDialog
- LoadFileDialog
- SaveFileDialog
This module also presents tk common file dialogues, it provides interfaces
to the native file dialogues available in Tk 4.2 and newer, and the
directory dialogue available in Tk 8.3 and newer.
These interfaces were written by Fredrik Lundh, May 1997.
"""
from tkinter import *
from tkinter.dialog import Dialog
from tkinter import commondialog
import os
import fnmatch
dialogstates = {}
class FileDialog:
"""Standard file selection dialog -- no checks on selected file.
Usage:
d = FileDialog(master)
fname = d.go(dir_or_file, pattern, default, key)
if fname is None: ...canceled...
else: ...open file...
All arguments to go() are optional.
The 'key' argument specifies a key in the global dictionary
'dialogstates', which keeps track of the values for the directory
and pattern arguments, overriding the values passed in (it does
not keep track of the default argument!). If no key is specified,
the dialog keeps no memory of previous state. Note that memory is
kept even when the dialog is canceled. (All this emulates the
behavior of the Macintosh file selection dialogs.)
"""
title = "File Selection Dialog"
def __init__(self, master, title=None):
    # Build the dialog: a selection entry at the bottom, a filter entry at
    # the top, dirs/files list boxes in the middle, and OK/Filter/Cancel
    # buttons along the bottom frame.
    if title is None: title = self.title
    self.master = master
    self.directory = None

    self.top = Toplevel(master)
    self.top.title(title)
    self.top.iconname(title)

    self.botframe = Frame(self.top)
    self.botframe.pack(side=BOTTOM, fill=X)

    self.selection = Entry(self.top)
    self.selection.pack(side=BOTTOM, fill=X)
    self.selection.bind('<Return>', self.ok_event)

    self.filter = Entry(self.top)
    self.filter.pack(side=TOP, fill=X)
    self.filter.bind('<Return>', self.filter_command)

    self.midframe = Frame(self.top)
    self.midframe.pack(expand=YES, fill=BOTH)

    self.filesbar = Scrollbar(self.midframe)
    self.filesbar.pack(side=RIGHT, fill=Y)
    self.files = Listbox(self.midframe, exportselection=0,
                         yscrollcommand=(self.filesbar, 'set'))
    self.files.pack(side=RIGHT, expand=YES, fill=BOTH)
    # Rotate bindtags — presumably so the Listbox class binding updates the
    # selection before our instance handlers run; verify against Tk docs.
    btags = self.files.bindtags()
    self.files.bindtags(btags[1:] + btags[:1])
    self.files.bind('<ButtonRelease-1>', self.files_select_event)
    self.files.bind('<Double-ButtonRelease-1>', self.files_double_event)
    self.filesbar.config(command=(self.files, 'yview'))

    self.dirsbar = Scrollbar(self.midframe)
    self.dirsbar.pack(side=LEFT, fill=Y)
    self.dirs = Listbox(self.midframe, exportselection=0,
                        yscrollcommand=(self.dirsbar, 'set'))
    self.dirs.pack(side=LEFT, expand=YES, fill=BOTH)
    self.dirsbar.config(command=(self.dirs, 'yview'))
    # Same bindtag rotation for the directory list.
    btags = self.dirs.bindtags()
    self.dirs.bindtags(btags[1:] + btags[:1])
    self.dirs.bind('<ButtonRelease-1>', self.dirs_select_event)
    self.dirs.bind('<Double-ButtonRelease-1>', self.dirs_double_event)

    self.ok_button = Button(self.botframe,
                            text="OK",
                            command=self.ok_command)
    self.ok_button.pack(side=LEFT)
    self.filter_button = Button(self.botframe,
                                text="Filter",
                                command=self.filter_command)
    self.filter_button.pack(side=LEFT, expand=YES)
    self.cancel_button = Button(self.botframe,
                                text="Cancel",
                                command=self.cancel_command)
    self.cancel_button.pack(side=RIGHT)

    # Closing the window acts like Cancel.
    self.top.protocol('WM_DELETE_WINDOW', self.cancel_command)
    # XXX Are the following okay for a general audience?
    self.top.bind('<Alt-w>', self.cancel_command)
    self.top.bind('<Alt-W>', self.cancel_command)
def go(self, dir_or_file=os.curdir, pattern="*", default="", key=None):
    """Run the dialog modally; return the chosen path, or None on cancel.

    dir_or_file -- initial directory, or a file whose directory is used
                   (its basename then seeds the selection entry)
    pattern     -- initial glob pattern for the file listbox
    default     -- initial contents of the selection entry
    key         -- if given, directory and pattern are remembered across
                   invocations in the module-level ``dialogstates`` dict
    """
    if key and key in dialogstates:
        # restore the directory/pattern from the previous run with this key
        self.directory, pattern = dialogstates[key]
    else:
        dir_or_file = os.path.expanduser(dir_or_file)
        if os.path.isdir(dir_or_file):
            self.directory = dir_or_file
        else:
            # a file was given: browse its directory, preselect its name
            self.directory, default = os.path.split(dir_or_file)
    self.set_filter(self.directory, pattern)
    self.set_selection(default)
    self.filter_command()
    self.selection.focus_set()
    self.top.wait_visibility() # window needs to be visible for the grab
    self.top.grab_set()
    self.how = None
    self.master.mainloop()          # Exited by self.quit(how)
    if key:
        # persist the (possibly updated) directory and pattern for next time
        directory, pattern = self.get_filter()
        if self.how:
            directory = os.path.dirname(self.how)
        dialogstates[key] = directory, pattern
    self.top.destroy()
    return self.how
def quit(self, how=None):
    """Stop the modal mainloop; ``how`` becomes go()'s return value."""
    self.how = how
    self.master.quit()              # Exit mainloop()
def dirs_double_event(self, event):
    """Double-click on a directory: refresh the listing (the preceding
    single-click select event has normally already updated the filter)."""
    self.filter_command()
def dirs_select_event(self, event):
    """Point the filter at the highlighted directory, keeping the pattern."""
    _, pattern = self.get_filter()
    chosen = self.dirs.get('active')
    target = os.path.normpath(os.path.join(self.directory, chosen))
    self.set_filter(target, pattern)
def files_double_event(self, event):
    """Double-click on a file: accept it as the selection."""
    self.ok_command()
def files_select_event(self, event):
    """Mirror the highlighted file name into the selection entry."""
    self.set_selection(self.files.get('active'))
def ok_event(self, event):
    """<Return> in the selection entry accepts the current selection."""
    self.ok_command()
def ok_command(self):
    """Terminate the dialog, returning the current selection.
    Subclasses override this to validate the selection first."""
    self.quit(self.get_selection())
def filter_command(self, event=None):
    """Re-read the filter entry and rebuild the directory/file listboxes."""
    dir, pat = self.get_filter()
    try:
        names = os.listdir(dir)
    except OSError:
        # unreadable or nonexistent directory: beep, keep old listing
        self.master.bell()
        return
    self.directory = dir
    self.set_filter(dir, pat)
    names.sort()
    subdirs = [os.pardir]           # always offer ".." first
    matchingfiles = []
    for name in names:
        fullname = os.path.join(dir, name)
        if os.path.isdir(fullname):
            subdirs.append(name)
        elif fnmatch.fnmatch(name, pat):
            matchingfiles.append(name)
    self.dirs.delete(0, END)
    for name in subdirs:
        self.dirs.insert(END, name)
    self.files.delete(0, END)
    for name in matchingfiles:
        self.files.insert(END, name)
    # keep only the basename in the selection entry; drop a bare "."
    head, tail = os.path.split(self.get_selection())
    if tail == os.curdir: tail = ''
    self.set_selection(tail)
def get_filter(self):
    """Return (directory, pattern) parsed from the filter entry."""
    text = os.path.expanduser(self.filter.get())
    # a trailing separator or an existing directory means "everything inside"
    if text.endswith(os.sep) or os.path.isdir(text):
        text = os.path.join(text, "*")
    return os.path.split(text)
def get_selection(self):
    """Return the user-expanded contents of the selection entry."""
    return os.path.expanduser(self.selection.get())
def cancel_command(self, event=None):
    """Dismiss the dialog; go() then returns None."""
    self.quit()
def set_filter(self, dir, pat):
    """Display directory ``dir`` and pattern ``pat`` in the filter entry,
    normalizing ``dir`` to an absolute path when possible."""
    if not os.path.isabs(dir):
        try:
            pwd = os.getcwd()
        except OSError:
            # the cwd may have been deleted; fall back to the relative path
            pwd = None
        if pwd:
            dir = os.path.join(pwd, dir)
            dir = os.path.normpath(dir)
    self.filter.delete(0, END)
    self.filter.insert(END, os.path.join(dir or os.curdir, pat or "*"))
def set_selection(self, file):
    """Show ``file`` (joined onto the current directory) in the selection entry."""
    full_path = os.path.join(self.directory, file)
    self.selection.delete(0, END)
    self.selection.insert(END, full_path)
class LoadFileDialog(FileDialog):
    """File selection dialog which checks that the file exists."""

    title = "Load File Selection Dialog"

    def ok_command(self):
        """Accept the selection only if it names an existing regular file;
        otherwise beep and keep the dialog open."""
        chosen = self.get_selection()
        if os.path.isfile(chosen):
            self.quit(chosen)
        else:
            self.master.bell()
class SaveFileDialog(FileDialog):
    """File selection dialog which checks that the file may be created."""
    title = "Save File Selection Dialog"
    def ok_command(self):
        # Accept the selection only if it is writable as a new or
        # overwritable file:
        #  - an existing directory can never be chosen;
        #  - an existing file requires explicit user confirmation;
        #  - a new file must live inside an existing directory.
        file = self.get_selection()
        if os.path.exists(file):
            if os.path.isdir(file):
                self.master.bell()
                return
            d = Dialog(self.top,
                       title="Overwrite Existing File Question",
                       text="Overwrite existing file %r?" % (file,),
                       bitmap='questhead',
                       default=1,
                       strings=("Yes", "Cancel"))
            # button index 0 is "Yes"; anything else aborts silently
            if d.num != 0:
                return
        else:
            head, tail = os.path.split(file)
            if not os.path.isdir(head):
                self.master.bell()
                return
        self.quit(file)
# For the following classes and modules:
#
# options (all have default values):
#
# - defaultextension: added to filename if not explicitly given
#
# - filetypes: sequence of (label, pattern) tuples. the same label
#   may occur with several patterns. use "*" as pattern to indicate
#   all files.
#
# - initialdir: initial directory. preserved by dialog instance.
#
# - initialfile: initial file (ignored by the open dialog). preserved
# by dialog instance.
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
# - multiple: if true user may select more than one file
#
# options for the directory chooser:
#
# - initialdir, parent, title: see above
#
# - mustexist: if true, user must pick an existing directory
#
class _Dialog(commondialog.Dialog):
    """Shared base for the native Tk file dialogs (tk_getOpenFile etc.)."""
    def _fixoptions(self):
        try:
            # make sure "filetypes" is a tuple
            self.options["filetypes"] = tuple(self.options["filetypes"])
        except KeyError:
            pass
    def _fixresult(self, widget, result):
        # Remember the chosen directory/file so the next dialog opens there,
        # and expose the result as ``self.filename`` for compatibility.
        if result:
            # keep directory and filename until next time
            # convert Tcl path objects to strings
            try:
                result = result.string
            except AttributeError:
                # it already is a string
                pass
            path, file = os.path.split(result)
            self.options["initialdir"] = path
            self.options["initialfile"] = file
        self.filename = result # compatibility
        return result
#
# file dialogs
class Open(_Dialog):
    "Ask for a filename to open"
    command = "tk_getOpenFile"
    def _fixresult(self, widget, result):
        # With the "multiple" option Tk may return a tuple of names, or
        # (without wantobjects) one Tcl list string that must be split.
        if isinstance(result, tuple):
            # multiple results:
            result = tuple([getattr(r, "string", r) for r in result])
            if result:
                path, file = os.path.split(result[0])
                self.options["initialdir"] = path
                # don't set initialfile or filename, as we have multiple of these
            return result
        if not widget.tk.wantobjects() and "multiple" in self.options:
            # Need to split result explicitly
            return self._fixresult(widget, widget.tk.splitlist(result))
        return _Dialog._fixresult(self, widget, result)
class SaveAs(_Dialog):
    "Ask for a filename to save as"
    command = "tk_getSaveFile"  # native Tk save-file dialog
# the directory dialog has its own _fix routines.
class Directory(commondialog.Dialog):
    "Ask for a directory"
    command = "tk_chooseDirectory"
    def _fixresult(self, widget, result):
        # Remember the chosen directory and expose it as ``self.directory``.
        if result:
            # convert Tcl path objects to strings
            try:
                result = result.string
            except AttributeError:
                # it already is a string
                pass
            # keep directory until next time
            self.options["initialdir"] = result
        self.directory = result # compatibility
        return result
#
# convenience stuff
def askopenfilename(**options):
    "Ask for a filename to open"
    # options are forwarded unchanged to the Open dialog (see option list above)
    return Open(**options).show()
def asksaveasfilename(**options):
    "Ask for a filename to save as"
    # options are forwarded unchanged to the SaveAs dialog
    return SaveAs(**options).show()
def askopenfilenames(**options):
    """Ask for multiple filenames to open
    Returns a list of filenames or empty list if
    cancel button selected
    """
    # force multiple-selection mode; Tk then returns a tuple of names
    options["multiple"]=1
    return Open(**options).show()
# FIXME: are the following perhaps a bit too convenient?
def askopenfile(mode = "r", **options):
    """Ask for a filename to open, and return the opened file object
    (or None if the dialog was cancelled)."""
    chosen = Open(**options).show()
    if not chosen:
        return None
    return open(chosen, mode)
def askopenfiles(mode = "r", **options):
    """Ask for multiple filenames and return the open file
    objects

    returns a list of open file objects or an empty list if
    cancel selected

    mode -- mode the files are opened with (passed straight to open())
    """
    # build the result with a comprehension instead of a manual
    # append loop; a cancelled dialog yields an empty sequence, so
    # this uniformly returns a (possibly empty) list
    return [open(filename, mode) for filename in askopenfilenames(**options)]
def asksaveasfile(mode = "w", **options):
    """Ask for a filename to save as, and return the opened file object
    (or None if the dialog was cancelled)."""
    chosen = SaveAs(**options).show()
    if not chosen:
        return None
    return open(chosen, mode)
def askdirectory (**options):
    "Ask for a directory, and return the file name"
    # options are forwarded unchanged to the Directory dialog
    return Directory(**options).show()
# --------------------------------------------------------------------
# test stuff
def test():
    """Simple interactive test program: exercises the load/save dialogs
    and the native open/save-as dialogs, echoing the chosen names."""
    root = Tk()
    root.withdraw()
    fd = LoadFileDialog(root)
    loadfile = fd.go(key="test")
    fd = SaveFileDialog(root)
    savefile = fd.go(key="test")
    print(loadfile, savefile)

    # Since the file name may contain non-ASCII characters, we need
    # to find an encoding that likely supports the file name, and
    # displays correctly on the terminal.

    # Start off with UTF-8
    enc = "utf-8"
    import sys

    # See whether CODESET is defined
    try:
        import locale
        locale.setlocale(locale.LC_ALL,'')
        enc = locale.nl_langinfo(locale.CODESET)
    except (ImportError, AttributeError):
        pass

    # dialog for opening files
    openfilename=askopenfilename(filetypes=[("all files", "*")])
    try:
        fp=open(openfilename,"r")
        fp.close()
    except OSError:
        # opening fails when the dialog was cancelled (empty name) or the
        # file is unreadable; report instead of crashing the demo.  A bare
        # "except:" here previously hid unrelated bugs as well.
        print("Could not open File: ")
        print(sys.exc_info()[1])

    print("open", openfilename.encode(enc))

    # dialog for saving files
    saveasfilename=asksaveasfilename()
    print("saveas", saveasfilename.encode(enc))


if __name__ == '__main__':
    test()
|
dgladkov/django
|
refs/heads/master
|
tests/order_with_respect_to/models.py
|
61
|
"""
Tests for the order_with_respect_to Meta attribute.
"""
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class Question(models.Model):
    # Target of Answer.question; answers are ordered per question via
    # Answer.Meta.order_with_respect_to.
    text = models.CharField(max_length=200)
@python_2_unicode_compatible
class Answer(models.Model):
    text = models.CharField(max_length=200)
    question = models.ForeignKey(Question, models.CASCADE)
    class Meta:
        # Django maintains an implicit _order field scoped per Question.
        order_with_respect_to = 'question'
    def __str__(self):
        return six.text_type(self.text)
@python_2_unicode_compatible
class Post(models.Model):
    title = models.CharField(max_length=200)
    parent = models.ForeignKey("self", models.SET_NULL, related_name="children", null=True)
    class Meta:
        # Self-referential case: children are ordered within their parent.
        order_with_respect_to = "parent"
    def __str__(self):
        return self.title
# order_with_respect_to points to a model with a OneToOneField primary key.
class Entity(models.Model):
    # Intentionally empty: exists only so Dimension can use it as a
    # OneToOneField primary key (edge case exercised by Component below).
    pass
class Dimension(models.Model):
    # OneToOneField used as the primary key; Component's
    # order_with_respect_to must resolve ordering through this PK.
    entity = models.OneToOneField('Entity', primary_key=True, on_delete=models.CASCADE)
class Component(models.Model):
    # Ordered within its Dimension (whose PK is itself a OneToOneField).
    dimension = models.ForeignKey('Dimension', on_delete=models.CASCADE)
    class Meta:
        order_with_respect_to = 'dimension'
|
misterdanb/micropython
|
refs/heads/master
|
tests/basics/dict_setdefault.py
|
116
|
# Exercise dict.setdefault() semantics:
#  - a missing key is inserted with the supplied default (None if omitted)
#    and that freshly stored value is returned;
#  - an existing key's stored value is returned and the default is ignored.
d = {}
print(d.setdefault(1))      # inserts 1 -> None, prints None
print(d.setdefault(1))      # 1 already present, still None
print(d.setdefault(5, 42))  # inserts 5 -> 42
print(d.setdefault(5, 1))   # 5 present, default 1 ignored -> 42
print(d[1])
print(d[5])
d.pop(5)
print(d.setdefault(5, 1))   # 5 was removed, re-inserted with 1
print(d[1])
print(d[5])
|
objmagic/heron
|
refs/heads/master
|
heron/common/src/python/utils/misc/pplan_helper.py
|
8
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''pplan_helper.py'''
import socket
from heron.proto import topology_pb2
from heron.common.src.python.utils.log import Log
from heron.common.src.python.utils.topology import TopologyContext, ICustomGrouping
import heron.common.src.python.pex_loader as pex_loader
from .custom_grouping_helper import CustomGroupingHelper
from .serializer import default_serializer
# pylint: disable=too-many-instance-attributes
class PhysicalPlanHelper(object):
  """Helper class for accessing Physical Plan

  :ivar pplan: Physical Plan protobuf message
  :ivar topology_pex_abs_path: Topology pex file's absolute path
  :ivar my_instance_id: instance id for this instance
  :ivar my_instance: Instance protobuf message for this instance
  :ivar my_component_name: component name for this instance
  :ivar my_task_id: global task id for this instance
  :ivar is_spout: ``True`` if it's spout, ``False`` if it's bolt
  :ivar hostname: hostname of this instance
  :ivar my_component: Component protobuf message for this instance
  :ivar context: Topology context if set, otherwise ``None``
  """
  def __init__(self, pplan, instance_id, topology_pex_abs_path):
    """Initialize the helper from the physical plan.

    :param pplan: PhysicalPlan protobuf message
    :param instance_id: id identifying this instance within the plan
    :param topology_pex_abs_path: absolute path to the topology pex file
    :raises RuntimeError: if no instance in the plan matches ``instance_id``,
        or the matching component is neither a spout nor a bolt
    """
    self.pplan = pplan
    self.my_instance_id = instance_id
    self.my_instance = None
    self.topology_pex_abs_path = topology_pex_abs_path
    # get my instance
    for instance in pplan.instances:
      if instance.instance_id == self.my_instance_id:
        self.my_instance = instance
        break
    if self.my_instance is None:
      raise RuntimeError("There was no instance that matched my id: %s" % self.my_instance_id)
    self.my_component_name = self.my_instance.info.component_name
    self.my_task_id = self.my_instance.info.task_id
    # get spout or bolt
    self._my_spbl, self.is_spout = self._get_my_spout_or_bolt(pplan.topology)
    # Map <stream id -> number of fields in that stream's schema>
    self._output_schema = dict()
    outputs = self._my_spbl.outputs
    # setup output schema
    for out_stream in outputs:
      self._output_schema[out_stream.stream.id] = len(out_stream.schema.keys)
    self.hostname = socket.gethostname()
    self.my_component = self._my_spbl.comp
    self.context = None
    # setup for custom grouping
    self.custom_grouper = CustomGroupingHelper()
    self._setup_custom_grouping(pplan.topology)
  def _get_my_spout_or_bolt(self, topology):
    # Scan both spouts and bolts for the unique component bearing my name;
    # duplicates and misses are hard errors.
    my_spbl = None
    for spbl in list(topology.spouts) + list(topology.bolts):
      if spbl.comp.name == self.my_component_name:
        if my_spbl is not None:
          raise RuntimeError("Duplicate my component found")
        my_spbl = spbl
    if isinstance(my_spbl, topology_pb2.Spout):
      is_spout = True
    elif isinstance(my_spbl, topology_pb2.Bolt):
      is_spout = False
    else:
      raise RuntimeError("My component neither spout nor bolt")
    return my_spbl, is_spout
  def check_output_schema(self, stream_id, tup):
    """Checks if a given stream_id and tuple matches with the output schema

    :type stream_id: str
    :param stream_id: stream id into which tuple is sent
    :type tup: list
    :param tup: tuple that is going to be sent
    :raises RuntimeError: if the stream was not declared, or the number of
        fields does not match the declared schema
    """
    # do some checking to make sure that the number of fields match what's expected
    size = self._output_schema.get(stream_id, None)
    if size is None:
      raise RuntimeError("%s emitting to stream %s but was not declared in output fields"
                         % (self.my_component_name, stream_id))
    elif size != len(tup):
      raise RuntimeError("Number of fields emitted in stream %s does not match what's expected. "
                         "Expected: %s, Observed: %s" % (stream_id, size, len(tup)))
  def get_my_spout(self):
    """Returns spout instance, or ``None`` if bolt is assigned"""
    if self.is_spout:
      return self._my_spbl
    else:
      return None
  def get_my_bolt(self):
    """Returns bolt instance, or ``None`` if spout is assigned"""
    if self.is_spout:
      return None
    else:
      return self._my_spbl
  def get_topology_state(self):
    """Returns the current topology state"""
    return self.pplan.topology.state
  def is_topology_running(self):
    """Checks whether topology is currently running"""
    return self.pplan.topology.state == topology_pb2.TopologyState.Value("RUNNING")
  def is_topology_paused(self):
    """Checks whether topology is currently paused"""
    return self.pplan.topology.state == topology_pb2.TopologyState.Value("PAUSED")
  def is_topology_killed(self):
    """Checks whether topology is already killed"""
    return self.pplan.topology.state == topology_pb2.TopologyState.Value("KILLED")
  def get_topology_config(self):
    """Returns the topology config as a dict (empty if none is set)"""
    if self.pplan.topology.HasField("topology_config"):
      return self._get_dict_from_config(self.pplan.topology.topology_config)
    else:
      return {}
  def set_topology_context(self, metrics_collector):
    """Sets a new topology context

    Component-level config overrides topology-level config for this instance.
    """
    Log.debug("Setting topology context")
    cluster_config = self.get_topology_config()
    cluster_config.update(self._get_dict_from_config(self.my_component.config))
    task_to_component_map = self._get_task_to_comp_map()
    self.context = TopologyContext(cluster_config, self.pplan.topology, task_to_component_map,
                                   self.my_task_id, metrics_collector, self.topology_pex_abs_path)
  @staticmethod
  def _get_dict_from_config(topology_config):
    """Converts Config protobuf message to python dictionary

    Values are converted according to the rules below:

    - Number string (e.g. "12" or "1.2") is appropriately converted to ``int`` or ``float``
    - Boolean string ("true", "True", "false" or "False") is converted to built-in boolean type
      (i.e. ``True`` or ``False``)
    - Normal string is inserted to dict as is
    - Serialized value is deserialized and inserted as a corresponding Python object

    Unsupported entries are logged and skipped rather than raising.
    """
    config = {}
    for kv in topology_config.kvs:
      if kv.HasField("value"):
        assert kv.type == topology_pb2.ConfigValueType.Value("STRING_VALUE")
        # value is string
        if PhysicalPlanHelper._is_number(kv.value):
          config[kv.key] = PhysicalPlanHelper._get_number(kv.value)
        elif kv.value.lower() in ("true", "false"):
          config[kv.key] = True if kv.value.lower() == "true" else False
        else:
          config[kv.key] = kv.value
      elif kv.HasField("serialized_value") and \
        kv.type == topology_pb2.ConfigValueType.Value("PYTHON_SERIALIZED_VALUE"):
        # deserialize that
        config[kv.key] = default_serializer.deserialize(kv.serialized_value)
      else:
        assert kv.HasField("type")
        Log.error("Unsupported config <key:value> found: %s, with type: %s"
                  % (str(kv), str(kv.type)))
        continue
    return config
  @staticmethod
  def _is_number(string):
    # True if the string parses as a float (ints parse as floats too)
    try:
      float(string)
      return True
    except ValueError:
      return False
  @staticmethod
  def _get_number(string):
    # Prefer int; fall back to float for values like "1.2"
    try:
      return int(string)
    except ValueError:
      return float(string)
  def _get_task_to_comp_map(self):
    # Map <task id -> component name> over every instance in the plan
    ret = {}
    for instance in self.pplan.instances:
      ret[instance.info.task_id] = instance.info.component_name
    return ret
  ##### custom grouping related #####
  def _setup_custom_grouping(self, topology):
    """Checks whether there are any bolts that consume any of my streams using custom grouping"""
    for i in range(len(topology.bolts)):
      for in_stream in topology.bolts[i].inputs:
        if in_stream.stream.component_name == self.my_component_name and \
          in_stream.gtype == topology_pb2.Grouping.Value("CUSTOM"):
          # this bolt takes my output in custom grouping manner
          if in_stream.type == topology_pb2.CustomGroupingObjectType.Value("PYTHON_OBJECT"):
            # the grouping object is a serialized class path; load it from the pex
            grouping_class_path = default_serializer.deserialize(in_stream.custom_grouping_object)
            pex_loader.load_pex(self.topology_pex_abs_path)
            grouping_cls = \
              pex_loader.import_and_get_class(self.topology_pex_abs_path, grouping_class_path)
            custom_grouping_obj = grouping_cls()
            assert isinstance(custom_grouping_obj, ICustomGrouping)
            self.custom_grouper.add(in_stream.stream.id,
                                    self._get_taskids_for_component(topology.bolts[i].comp.name),
                                    custom_grouping_obj,
                                    self.my_component_name)
          elif in_stream.type == topology_pb2.CustomGroupingObjectType.Value("JAVA_OBJECT"):
            raise NotImplementedError("Java-serialized custom grouping is not yet supported "
                                      "for python topology")
          else:
            raise ValueError("Unrecognized custom grouping type found: %s" % str(in_stream.type))
  def _get_taskids_for_component(self, component_name):
    # All task ids across the plan belonging to the named component
    return [instance.info.task_id for instance in self.pplan.instances
            if instance.info.component_name == component_name]
  def prepare_custom_grouping(self, context):
    """Prepares for custom grouping for this component

    :param context: Topology context
    """
    self.custom_grouper.prepare(context)
  def choose_tasks_for_custom_grouping(self, stream_id, values):
    """Choose target task ids for custom grouping

    :return: task ids
    """
    return self.custom_grouper.choose_tasks(stream_id, values)
|
slevenhagen/odoo-npg
|
refs/heads/8.0
|
addons/l10n_ca/__openerp__.py
|
260
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest: Canadian (English/French) localization chart of
# accounts, tax codes and fiscal position templates.
{
    'name': 'Canada - Accounting',
    'version': '1.2',
    'author': 'Savoir-faire Linux',
    'website': 'http://www.savoirfairelinux.com',
    'category': 'Localization/Account Charts',
    'description': """
This is the module to manage the English and French - Canadian accounting chart in OpenERP.
===========================================================================================
Canadian accounting charts and localizations.
Fiscal positions
----------------
When considering taxes to be applied, it is the province where the delivery occurs that matters.
Therefore we decided to implement the most common case in the fiscal positions: delivery is the
responsibility of the supplier and done at the customer location.
Some examples:
1) You have a customer from another province and you deliver to his location.
On the customer, set the fiscal position to his province.
2) You have a customer from another province. However this customer comes to your location
with their truck to pick up products. On the customer, do not set any fiscal position.
3) An international supplier doesn't charge you any tax. Taxes are charged at customs
by the customs broker. On the supplier, set the fiscal position to International.
4) An international supplier charge you your provincial tax. They are registered with your
provincial government and remit taxes themselves. On the supplier, do not set any fiscal
position.
""",
    'depends': [
        'base',
        'account',
        'base_iban',
        'base_vat',
        'account_chart',
        'account_anglo_saxon'
    ],
    # Chart/tax/fiscal-position data, in English and French variants.
    'data': [
        'account_chart_en.xml',
        'account_tax_code_en.xml',
        'account_chart_template_en.xml',
        'account_tax_en.xml',
        'fiscal_templates_en.xml',
        'account_chart_fr.xml',
        'account_tax_code_fr.xml',
        'account_chart_template_fr.xml',
        'account_tax_fr.xml',
        'fiscal_templates_fr.xml',
        'l10n_ca_wizard.xml'
    ],
    'demo': [],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
StefanRijnhart/odoo
|
refs/heads/master
|
addons/website_sale_delivery/__openerp__.py
|
321
|
# Odoo addon manifest: adds delivery-cost computation to the
# website_sale eCommerce checkout.
{
    'name': 'eCommerce Delivery',
    'category': 'Website',
    'summary': 'Add Delivery Costs to Online Sales',
    'website': 'https://www.odoo.com/page/e-commerce',
    'version': '1.0',
    'description': """
Delivery Costs
==============
""",
    'author': 'OpenERP SA',
    'depends': ['website_sale', 'delivery'],
    'data': [
        'views/website_sale_delivery.xml',
        'views/website_sale_delivery_view.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'qweb': [],
    'installable': True,
}
|
oculusstorystudio/kraken
|
refs/heads/develop_OSS
|
Python/kraken/core/maths/mat33.py
|
1
|
"""Kraken - maths.matrix module.
Classes:
Mat33 -- Matrix 3 transform object.
"""
from kraken.core.kraken_system import ks
from kraken.core.maths.math_object import MathObject
from kraken.core.maths.vec3 import Vec3
class Mat33(MathObject):
    """3x3 Matrix object."""

    def __init__(self, row0=None, row1=None, row2=None):
        """Initialize and set values in the 3x3 matrix."""
        super(Mat33, self).__init__()
        if ks.getRTValTypeName(row0) == 'Mat33':
            self._rtval = row0
        else:
            self._rtval = ks.rtVal('Mat33')
            if isinstance(row0, Mat33):
                self.setRows(row0=row0.row0, row1=row0.row1, row2=row0.row2)
            elif row0 is not None and row1 is not None and row2 is not None:
                self.setRows(row0, row1, row2)

    def __str__(self):
        """Return a string representation of the 3x3 matrix."""
        return "Mat33(" + str(self.row0) + ", " + str(self.row1) + ", " + str(self.row2) + ")"

    @property
    def row0(self):
        """Gets row 0 of this matrix.

        Returns:
            Vec3: Row 0 vector.

        """
        return Vec3(self._rtval.row0)

    @row0.setter
    def row0(self, value):
        """Sets row 0 as the input vector.

        Args:
            value (Vec3): Vector to set row 0 as.

        Returns:
            bool: True if successful.

        """
        self._rtval.row0 = ks.rtVal('Vec3', value)
        return True

    @property
    def row1(self):
        """Gets row 1 of this matrix.

        Returns:
            Vec3: row 1 vector.

        """
        return Vec3(self._rtval.row1)

    @row1.setter
    def row1(self, value):
        """Sets row 1 as the input vector.

        Args:
            value (Vec3): Vector to set row 1 as.

        Returns:
            bool: True if successful.

        """
        self._rtval.row1 = ks.rtVal('Vec3', value)
        return True

    @property
    def row2(self):
        """Gets row 2 of this matrix.

        Returns:
            Vec3: row 2 vector.

        """
        return Vec3(self._rtval.row2)

    @row2.setter
    def row2(self, value):
        """Sets row 2 as the input vector.

        Args:
            value (Vec3): Vector to set row 2 as.

        Returns:
            bool: True if successful.

        """
        self._rtval.row2 = ks.rtVal('Vec3', value)
        return True

    def __eq__(self, other):
        return self.equal(other)

    def __ne__(self, other):
        return not self.equal(other)

    def __add__(self, other):
        return self.add(other)

    def __sub__(self, other):
        return self.subtract(other)

    def __mul__(self, other):
        return self.multiply(other)

    def clone(self):
        """Returns a clone of the Mat33.

        Returns:
            Mat33: The cloned Mat33.

        """
        mat33 = Mat33()
        mat33.row0 = self.row0.clone()
        mat33.row1 = self.row1.clone()
        mat33.row2 = self.row2.clone()
        return mat33

    def setRows(self, row0, row1, row2):
        """Setter from vectors, row-wise.

        Args:
            row0 (Vec3): Vector to use to set row 0.
            row1 (Vec3): Vector to use to set row 1.
            row2 (Vec3): Vector to use to set row 2.

        Returns:
            bool: True if successful.

        """
        self._rtval.setRows('', ks.rtVal('Vec3', row0), ks.rtVal('Vec3', row1), ks.rtVal('Vec3', row2))
        return True

    def setColumns(self, col0, col1, col2):
        """Setter from vectors, column-wise.

        Args:
            col0 (Vec3): Vector to use to set column 0.
            col1 (Vec3): Vector to use to set column 1.
            col2 (Vec3): Vector to use to set column 2.

        Returns:
            bool: True if successful.

        """
        self._rtval.setColumns('', ks.rtVal('Vec3', col0), ks.rtVal('Vec3', col1), ks.rtVal('Vec3', col2))
        return True

    def setNull(self):
        """Setting all components of the matrix to 0.0.

        Returns:
            bool: True if successful.

        """
        self._rtval.setNull('')
        return True

    def setIdentity(self):
        """Sets this matrix to the identity matrix.

        Returns:
            bool: True if successful.

        """
        self._rtval.setIdentity('')
        return True

    def setDiagonal(self, v):
        """Sets the diagonal components of this matrix to a scalar.

        Args:
            v (float): value to set diagonals to.

        Returns:
            bool: True if successful.

        """
        self._rtval.setDiagonal('', ks.rtVal('Scalar', v))
        return True

    def setDiagonalVec3(self, v):
        """Sets the diagonal components of this matrix to the components of a
        vector.

        Args:
            v (Vec3): Vector to set diagonals to.

        Returns:
            bool: True if successful.

        """
        self._rtval.setDiagonal('', ks.rtVal('Vec3', v))
        return True

    def equal(self, other):
        """Checks equality of this Matrix33 with another.

        Args:
            other (Mat33): Other matrix to check equality with.

        Returns:
            bool: True if equal.

        """
        return self._rtval.equal('Boolean', ks.rtVal('Mat33', other)).getSimpleType()

    def almostEqual(self, other, precision=None):
        """Checks almost equality of this Matrix33 with another.

        Args:
            other (Mat33): Other matrix to check equality with.
            precision (float): precision value.

        Returns:
            bool: True if almost equal.

        """
        if precision is not None:
            return self._rtval.almostEqual('Boolean', ks.rtVal('Mat33', other), ks.rtVal('Scalar', precision)).getSimpleType()
        else:
            return self._rtval.almostEqual('Boolean', ks.rtVal('Mat33', other)).getSimpleType()

    def add(self, other):
        """Overload method for the add operator.

        Args:
            other (Mat33): Other matrix to add to this one.

        Returns:
            Mat33: New Mat33 of the sum of the two Mat33's.

        """
        return Mat33(self._rtval.add('Mat33', ks.rtVal('Mat33', other)))

    def subtract(self, other):
        """Overload method for the subtract operator.

        Args:
            other (Mat33): Other matrix to subtract from this one.

        Returns:
            Mat33: New Mat33 of the difference of the two Mat33's.

        """
        return Mat33(self._rtval.subtract('Mat33', ks.rtVal('Mat33', other)))

    def multiply(self, other):
        """Overload method for the multiply operator.

        Args:
            other (Mat33): Other matrix to multiply from this one.

        Returns:
            Mat33: New Mat33 of the product of the two Mat33's.

        """
        return Mat33(self._rtval.multiply('Mat33', ks.rtVal('Mat33', other)))

    def multiplyScalar(self, other):
        """Product of this matrix and a scalar.

        Args:
            other (float): scalar value to multiply this matrix by.

        Returns:
            Mat33: Product of the multiplication of the scalar and this matrix.

        """
        return Mat33(self._rtval.multiplyScalar('Mat33', ks.rtVal('Scalar', other)))

    def multiplyVector(self, other):
        """Returns the product of this matrix and a vector.

        Args:
            other (Vec3): Vector to multiply this matrix by.

        Returns:
            Vec3: product of the multiplication of the Vec3 and this matrix.

        """
        return Vec3(self._rtval.multiplyVector('Vec3', ks.rtVal('Vec3', other)))

    def divideScalar(self, other):
        """Divides this matrix and a scalar.

        Args:
            other (float): value to divide this matrix by.

        Returns:
            Mat33: Quotient of the division of the matrix by the scalar.

        """
        # Fix: wrap the raw Python number in an RTVal Scalar, consistent
        # with multiplyScalar/setDiagonal; previously the bare value was
        # passed straight through to the KL call.
        return Mat33(self._rtval.divideScalar('Mat33', ks.rtVal('Scalar', other)))

    def determinant(self):
        """Gets the determinant of this matrix.

        Returns:
            float: Determinant of this matrix.

        """
        return self._rtval.determinant('Scalar').getSimpleType()

    def adjoint(self):
        """Gets the adjoint matrix of this matrix.

        Returns:
            Mat33: Adjoint of this matrix.

        """
        return Mat33(self._rtval.adjoint('Mat33'))

    def inverse(self):
        """Get the inverse matrix of this matrix.

        Returns:
            Mat33: Inverse of this matrix.

        """
        return Mat33(self._rtval.inverse('Mat33'))

    def inverse_safe(self):
        """Get the inverse matrix of this matrix, always checking the
        determinant value.

        Returns:
            Mat33: Safe inverse of this matrix.

        """
        return Mat33(self._rtval.inverse_safe('Mat33'))

    def transpose(self):
        """Get the transposed matrix of this matrix.

        Returns:
            Mat33: Transpose of this matrix.

        """
        return Mat33(self._rtval.transpose('Mat33'))
|
chienlieu2017/it_management
|
refs/heads/master
|
odoo/addons/pos_discount/__init__.py
|
1516
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
|
Chasego/codi
|
refs/heads/master
|
leetcode/209-Minimum-Size-Subarray-Sum/MinSizeSubarrSum_001.py
|
5
|
class Solution(object):
    def minSubArrayLen(self, s, nums):
        """Return the length of the shortest contiguous subarray of
        ``nums`` whose sum is >= ``s``, or 0 if no such subarray exists.

        Single-pass sliding window: grow the window on the right, then
        shrink from the left while it still satisfies the target sum,
        recording the shortest satisfying window seen.  This removes the
        duplicated shrink logic the previous version needed after its
        main loop, and no longer pre-scans the array with sum().
        O(n) time, O(1) extra space.

        :type s: int
        :param s: target sum (assumed >= 1)
        :type nums: List[int]
        :param nums: sequence of non-negative integers
        :rtype: int
        """
        sentinel = len(nums) + 1   # longer than any real window
        best = sentinel
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            # shrink while the window still meets the target
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return 0 if best == sentinel else best
|
HonzaKral/django
|
refs/heads/master
|
tests/utils_tests/test_safestring.py
|
278
|
from __future__ import unicode_literals
from django.template import Context, Template
from django.test import SimpleTestCase
from django.utils import html, six, text
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import lazy
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
# Lazy factories: defer the conversion to text/bytes until first access,
# so mark_safe/mark_for_escaping can be exercised against lazy objects.
lazystr = lazy(force_text, six.text_type)
lazybytes = lazy(force_bytes, bytes)
class customescape(six.text_type):
    """Text subclass whose __html__ escapes incorrectly on purpose, so
    tests can tell unambiguously when custom escaping was invoked."""
    def __html__(self):
        # implement specific and obviously wrong escaping
        # in order to be able to tell for sure when it runs
        return self.replace('<', '<<').replace('>', '>>')
class SafeStringTest(SimpleTestCase):
    """Tests for mark_safe/mark_for_escaping interaction with the
    template engine's autoescaping."""
    def assertRenderEqual(self, tpl, expected, **context):
        # Render ``tpl`` with ``context`` and compare against ``expected``.
        context = Context(context)
        tpl = Template(tpl)
        self.assertEqual(tpl.render(context), expected)
    def test_mark_safe(self):
        s = mark_safe('a&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)
        self.assertRenderEqual('{{ s|force_escape }}', 'a&amp;b', s=s)
    def test_mark_safe_object_implementing_dunder_html(self):
        # an object with __html__ is returned unchanged by mark_safe
        e = customescape('<a&b>')
        s = mark_safe(e)
        self.assertIs(s, e)
        self.assertRenderEqual('{{ s }}', '<<a&b>>', s=s)
        self.assertRenderEqual('{{ s|force_escape }}', '&lt;a&amp;b&gt;', s=s)
    def test_mark_safe_lazy(self):
        s = lazystr('a&b')
        b = lazybytes(b'a&b')
        self.assertIsInstance(mark_safe(s), SafeData)
        self.assertIsInstance(mark_safe(b), SafeData)
        self.assertRenderEqual('{{ s }}', 'a&b', s=mark_safe(s))
    def test_mark_safe_object_implementing_dunder_str(self):
        class Obj(object):
            def __str__(self):
                return '<obj>'
        s = mark_safe(Obj())
        self.assertRenderEqual('{{ s }}', '<obj>', s=s)
    def test_mark_safe_result_implements_dunder_html(self):
        self.assertEqual(mark_safe('a&b').__html__(), 'a&b')
    def test_mark_safe_lazy_result_implements_dunder_html(self):
        self.assertEqual(mark_safe(lazystr('a&b')).__html__(), 'a&b')
    def test_mark_for_escaping(self):
        s = mark_for_escaping('a&b')
        self.assertRenderEqual('{{ s }}', 'a&amp;b', s=s)
        # marking twice must not double-escape
        self.assertRenderEqual('{{ s }}', 'a&amp;b', s=mark_for_escaping(s))
    def test_mark_for_escaping_object_implementing_dunder_html(self):
        # an object with __html__ is returned unchanged by mark_for_escaping
        e = customescape('<a&b>')
        s = mark_for_escaping(e)
        self.assertIs(s, e)
        self.assertRenderEqual('{{ s }}', '<<a&b>>', s=s)
        self.assertRenderEqual('{{ s|force_escape }}', '&lt;a&amp;b&gt;', s=s)
    def test_mark_for_escaping_lazy(self):
        s = lazystr('a&b')
        b = lazybytes(b'a&b')
        self.assertIsInstance(mark_for_escaping(s), EscapeData)
        self.assertIsInstance(mark_for_escaping(b), EscapeData)
        # escaping still applies even with autoescape off
        self.assertRenderEqual('{% autoescape off %}{{ s }}{% endautoescape %}', 'a&amp;b', s=mark_for_escaping(s))
    def test_mark_for_escaping_object_implementing_dunder_str(self):
        class Obj(object):
            def __str__(self):
                return '&lt;obj&gt;'
        s = mark_for_escaping(Obj())
        self.assertRenderEqual('{{ s }}', '&lt;obj&gt;', s=s)
    def test_add_lazy_safe_text_and_safe_text(self):
        # concatenating safe text onto a lazily-escaped/processed string
        # must keep the result safe (no double escaping of '&b')
        s = html.escape(lazystr('a'))
        s += mark_safe('&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)
        s = html.escapejs(lazystr('a'))
        s += mark_safe('&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)
        s = text.slugify(lazystr('a'))
        s += mark_safe('&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)
|
BirkbeckCTP/janeway
|
refs/heads/master
|
src/core/templatetags/article.py
|
1
|
from django import template
register = template.Library()
@register.simple_tag()
def article_active_user_review(request, article):
    """Template tag: return the article's active review request assigned to
    the currently logged-in user (delegates to the Article model)."""
    return article.active_review_request_for_user(request.user)
|
vnsofthe/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_invoice_state.py
|
340
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_invoice_confirm(osv.osv_memory):
    """
    This wizard will confirm the all the selected draft invoices
    """
    _name = "account.invoice.confirm"
    _description = "Confirm the selected invoices"

    def invoice_confirm(self, cr, uid, ids, context=None):
        """Confirm every invoice selected in the client (context['active_ids']).

        Raises an osv warning when any selected invoice is not in a
        draft / pro-forma state; otherwise fires the 'invoice_open'
        workflow signal on each record and closes the wizard window.
        """
        if context is None:
            context = {}
        active_ids = context.get('active_ids', []) or []
        proxy = self.pool['account.invoice']
        for record in proxy.browse(cr, uid, active_ids, context=context):
            # Only draft / pro-forma invoices may be confirmed.
            if record.state not in ('draft', 'proforma', 'proforma2'):
                raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be confirmed as they are not in 'Draft' or 'Pro-Forma' state."))
            record.signal_workflow('invoice_open')
        return {'type': 'ir.actions.act_window_close'}
class account_invoice_cancel(osv.osv_memory):
    """
    This wizard will cancel the all the selected invoices.
    If in the journal, the option allow cancelling entry is not selected then it will give warning message.
    """
    _name = "account.invoice.cancel"
    _description = "Cancel the Selected Invoices"

    def invoice_cancel(self, cr, uid, ids, context=None):
        """Cancel every invoice selected in the client (context['active_ids']).

        Raises an osv warning when any selected invoice is already
        cancelled or paid; otherwise fires the 'invoice_cancel'
        workflow signal on each record and closes the wizard window.
        """
        if context is None:
            context = {}
        proxy = self.pool['account.invoice']
        active_ids = context.get('active_ids', []) or []
        for record in proxy.browse(cr, uid, active_ids, context=context):
            # Already-final invoices cannot be cancelled.
            if record.state in ('cancel','paid'):
                raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be cancelled as they are already in 'Cancelled' or 'Done' state."))
            record.signal_workflow('invoice_cancel')
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ODM2/ODMToolsPython
|
refs/heads/master
|
odmtools/gui/pnlScript.py
|
1
|
import os
import wx
from wx.lib.pubsub import pub as Publisher
from odmtools.controller import odmHighlightSTC
# Explicit wx command identifiers for the editor actions and execute buttons.
ID_NEW = 101
ID_OPEN = 102
ID_SAVE = 103
ID_SAVE_AS = 104
ID_EXECUTE_BUTTON = 300
ID_EXECUTE_SELECTION_BUTTON = 301
ID_EXECUTE_LINE_BUTTON = 302
#wildcard = "Python Source (*.py, .py)|*" #All files (*.*)|*.*"
# File-dialog filter: Python sources first, then all files.
wildcard = "Python source (*.py)|*.py|" \
           "All files (*.*)|*.*"
class pnlScript(wx.Panel):
    """Script-editor panel: a syntax-highlighting text control plus a row of
    Execute buttons that feed the editor's contents to the parent frame's
    Python console.

    Fixes relative to the previous revision:
      * OnSave's branches were inverted (it prompted Save-As when a filename
        already existed and tried to write to an empty filename otherwise).
      * File handles are now closed via ``with`` even on error.
      * The Save-As dialog is destroyed on every exit path.
      * runCommand re-enables recording even if a command raises.
    """

    def __init__(self, parent, id=wx.ID_ANY, name="", pos=(0, 0), size=(200, 200)):
        wx.Panel.__init__(self, parent, id)
        self.console = parent.txtPythonConsole
        self.control = odmHighlightSTC.highlightSTC(self)
        self.parent = parent

        # Row of execute buttons above the editor control.
        self.sizer2 = wx.BoxSizer(wx.HORIZONTAL)
        self.executeButton = wx.Button(self, ID_EXECUTE_BUTTON, "&Execute")
        self.executeButton.Bind(wx.EVT_BUTTON, self.OnExecute)
        self.sizer2.Add(self.executeButton, 1, wx.ALIGN_LEFT)
        self.executeSelectionButton = wx.Button(self, ID_EXECUTE_SELECTION_BUTTON, "Execute &Selection")
        self.executeSelectionButton.Bind(wx.EVT_BUTTON, self.OnExecuteSelection)
        self.sizer2.Add(self.executeSelectionButton, 1, wx.ALIGN_LEFT)
        self.executeLineButton = wx.Button(self, ID_EXECUTE_LINE_BUTTON, "Execute &Line")
        self.executeLineButton.Bind(wx.EVT_BUTTON, self.OnExecuteLine)
        self.sizer2.Add(self.executeLineButton, 1, wx.ALIGN_LEFT)

        # Buttons on top, editor filling the rest of the panel.
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.sizer2, 0, wx.EXPAND)
        self.sizer.Add(self.control, 1, wx.EXPAND)
        self.SetSizer(self.sizer)
        self.sizer.Fit(self)

        self.dirname = ''
        self.filename = ''
        # Style slots used by write(); the STC allows 32 styles (0-31).
        self._styles = [None] * 32
        self._free = 1

    def newScript(self):
        """Clear the editor, reset the filename and announce the new title."""
        self.filename = ''
        self.control.SetText('')
        Publisher.sendMessage("script.title", title="Editing a new file")
        record_service = self.parent.getRecordService()
        record_service.write_header()

    def getOverwriteDialog(self):
        """Ask whether the current script should be saved before it is replaced."""
        return wx.MessageBox("Please check that your script has been saved before it is overwritten. "
                             "Would you like to save it now? \n\nSelecting 'No' will delete anything you "
                             "may have in the script", 'Save Script?', wx.CANCEL | wx.YES_NO | wx.ICON_EXCLAMATION | wx.NO_DEFAULT)

    def OnNew(self, e):
        """Start a new script, offering to save any existing content first."""
        if len(self.control.GetText()) > 0:
            val = self.getOverwriteDialog()
            if val == wx.YES:
                # Only discard the old content once it was saved successfully.
                if self.OnSaveAs(e):
                    self.newScript()
            elif val == wx.NO:
                self.newScript()
            # wx.CANCEL: keep everything as-is.
        else:
            self.newScript()

    def OnOpen(self, e):
        """Open a script file, offering to save the current content first."""
        if len(self.control.GetText()) > 0:
            val = self.getOverwriteDialog()
            if val == wx.YES:
                self.OnSaveAs(e)
            elif val == wx.CANCEL:
                return
            # wx.NO falls through and overwrites the buffer below.
        dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", wildcard, wx.OPEN | wx.CHANGE_DIR | wx.MULTIPLE)
        if dlg.ShowModal() == wx.ID_OK:
            self.filename = dlg.GetFilename()
            self.dirname = dlg.GetDirectory()
            # Load the chosen file into the editor and reset undo history.
            with open(os.path.join(self.dirname, self.filename), 'r') as filehandle:
                self.control.SetText(filehandle.read())
            self.control.EmptyUndoBuffer()
            self.setTitle("Editing: %s" % self.filename)
        dlg.Destroy()

    def OnSave(self, e):
        """Save to the current file; fall back to Save As when untitled."""
        if not self.filename:
            # No file chosen yet -- delegate to the Save As dialog.
            self.OnSaveAs(e)
        else:
            saved_text = self.control.GetText()
            with open(os.path.join(self.dirname, self.filename), 'w') as filehandle:
                filehandle.write(saved_text)
            self.setTitle("Editing: %s" % self.filename)

    def OnSaveAs(self, e):
        """Prompt for a destination and write the script there.

        Returns True when the file was written, False otherwise.
        """
        dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", wildcard, wx.SAVE | wx.OVERWRITE_PROMPT)
        result = dlg.ShowModal()
        if result == wx.ID_OK:
            saved_text = self.control.GetText()
            self.filename = dlg.GetFilename()
            self.dirname = dlg.GetDirectory()
            with open(os.path.join(self.dirname, self.filename), 'w') as filehandle:
                filehandle.write(saved_text)
            self.setTitle("Editing: %s" % self.filename)
            dlg.Destroy()
            return True
        dlg.Destroy()
        return False

    def runCommand(self, text):
        """Send each line of *text* to the Python console.

        Recording is paused while the lines replay so the commands are not
        re-recorded, and always resumed afterwards -- even if a line raises.
        """
        self.parent.record_service.toggle_record(False)
        try:
            for line in text.splitlines():
                self.console.shell.run(line)
        finally:
            self.parent.record_service.toggle_record(True)

    def OnExecute(self, e):
        """Execute the entire editor buffer."""
        self.runCommand(self.control.GetText())

    def OnExecuteSelection(self, e):
        """Execute only the currently selected text."""
        self.runCommand(self.control.GetSelectedTextRaw())

    def OnExecuteLine(self, e):
        """Execute the selection, or the caret's line when nothing is selected."""
        text = self.control.GetSelectedTextRaw()
        if text == "":
            text = self.control.GetLine(self.control.GetCurrentLine())
        self.runCommand(text)

    def newKeyPressed(self):
        """Mark the window title with '*' to show unsaved modifications."""
        if self.filename:
            title = "Editing: %s*" % self.filename
            self.setTitle(title)

    def setTitle(self, title):
        """Broadcast a title change for whoever owns the frame caption."""
        Publisher.sendMessage("script.title", title=title)

    def getStyle(self, c='black'):
        """
        Returns a style for a given colour if one exists. If no style
        exists for the colour, make a new style.

        If we run out of styles, (only 32 allowed here) we go to the top
        of the list and reuse previous styles.
        """
        free = self._free
        if c and isinstance(c, (str, unicode)):
            c = c.lower()
        else:
            c = 'black'
        try:
            return self._styles.index(c)
        except ValueError:
            style = free
            self._styles[style] = c
            self.control.StyleSetForeground(style, wx.NamedColour(c))
            free += 1
            if free > 31:
                # Wrap around and start reusing style slots.
                free = 0
            self._free = free
            return style

    def write(self, text, c=None):
        """
        Add the text to the end of the control using color c which
        should be suitable for feeding directly to wx.NamedColour.

        'text' should be a unicode string or contain only ascii data.
        """
        style = self.getStyle(c)
        # STC styling counts bytes, not characters.
        lenText = len(text.encode('utf8'))
        end = self.control.GetLength()
        self.control.AppendText(text)
        self.control.StartStyling(end, 31)
        self.control.SetStyling(lenText, style)
        self.control.EnsureCaretVisible()
        self.control.onUpdateUI(None)

    __call__ = write
|
marco-lancini/Showcase
|
refs/heads/master
|
django/middleware/common.py
|
157
|
import re
from django.conf import settings
from django import http
from django.core.mail import mail_managers
from django.utils.http import urlquote
from django.core import urlresolvers
from django.utils.hashcompat import md5_constructor
from django.utils.log import getLogger
logger = getLogger('django.request')
class CommonMiddleware(object):
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing
          "www."s.

            - If APPEND_SLASH is set and the initial URL doesn't end with a
              slash, and it is not found in urlpatterns, a new URL is formed by
              appending a slash at the end. If this new URL is found in
              urlpatterns, then an HTTP-redirect is returned to this new URL;
              otherwise the initial URL is processed as usual.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated from
          the entire page content and Not Modified responses will be returned
          appropriately.
    """

    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """

        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    logger.warning('Forbidden (User agent): %s' % request.path,
                        extra={
                            'status_code': 403,
                            'request': request
                        }
                    )
                    return http.HttpResponseForbidden('<h1>Forbidden</h1>')

        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        host = request.get_host()
        old_url = [host, request.path]
        new_url = old_url[:]

        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]

        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            urlconf = getattr(request, 'urlconf', None)
            if (not _is_valid_path(request.path_info, urlconf) and
                    _is_valid_path("%s/" % request.path_info, urlconf)):
                new_url[1] = new_url[1] + '/'
                if settings.DEBUG and request.method == 'POST':
                    # A redirect would silently drop the POST body, so fail
                    # loudly in DEBUG instead of losing the user's data.
                    raise RuntimeError, (""
                    "You called this URL via POST, but the URL doesn't end "
                    "in a slash and you have APPEND_SLASH set. Django can't "
                    "redirect to the slash URL while maintaining POST data. "
                    "Change your form to point to %s%s (note the trailing "
                    "slash), or set APPEND_SLASH=False in your Django "
                    "settings.") % (new_url[0], new_url[1])

        if new_url == old_url:
            # No redirects required.
            return
        if new_url[0]:
            newurl = "%s://%s%s" % (
                request.is_secure() and 'https' or 'http',
                new_url[0], urlquote(new_url[1]))
        else:
            newurl = urlquote(new_url[1])
        if request.GET:
            # Preserve the original query string on the redirect.
            newurl += '?' + request.META['QUERY_STRING']
        return http.HttpResponsePermanentRedirect(newurl)

    def process_response(self, request, response):
        "Send broken link emails and calculate the Etag, if needed."
        if response.status_code == 404:
            if settings.SEND_BROKEN_LINK_EMAILS and not settings.DEBUG:
                # If the referrer was from an internal link or a non-search-engine site,
                # send a note to the managers.
                domain = request.get_host()
                referer = request.META.get('HTTP_REFERER', None)
                is_internal = _is_internal_request(domain, referer)
                path = request.get_full_path()
                if referer and not _is_ignorable_404(path) and (is_internal or '?' not in referer):
                    ua = request.META.get('HTTP_USER_AGENT', '<none>')
                    ip = request.META.get('REMOTE_ADDR', '<none>')
                    mail_managers("Broken %slink on %s" % ((is_internal and 'INTERNAL ' or ''), domain),
                        "Referrer: %s\nRequested URL: %s\nUser agent: %s\nIP address: %s\n" \
                                  % (referer, request.get_full_path(), ua, ip),
                                  fail_silently=True)
            return response

        # Use ETags, if requested.
        if settings.USE_ETAGS:
            if response.has_header('ETag'):
                etag = response['ETag']
            else:
                etag = '"%s"' % md5_constructor(response.content).hexdigest()
            if response.status_code >= 200 and response.status_code < 300 and request.META.get('HTTP_IF_NONE_MATCH') == etag:
                # Client already has the current content: answer 304 but keep
                # any cookies that were set on the full response.
                cookies = response.cookies
                response = http.HttpResponseNotModified()
                response.cookies = cookies
            else:
                response['ETag'] = etag

        return response
def _is_ignorable_404(uri):
    """
    Returns True if a 404 at the given URL *shouldn't* notify the site managers.
    """
    starts_ignored = any(uri.startswith(prefix)
                         for prefix in settings.IGNORABLE_404_STARTS)
    ends_ignored = any(uri.endswith(suffix)
                       for suffix in settings.IGNORABLE_404_ENDS)
    return starts_ignored or ends_ignored
def _is_internal_request(domain, referer):
"""
Returns true if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return referer is not None and re.match("^https?://%s/" % re.escape(domain), referer)
def _is_valid_path(path, urlconf=None):
    """
    Returns True if the given path resolves against the default URL resolver,
    False otherwise.

    This is a convenience method to make working with "is this a match?" cases
    easier, avoiding unnecessarily indented try...except blocks.
    """
    try:
        urlresolvers.resolve(path, urlconf)
    except urlresolvers.Resolver404:
        return False
    return True
|
MoRgUiJu/morguiju.repo
|
refs/heads/master
|
plugin.video.pelisalacarta/core/logger.py
|
4
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------
# Logger (kodi)
#------------------------------------------------------------
from core import config
# Initial debug flag, read from the add-on's "debug" setting (stored as the
# string "true"/"false" in the configuration).
loggeractive = (config.get_setting("debug")=="true")
import xbmc
def log_enable(active):
    """Switch debug logging on/off at runtime, overriding the configured default."""
    global loggeractive
    loggeractive = active
def info(texto):
    """Log *texto* through Kodi's logger when debug logging is enabled.

    If xbmc.log rejects the message (typically an encoding problem), log an
    ASCII-stripped copy instead. The previous bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit; it is now narrowed to ``Exception``.
    """
    if loggeractive:
        try:
            xbmc.log(texto)
        except Exception:
            # FIXME: ¿Esto de que falle al poner un log no se puede resolver con un encode("ascii",errors="ignore") ?
            validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~."
            stripped = ''.join(c for c in texto if c in validchars)
            xbmc.log("(stripped) "+stripped)
def debug(texto):
    """Log *texto* as a DEBUG message, prefixed with the calling module and
    function name, when debug logging is enabled.

    Falls back to an ASCII-stripped copy if caller introspection or
    xbmc.log fails. The bare ``except:`` is narrowed to ``Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    if loggeractive:
        try:
            import inspect
            import os
            # Identify the caller one frame up the stack for the prefix.
            last = inspect.stack()[1]
            modulo = os.path.basename(os.path.splitext(last[1])[0])
            funcion = last[3]
            texto = " [" + modulo + "." + funcion + "] " + texto
            xbmc.log("######## DEBUG #########")
            xbmc.log(texto)
        except Exception:
            validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~."
            stripped = ''.join(c for c in texto if c in validchars)
            xbmc.log("(stripped) "+stripped)
def error(texto):
    """Log *texto* as an ERROR message, prefixed with the calling module and
    function name, when debug logging is enabled.

    Falls back to an ASCII-stripped copy if caller introspection or
    xbmc.log fails. The bare ``except:`` is narrowed to ``Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    if loggeractive:
        try:
            import inspect
            import os
            # Identify the caller one frame up the stack for the prefix.
            last = inspect.stack()[1]
            modulo = os.path.basename(os.path.splitext(last[1])[0])
            funcion = last[3]
            texto = " [" + modulo + "." + funcion + "] " + texto
            xbmc.log("######## ERROR #########")
            xbmc.log(texto)
        except Exception:
            validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~."
            stripped = ''.join(c for c in texto if c in validchars)
            xbmc.log("(stripped) "+stripped)
|
chugle/myapp
|
refs/heads/master
|
gluon/html.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import cgi
import os
import re
import copy
import types
import urllib
import base64
import sanitizer
import itertools
import decoder
import copy_reg
import cPickle
import marshal
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
from storage import Storage
from utils import web2py_uuid, simple_hash, compare
from highlight import highlight
# Pattern used to detect CR/LF characters when guarding generated URLs
# against CRLF injection (see URL()).
regex_crlf = re.compile('\r|\n')

join = ''.join  # local alias, used as a fast string concatenator

# name2codepoint is incomplete respect to xhtml (and xml): 'apos' is missing.
entitydefs = dict(map(lambda (
    k, v): (k, unichr(v).encode('utf-8')), name2codepoint.iteritems()))
entitydefs.setdefault('apos', u"'".encode('utf-8'))
# Public API of this module: names exported by ``from gluon.html import *``.
__all__ = [
    'A',
    'B',
    'BEAUTIFY',
    'BODY',
    'BR',
    'BUTTON',
    'CENTER',
    'CAT',
    'CODE',
    'COL',
    'COLGROUP',
    'DIV',
    'EM',
    'EMBED',
    'FIELDSET',
    'FORM',
    'H1',
    'H2',
    'H3',
    'H4',
    'H5',
    'H6',
    'HEAD',
    'HR',
    'HTML',
    'I',
    'IFRAME',
    'IMG',
    'INPUT',
    'LABEL',
    'LEGEND',
    'LI',
    'LINK',
    'OL',
    'UL',
    'MARKMIN',
    'MENU',
    'META',
    'OBJECT',
    'ON',
    'OPTION',
    'P',
    'PRE',
    'SCRIPT',
    'OPTGROUP',
    'SELECT',
    'SPAN',
    'STRONG',
    'STYLE',
    'TABLE',
    'TAG',
    'TD',
    'TEXTAREA',
    'TH',
    'THEAD',
    'TBODY',
    'TFOOT',
    'TITLE',
    'TR',
    'TT',
    'URL',
    'XHTML',
    'XML',
    'xmlescape',
    'embed64',
]
def xmlescape(data, quote=True):
    """
    returns an escaped string of the provided data

    :param data: the data to be escaped
    :param quote: optional (default True) -- also escape quote characters
    """

    # first try the xml function
    if hasattr(data, 'xml') and callable(data.xml):
        return data.xml()

    # otherwise, make it a string
    if not isinstance(data, (str, unicode)):
        data = str(data)
    elif isinstance(data, unicode):
        data = data.encode('utf8', 'xmlcharrefreplace')

    # ... and do the escaping
    # NOTE(review): this replace is a no-op as written -- upstream web2py
    # replaces "'" with its XML character entity; confirm against the
    # original source, this literal may have been mangled.
    data = cgi.escape(data, quote).replace("'", "'")
    return data
def call_as_list(f, *a, **b):
    """Invoke *f* with the given arguments; when *f* is a list or tuple of
    callables, invoke each of them in order. Return values are discarded."""
    callables = f if isinstance(f, (list, tuple)) else [f]
    for fn in callables:
        fn(*a, **b)
def truncate_string(text, length, dots='...'):
    """Shorten *text* (a utf-8 encoded byte string) to at most *length*
    characters, replacing the tail with *dots* when truncation occurs.

    NOTE(review): the truncated branch returns a utf-8 encoded str while the
    no-op branch returns the decoded unicode object -- callers appear to rely
    on both being usable interchangeably under Python 2; confirm. Also note
    the byte-slice after encode may split a multi-byte character boundary
    only if dots itself is multi-byte -- verify with non-ASCII input.
    """
    text = text.decode('utf-8')
    if len(text) > length:
        text = text[:length - len(dots)].encode('utf-8') + dots
    return text
def URL(
    a=None,
    c=None,
    f=None,
    r=None,
    args=None,
    vars=None,
    anchor='',
    extension=None,
    env=None,
    hmac_key=None,
    hash_vars=True,
    salt=None,
    user_signature=None,
    scheme=None,
    host=None,
    port=None,
    encode_embedded_slash=False,
    url_encode=True
):
    """
    generate a URL

    example::

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':1, 'q':2}, anchor='1'))
        '/a/c/f/x/y/z?p=1&q=2#1'

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':(1,3), 'q':2}, anchor='1'))
        '/a/c/f/x/y/z?p=1&p=3&q=2#1'

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':(3,1), 'q':2}, anchor='1'))
        '/a/c/f/x/y/z?p=3&p=1&q=2#1'

        >>> str(URL(a='a', c='c', f='f', anchor='1+2'))
        '/a/c/f#1%2B2'

        >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'],
        ...     vars={'p':(1,3), 'q':2}, anchor='1', hmac_key='key'))
        '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f#1'

        >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z']))
        '/a/c/f/w/x/y/z'

        >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z'], encode_embedded_slash=True))
        '/a/c/f/w%2Fx/y%2Fz'

        >>> str(URL(a='a', c='c', f='f', args=['%(id)d'], url_encode=False))
        '/a/c/f/%(id)d'

        >>> str(URL(a='a', c='c', f='f', args=['%(id)d'], url_encode=True))
        '/a/c/f/%25%28id%29d'

        >>> str(URL(a='a', c='c', f='f', vars={'id' : '%(id)d' }, url_encode=False))
        '/a/c/f?id=%(id)d'

        >>> str(URL(a='a', c='c', f='f', vars={'id' : '%(id)d' }, url_encode=True))
        '/a/c/f?id=%25%28id%29d'

        >>> str(URL(a='a', c='c', f='f', anchor='%(id)d', url_encode=False))
        '/a/c/f#%(id)d'

        >>> str(URL(a='a', c='c', f='f', anchor='%(id)d', url_encode=True))
        '/a/c/f#%25%28id%29d'

    generates a url '/a/c/f' corresponding to application a, controller c
    and function f. If r=request is passed, a, c, f are set, respectively,
    to r.application, r.controller, r.function.

    The more typical usage is:

    URL(r=request, f='index') that generates a url for the index function
    within the present application and controller.

    :param a: application (default to current if r is given)
    :param c: controller (default to current if r is given)
    :param f: function (default to current if r is given)
    :param r: request (optional)
    :param args: any arguments (optional)
    :param vars: any variables (optional)
    :param anchor: anchorname, without # (optional)
    :param hmac_key: key to use when generating hmac signature (optional)
    :param hash_vars: which of the vars to include in our hmac signature
        True (default) - hash all vars, False - hash none of the vars,
        iterable - hash only the included vars ['key1','key2']
    :param scheme: URI scheme (True, 'http' or 'https', etc); forces absolute URL (optional)
    :param host: string to force absolute URL with host (True means http_host)
    :param port: optional port number (forces absolute URL)

    :raises SyntaxError: when no application, controller or function is
        available
    :raises SyntaxError: when a CRLF is found in the generated url
    """
    from rewrite import url_out  # done here in case used not-in web2py
    if args in (None, []):
        args = []
    vars = vars or {}
    application = None
    controller = None
    function = None

    if not isinstance(args, (list, tuple)):
        args = [args]

    if not r:
        # Positional shorthand: URL(f) or URL(c, f) shuffle the arguments
        # into place before falling back to the current request.
        if a and not c and not f:
            (f, a, c) = (a, c, f)
        elif a and c and not f:
            (c, f, a) = (a, c, f)
        from globals import current
        if hasattr(current, 'request'):
            r = current.request
    if r:
        application = r.application
        controller = r.controller
        function = r.function
        env = r.env
        if extension is None and r.extension != 'html':
            extension = r.extension
    # Explicit a/c/f arguments override anything taken from the request.
    if a:
        application = a
    if c:
        controller = c
    if f:
        if not isinstance(f, str):
            # Accept a controller function object in place of its name.
            if hasattr(f, '__name__'):
                function = f.__name__
            else:
                raise SyntaxError(
                    'when calling URL, function or function name required')
        elif '/' in f:
            # 'f/x/y' form: first segment is the function, rest become args.
            if f.startswith("/"):
                f = f[1:]
            items = f.split('/')
            function = f = items[0]
            args = items[1:] + args
        else:
            function = f

    # if the url gets a static resource, don't force extention
    if controller == 'static':
        extension = None

    if '.' in function:
        function, extension = function.rsplit('.', 1)

    function2 = '%s.%s' % (function, extension or 'html')

    if not (application and controller and function):
        raise SyntaxError('not enough information to build the url (%s %s %s)' % (application, controller, function))

    if args:
        if url_encode:
            if encode_embedded_slash:
                other = '/' + '/'.join([urllib.quote(str(
                    x), '') for x in args])
            else:
                other = args and urllib.quote(
                    '/' + '/'.join([str(x) for x in args]))
        else:
            other = args and ('/' + '/'.join([str(x) for x in args]))
    else:
        other = ''

    if other.endswith('/'):
        other += '/'  # add trailing slash to make last trailing empty arg explicit

    # Flatten vars into an ordered (key, value) list, expanding multi-valued
    # entries; any caller-supplied '_signature' is dropped (it is recomputed).
    list_vars = []
    for (key, vals) in sorted(vars.items()):
        if key == '_signature':
            continue
        if not isinstance(vals, (list, tuple)):
            vals = [vals]
        for val in vals:
            list_vars.append((key, val))

    if user_signature:
        from globals import current
        if current.session.auth:
            hmac_key = current.session.auth.hmac_key

    if hmac_key:
        # generate an hmac signature of the vars & args so can later
        # verify the user hasn't messed with anything
        h_args = '/%s/%s/%s%s' % (application, controller, function2, other)

        # how many of the vars should we include in our hash?
        if hash_vars is True:  # include them all
            h_vars = list_vars
        elif hash_vars is False:  # include none of them
            h_vars = ''
        else:  # include just those specified
            if hash_vars and not isinstance(hash_vars, (list, tuple)):
                hash_vars = [hash_vars]
            h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars]

        # re-assembling the same way during hash authentication
        message = h_args + '?' + urllib.urlencode(sorted(h_vars))
        sig = simple_hash(
            message, hmac_key or '', salt or '', digest_alg='sha1')
        # add the signature into vars
        list_vars.append(('_signature', sig))

    if list_vars:
        if url_encode:
            other += '?%s' % urllib.urlencode(list_vars)
        else:
            other += '?%s' % '&'.join(['%s=%s' % var[:2] for var in list_vars])
    if anchor:
        if url_encode:
            other += '#' + urllib.quote(str(anchor))
        else:
            other += '#' + (str(anchor))
    if extension:
        function += '.' + extension

    # Security check: refuse to emit a URL containing CR/LF characters.
    if regex_crlf.search(join([application, controller, function, other])):
        raise SyntaxError('CRLF Injection Detected')

    url = url_out(r, env, application, controller, function,
                  args, other, scheme, host, port)
    return url
def verifyURL(request, hmac_key=None, hash_vars=True, salt=None, user_signature=None):
    """
    Verifies that a request's args & vars have not been tampered with by the user

    :param request: web2py's request object
    :param hmac_key: the key to authenticate with, must be the same one previously
        used when calling URL()
    :param hash_vars: which vars to include in our hashing. (Optional)
        Only uses the 1st value currently
        True (or undefined) means all, False none,
        an iterable just the specified keys

    do not call directly. Use instead:

    URL.verify(hmac_key='...')

    the key has to match the one used to generate the URL.

        >>> r = Storage()
        >>> gv = Storage(p=(1,3),q=2,_signature='a32530f0d0caa80964bb92aad2bedf8a4486a31f')
        >>> r.update(dict(application='a', controller='c', function='f', extension='html'))
        >>> r['args'] = ['x', 'y', 'z']
        >>> r['get_vars'] = gv
        >>> verifyURL(r, 'key')
        True
        >>> verifyURL(r, 'kay')
        False
        >>> r.get_vars.p = (3, 1)
        >>> verifyURL(r, 'key')
        True
        >>> r.get_vars.p = (3, 2)
        >>> verifyURL(r, 'key')
        False
    """
    if not '_signature' in request.get_vars:
        return False  # no signature in the request URL

    # check if user_signature requires
    if user_signature:
        from globals import current
        if not current.session or not current.session.auth:
            return False
        hmac_key = current.session.auth.hmac_key
    if not hmac_key:
        return False

    # get our sig from request.get_vars for later comparison
    original_sig = request.get_vars._signature

    # now generate a new hmac for the remaining args & vars
    vars, args = request.get_vars, request.args

    # remove the signature var since it was not part of our signed message
    request.get_vars.pop('_signature')

    # join all the args & vars into one long string
    # always include all of the args
    other = args and urllib.quote('/' + '/'.join([str(x) for x in args])) or ''
    h_args = '/%s/%s/%s.%s%s' % (request.application,
                                 request.controller,
                                 request.function,
                                 request.extension,
                                 other)

    # but only include those vars specified (allows more flexibility for use with
    # forms or ajax)
    list_vars = []
    for (key, vals) in sorted(vars.items()):
        if not isinstance(vals, (list, tuple)):
            vals = [vals]
        for val in vals:
            list_vars.append((key, val))

    # which of the vars are to be included?
    if hash_vars is True:  # include them all
        h_vars = list_vars
    elif hash_vars is False:  # include none of them
        h_vars = ''
    else:  # include just those specified
        # wrap in a try - if the desired vars have been removed it'll fail
        try:
            if hash_vars and not isinstance(hash_vars, (list, tuple)):
                hash_vars = [hash_vars]
            h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars]
        except:
            # user has removed one of our vars! Immediate fail
            return False
    # build the full message string with both args & vars
    message = h_args + '?' + urllib.urlencode(sorted(h_vars))

    # hash with the hmac_key provided
    sig = simple_hash(message, str(hmac_key), salt or '', digest_alg='sha1')

    # put _signature back in get_vars just in case a second call to URL.verify is performed
    # (otherwise it'll immediately return false)
    request.get_vars['_signature'] = original_sig

    # return whether or not the signature in the request matched the one we just generated
    # (I.E. was the message the same as the one we originally signed)
    # compare() is a constant-time comparison helper (see utils import above).
    return compare(original_sig, sig)
# Expose signature verification as a method on URL itself:
# URL.verify(request, hmac_key=...)
URL.verify = verifyURL

# Truthy marker exported in __all__; presumably used as the value for
# boolean/flag tag attributes -- confirm against helper usage elsewhere.
ON = True
class XmlComponent(object):
    """
    Abstract root for all Html components
    """
    # TODO: move some DIV methods to here

    def xml(self):
        """Serialize this component to an XML/HTML string (subclass responsibility)."""
        raise NotImplementedError

    def __mul__(self, n):
        """component * n -> a CAT containing the component n times."""
        return CAT(*[self for i in range(n)])

    def __add__(self, other):
        """component + other -> a CAT of both, flattening CAT operands.

        Builds a fresh list so that neither operand's own ``components``
        list is mutated; the previous implementation aliased
        ``self.components`` and extended it in place via ``+=`` when
        ``self`` was a CAT, silently modifying the left operand.
        """
        if isinstance(self, CAT):
            components = list(self.components)  # copy -- do not mutate self
        else:
            components = [self]
        if isinstance(other, CAT):
            components += other.components
        else:
            components += [other]
        return CAT(*components)

    def add_class(self, name):
        """ add a class to _class attribute """
        c = self['_class']
        classes = (set(c.split()) if c else set()) | set(name.split())
        self['_class'] = ' '.join(classes) if classes else None
        return self

    def remove_class(self, name):
        """ remove a class from _class attribute """
        c = self['_class']
        classes = (set(c.split()) if c else set()) - set(name.split())
        self['_class'] = ' '.join(classes) if classes else None
        return self
class XML(XmlComponent):
    """
    use it to wrap a string that contains XML/HTML so that it will not be
    escaped by the template

    example:

    >>> XML('<h1>Hello</h1>').xml()
    '<h1>Hello</h1>'
    """

    def __init__(
        self,
        text,
        sanitize=False,
        permitted_tags=[
            'a',
            'b',
            'blockquote',
            'br/',
            'i',
            'li',
            'ol',
            'ul',
            'p',
            'cite',
            'code',
            'pre',
            'img/',
            'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
            'table', 'tr', 'td', 'div',
            'strong','span',
        ],
        allowed_attributes={
            'a': ['href', 'title', 'target'],
            'img': ['src', 'alt'],
            'blockquote': ['type'],
            'td': ['colspan'],
        },
    ):
        """
        :param text: the XML text
        :param sanitize: sanitize text using the permitted tags and allowed
            attributes (default False)
        :param permitted_tags: list of permitted tags (default: simple list of
            tags)
        :param allowed_attributes: dictionary of allowed attributed (default
            for A, IMG and BlockQuote).
            The key is the tag; the value is a list of allowed attributes.

        NOTE(review): the mutable default arguments above are shared across
        calls and treated as read-only here -- confirm sanitizer.sanitize
        does not mutate them.
        """
        if sanitize:
            text = sanitizer.sanitize(text, permitted_tags,
                                      allowed_attributes)
        # normalise the stored payload to a utf-8 encoded byte string (Py2)
        if isinstance(text, unicode):
            text = text.encode('utf8', 'xmlcharrefreplace')
        elif not isinstance(text, str):
            text = str(text)
        self.text = text

    def xml(self):
        # already raw markup: emitted without any escaping
        return self.text

    def __str__(self):
        return self.text

    def __add__(self, other):
        return '%s%s' % (self, other)

    def __radd__(self, other):
        return '%s%s' % (other, self)

    def __cmp__(self, other):
        # Python 2 ordering/equality: delegate to the string forms
        return cmp(str(self), str(other))

    def __hash__(self):
        return hash(str(self))

    # why was this here? Break unpickling in sessions
    # def __getattr__(self, name):
    #     return getattr(str(self), name)

    def __getitem__(self, i):
        return str(self)[i]

    def __getslice__(self, i, j):
        # Python 2 only slicing protocol
        return str(self)[i:j]

    def __iter__(self):
        for c in str(self):
            yield c

    def __len__(self):
        return len(str(self))

    def flatten(self, render=None):
        """
        return the text stored by the XML object rendered by the render function
        """
        if render:
            return render(self.text, None, {})
        return self.text

    def elements(self, *args, **kargs):
        """
        to be considered experimental since the behavior of this method is questionable
        another options could be TAG(self.text).elements(*args,**kargs)
        """
        return []
### important to allow safe session.flash=T(....)
def XML_unpickle(data):
    # BUGFIX: rebuild a real XML instance so the unpickled object keeps
    # XML's "never escape" contract; previously this returned the bare
    # marshalled str, which templates would escape
    return XML(marshal.loads(data))
def XML_pickle(data):
    # pickle the rendered text only (attributes are not preserved)
    return XML_unpickle, (marshal.dumps(str(data)),)
copy_reg.pickle(XML, XML_pickle, XML_unpickle)
class DIV(XmlComponent):
    """
    HTML helper, for easy generating and manipulating a DOM structure.
    Little or no validation is done.
    Behaves like a dictionary regarding updating of attributes.
    Behaves like a list regarding inserting/appending components.
    example::
    >>> DIV('hello', 'world', _style='color:red;').xml()
    '<div style=\"color:red;\">helloworld</div>'
    all other HTML helpers are derived from DIV.
    _something=\"value\" attributes are transparently translated into
    something=\"value\" HTML attributes
    """
    # name of the tag, subclasses should update this
    # tags ending with a '/' denote classes that cannot
    # contain components
    tag = 'div'
    def __init__(self, *components, **attributes):
        """
        :param *components: any components that should be nested in this element
        :param **attributes: any attributes you want to give to this element
        :raises SyntaxError: when a stand alone tag receives components
        """
        if self.tag[-1:] == '/' and components:
            raise SyntaxError('<%s> tags cannot have components'
                              % self.tag)
        # a single list/tuple argument is unpacked into the component list
        if len(components) == 1 and isinstance(components[0], (list, tuple)):
            self.components = list(components[0])
        else:
            self.components = list(components)
        self.attributes = attributes
        self._fixup()
        # converts special attributes in components attributes
        self.parent = None
        for c in self.components:
            self._setnode(c)
        self._postprocessing()
    def update(self, **kargs):
        """
        dictionary like updating of the tag attributes
        """
        for (key, value) in kargs.iteritems():
            self[key] = value
        return self
    def append(self, value):
        """
        list style appending of components
        >>> a=DIV()
        >>> a.append(SPAN('x'))
        >>> print a
        <div><span>x</span></div>
        """
        self._setnode(value)
        ret = self.components.append(value)
        self._fixup()
        return ret
    def insert(self, i, value):
        """
        list style inserting of components
        >>> a=DIV()
        >>> a.insert(0,SPAN('x'))
        >>> print a
        <div><span>x</span></div>
        """
        self._setnode(value)
        ret = self.components.insert(i, value)
        self._fixup()
        return ret
    def __getitem__(self, i):
        """
        gets attribute with name 'i' or component #i.
        If attribute 'i' is not found returns None
        :param i: index
           if i is a string: the name of the attribute
           otherwise references to number of the component
        """
        if isinstance(i, str):
            try:
                return self.attributes[i]
            except KeyError:
                # missing attributes read as None rather than raising
                return None
        else:
            return self.components[i]
    def __setitem__(self, i, value):
        """
        sets attribute with name 'i' or component #i.
        :param i: index
           if i is a string: the name of the attribute
           otherwise references to number of the component
        :param value: the new value
        """
        self._setnode(value)
        if isinstance(i, (str, unicode)):
            self.attributes[i] = value
        else:
            self.components[i] = value
    def __delitem__(self, i):
        """
        deletes attribute with name 'i' or component #i.
        :param i: index
           if i is a string: the name of the attribute
           otherwise references to number of the component
        """
        if isinstance(i, str):
            del self.attributes[i]
        else:
            del self.components[i]
    def __len__(self):
        """
        returns the number of included components
        """
        return len(self.components)
    def __nonzero__(self):
        """
        always return True
        """
        # helpers must be truthy even when empty (len() would say otherwise)
        return True
    def _fixup(self):
        """
        Handling of provided components.
        Nothing to fixup yet. May be overridden by subclasses,
        eg for wrapping some components in another component or blocking them.
        """
        return
    def _wrap_components(self, allowed_parents,
                         wrap_parent=None,
                         wrap_lambda=None):
        """
        helper for _fixup. Checks if a component is in allowed_parents,
        otherwise wraps it in wrap_parent
        :param allowed_parents: (tuple) classes that the component should be an
            instance of
        :param wrap_parent: the class to wrap the component in, if needed
        :param wrap_lambda: lambda to use for wrapping, if needed
        """
        components = []
        for c in self.components:
            if isinstance(c, allowed_parents):
                pass
            elif wrap_lambda:
                c = wrap_lambda(c)
            else:
                c = wrap_parent(c)
            if isinstance(c, DIV):
                c.parent = self
            components.append(c)
        self.components = components
    def _postprocessing(self):
        """
        Handling of attributes (normally the ones not prefixed with '_').
        Nothing to postprocess yet. May be overridden by subclasses
        """
        return
    def _traverse(self, status, hideerror=False):
        """
        Recursively pushes form state (vars, request_vars, errors, latest,
        session, formname, hideerror) down to child helpers and combines
        each child's validation status into the returned status.
        When validation failed, 'old_value' (if present) or the submitted
        var is restored into 'value' so the form redisplays user input.
        """
        newstatus = status
        for c in self.components:
            if hasattr(c, '_traverse') and callable(c._traverse):
                c.vars = self.vars
                c.request_vars = self.request_vars
                c.errors = self.errors
                c.latest = self.latest
                c.session = self.session
                c.formname = self.formname
                c['hideerror'] = hideerror or \
                    self.attributes.get('hideerror', False)
                newstatus = c._traverse(status, hideerror) and newstatus
        # for input, textarea, select, option
        # deal with 'value' and 'validation'
        name = self['_name']
        if newstatus:
            newstatus = self._validate()
            self._postprocessing()
        elif 'old_value' in self.attributes:
            self['value'] = self['old_value']
            self._postprocessing()
        elif name and name in self.vars:
            self['value'] = self.vars[name]
            self._postprocessing()
        if name:
            self.latest[name] = self['value']
        return newstatus
    def _validate(self):
        """
        nothing to validate yet. May be overridden by subclasses
        """
        return True
    def _setnode(self, value):
        # record this helper as the parent of any DIV child
        if isinstance(value, DIV):
            value.parent = self
    def _xml(self):
        """
        helper for xml generation. Returns separately:
        - the component attributes
        - the generated xml of the inner components
        Component attributes start with an underscore ('_') and
        do not have a False or None value. The underscore is removed.
        A value of True is replaced with the attribute name.
        :returns: tuple: (attributes, components)
        """
        # get the attributes for this component
        # (they start with '_', others may have special meanings)
        attr = []
        for key, value in self.attributes.iteritems():
            if key[:1] != '_':
                continue
            name = key[1:]
            if value is True:
                # boolean attributes render as name="name"
                value = name
            elif value is False or value is None:
                continue
            attr.append((name, value))
        # the optional 'data' dict expands into data-* attributes
        data = self.attributes.get('data',{})
        for key, value in data.iteritems():
            name = 'data-' + key
            value = data[key]
            attr.append((name,value))
        attr.sort()
        fa = ''
        for name,value in attr:
            fa += ' %s="%s"' % (name, xmlescape(value, True))
        # get the xml for the inner components
        co = join([xmlescape(component) for component in
                   self.components])
        return (fa, co)
    def xml(self):
        """
        generates the xml for this component.
        """
        (fa, co) = self._xml()
        if not self.tag:
            # tag-less helpers (e.g. CAT) emit their children only
            return co
        if self.tag[-1:] == '/':
            # <tag [attributes] />
            return '<%s%s />' % (self.tag[:-1], fa)
        # else: <tag [attributes]> inner components xml </tag>
        return '<%s%s>%s</%s>' % (self.tag, fa, co, self.tag)
    def __str__(self):
        """
        str(COMPONENT) returns equals COMPONENT.xml()
        """
        return self.xml()
    def flatten(self, render=None):
        """
        return the text stored by the DIV object rendered by the render function
        the render function must take text, tagname, and attributes
        render=None is equivalent to render=lambda text, tag, attr: text
        >>> markdown = lambda text,tag=None,attributes={}: \
                        {None: re.sub('\s+',' ',text), \
                         'h1':'#'+text+'\\n\\n', \
                         'p':text+'\\n'}.get(tag,text)
        >>> a=TAG('<h1>Header</h1><p>this is a test</p>')
        >>> a.flatten(markdown)
        '#Header\\n\\nthis is a test\\n'
        """
        text = ''
        for c in self.components:
            if isinstance(c, XmlComponent):
                s = c.flatten(render)
            elif render:
                s = render(str(c))
            else:
                s = str(c)
            text += s
        if render:
            text = render(text, self.tag, self.attributes)
        return text
    # selector pieces for the jQuery-like syntax accepted by elements():
    # tag, #id, .class and [attr=value]
    regex_tag = re.compile('^[\w\-\:]+')
    regex_id = re.compile('#([\w\-]+)')
    regex_class = re.compile('\.([\w\-]+)')
    regex_attr = re.compile('\[([\w\-\:]+)=(.*?)\]')
    def elements(self, *args, **kargs):
        """
        find all component that match the supplied attribute dictionary,
        or None if nothing could be found
        All components of the components are searched.
        >>> a = DIV(DIV(SPAN('x'),3,DIV(SPAN('y'))))
        >>> for c in a.elements('span',first_only=True): c[0]='z'
        >>> print a
        <div><div><span>z</span>3<div><span>y</span></div></div></div>
        >>> for c in a.elements('span'): c[0]='z'
        >>> print a
        <div><div><span>z</span>3<div><span>z</span></div></div></div>
        It also supports a syntax compatible with jQuery
        >>> a=TAG('<div><span><a id="1-1" u:v=$>hello</a></span><p class="this is a test">world</p></div>')
        >>> for e in a.elements('div a#1-1, p.is'): print e.flatten()
        hello
        world
        >>> for e in a.elements('#1-1'): print e.flatten()
        hello
        >>> a.elements('a[u:v=$]')[0].xml()
        '<a id="1-1" u:v="$">hello</a>'
        >>> a=FORM( INPUT(_type='text'), SELECT(range(1)), TEXTAREA() )
        >>> for c in a.elements('input, select, textarea'): c['_disabled'] = 'disabled'
        >>> a.xml()
        '<form action="#" enctype="multipart/form-data" method="post"><input disabled="disabled" type="text" /><select disabled="disabled"><option value="0">0</option></select><textarea cols="40" disabled="disabled" rows="10"></textarea></form>'
        Elements that are matched can also be replaced or removed by specifying
        a "replace" argument (note, a list of the original matching elements
        is still returned as usual).
        >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc'))))
        >>> b = a.elements('span.abc', replace=P('x', _class='xyz'))
        >>> print a
        <div><div><p class="xyz">x</p><div><p class="xyz">x</p><p class="xyz">x</p></div></div></div>
        "replace" can be a callable, which will be passed the original element and
        should return a new element to replace it.
        >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc'))))
        >>> b = a.elements('span.abc', replace=lambda el: P(el[0], _class='xyz'))
        >>> print a
        <div><div><p class="xyz">x</p><div><p class="xyz">y</p><p class="xyz">z</p></div></div></div>
        If replace=None, matching elements will be removed completely.
        >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc'))))
        >>> b = a.elements('span', find='y', replace=None)
        >>> print a
        <div><div><span class="abc">x</span><div><span class="abc">z</span></div></div></div>
        If a "find_text" argument is specified, elements will be searched for text
        components that match find_text, and any matching text components will be
        replaced (find_text is ignored if "replace" is not also specified).
        Like the "find" argument, "find_text" can be a string or a compiled regex.
        >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='abc'), SPAN('z', _class='abc'))))
        >>> b = a.elements(find_text=re.compile('x|y|z'), replace='hello')
        >>> print a
        <div><div><span class="abc">hello</span><div><span class="abc">hello</span><span class="abc">hello</span></div></div></div>
        If other attributes are specified along with find_text, then only components
        that match the specified attributes will be searched for find_text.
        >>> a = DIV(DIV(SPAN('x', _class='abc'), DIV(SPAN('y', _class='efg'), SPAN('z', _class='abc'))))
        >>> b = a.elements('span.efg', find_text=re.compile('x|y|z'), replace='hello')
        >>> print a
        <div><div><span class="abc">x</span><div><span class="efg">hello</span><span class="abc">z</span></div></div></div>
        """
        # a comma-separated selector is split and each part searched separately
        if len(args) == 1:
            args = [a.strip() for a in args[0].split(',')]
        if len(args) > 1:
            subset = [self.elements(a, **kargs) for a in args]
            return reduce(lambda a, b: a + b, subset, [])
        elif len(args) == 1:
            items = args[0].split()
            if len(items) > 1:
                # descendant selector: match the head, then recurse on the rest
                subset = [a.elements(' '.join(
                    items[1:]), **kargs) for a in self.elements(items[0])]
                return reduce(lambda a, b: a + b, subset, [])
            else:
                item = items[0]
                if '#' in item or '.' in item or '[' in item:
                    # decompose tag#id.class[attr=value] into kargs and recurse
                    match_tag = self.regex_tag.search(item)
                    match_id = self.regex_id.search(item)
                    match_class = self.regex_class.search(item)
                    match_attr = self.regex_attr.finditer(item)
                    args = []
                    if match_tag:
                        args = [match_tag.group()]
                    if match_id:
                        kargs['_id'] = match_id.group(1)
                    if match_class:
                        kargs['_class'] = re.compile('(?<!\w)%s(?!\w)' %
                                                     match_class.group(1).replace('-', '\\-').replace(':', '\\:'))
                    for item in match_attr:
                        kargs['_' + item.group(1)] = item.group(2)
                    return self.elements(*args, **kargs)
        # make a copy of the components
        matches = []
        # check if the component has an attribute with the same
        # value as provided
        check = True
        tag = getattr(self, 'tag').replace('/', '')
        if args and tag not in args:
            check = False
        for (key, value) in kargs.iteritems():
            if key not in ['first_only', 'replace', 'find_text']:
                if isinstance(value, (str, int)):
                    if self[key] != str(value):
                        check = False
                elif key in self.attributes:
                    # non-string values are treated as compiled regexes
                    if not value.search(str(self[key])):
                        check = False
                else:
                    check = False
        if 'find' in kargs:
            find = kargs['find']
            is_regex = not isinstance(find, (str, int))
            for c in self.components:
                if (isinstance(c, str) and ((is_regex and find.search(c)) or
                                            (str(find) in c))):
                    check = True
        # if found, return the component
        if check:
            matches.append(self)
        first_only = kargs.get('first_only', False)
        replace = kargs.get('replace', False)
        # find_text only applies when a replacement was requested
        find_text = replace is not False and kargs.get('find_text', False)
        is_regex = not isinstance(find_text, (str, int, bool))
        find_components = not (check and first_only)
        def replace_component(i):
            if replace is None:
                del self[i]
            elif callable(replace):
                self[i] = replace(self[i])
            else:
                self[i] = replace
        # loop the components
        if find_text or find_components:
            for i, c in enumerate(self.components):
                if check and find_text and isinstance(c, str) and \
                        ((is_regex and find_text.search(c)) or (str(find_text) in c)):
                    replace_component(i)
                if find_components and isinstance(c, XmlComponent):
                    child_matches = c.elements(*args, **kargs)
                    if len(child_matches):
                        if not find_text and replace is not False and child_matches[0] is c:
                            replace_component(i)
                        if first_only:
                            return child_matches
                        matches.extend(child_matches)
        return matches
    def element(self, *args, **kargs):
        """
        find the first component that matches the supplied attribute dictionary,
        or None if nothing could be found
        Also the components of the components are searched.
        """
        kargs['first_only'] = True
        elements = self.elements(*args, **kargs)
        if not elements:
            # we found nothing
            return None
        return elements[0]
    def siblings(self, *args, **kargs):
        """
        find all sibling components that match the supplied argument list
        and attribute dictionary, or None if nothing could be found
        """
        sibs = [s for s in self.parent.components if not s == self]
        matches = []
        first_only = False
        if 'first_only' in kargs:
            first_only = kargs.pop('first_only')
        for c in sibs:
            try:
                check = True
                tag = getattr(c, 'tag').replace("/", "")
                if args and tag not in args:
                    check = False
                for (key, value) in kargs.iteritems():
                    if c[key] != value:
                        check = False
                if check:
                    matches.append(c)
                    if first_only:
                        break
            except:
                # plain-string siblings have no tag/attributes: skip them
                pass
        return matches
    def sibling(self, *args, **kargs):
        """
        find the first sibling component that match the supplied argument list
        and attribute dictionary, or None if nothing could be found
        """
        kargs['first_only'] = True
        sibs = self.siblings(*args, **kargs)
        if not sibs:
            return None
        return sibs[0]
class CAT(DIV):
    # concatenation helper: renders its components with no enclosing tag
    tag = ''
def TAG_unpickler(data):
    # rebuild the helper from its pickled byte string
    return cPickle.loads(data)
def TAG_pickler(data):
    # pickle a plain DIV carrying the instance __dict__, since the
    # dynamically created helper classes themselves are not picklable
    proxy = DIV()
    proxy.__dict__ = data.__dict__
    return (TAG_unpickler, (cPickle.dumps(proxy),))
class __tag__(DIV):
    # a DIV whose tag name is chosen at instantiation time (used by TAG)
    def __init__(self, name, *components, **attributes):
        DIV.__init__(self, *components, **attributes)
        self.tag = name
copy_reg.pickle(__tag__, TAG_pickler, TAG_unpickler)
class __TAG__(XmlComponent):
    """
    TAG factory example::
    >>> print TAG.first(TAG.second('test'), _key = 3)
    <first key=\"3\"><second>test</second></first>
    """
    def __getitem__(self, name):
        # TAG['name'] behaves exactly like TAG.name
        return self.__getattr__(name)
    def __getattr__(self, name):
        # a trailing underscore marks a stand-alone tag, e.g. TAG.br_
        if name.endswith('_'):
            name = name[:-1] + '/'
        if isinstance(name, unicode):
            name = name.encode('utf-8')
        return lambda *a, **b: __tag__(name, *a, **b)
    def __call__(self, html):
        # parse raw markup into a helper tree
        return web2pyHTMLParser(decoder.decoder(html)).tree
TAG = __TAG__()
class HTML(DIV):
    """
    There are four predefined document type definitions.
    They can be specified in the 'doctype' parameter:
    -'strict' enables strict doctype
    -'transitional' enables transitional doctype (default)
    -'frameset' enables frameset doctype
    -'html5' enables HTML 5 doctype
    -any other string will be treated as user's own doctype
    'lang' parameter specifies the language of the document.
    Defaults to 'en'.
    See also :class:`DIV`
    """
    tag = 'html'
    strict = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n'
    transitional = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n'
    frameset = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">\n'
    html5 = '<!DOCTYPE HTML>\n'
    def xml(self):
        # the document language always ends up in the lang attribute
        self.attributes['_lang'] = self['lang'] or 'en'
        doctype = self['doctype']
        if doctype is None:
            doctype = self.transitional
        elif doctype in ('strict', 'transitional', 'frameset', 'html5'):
            # named doctypes map onto the class-level constants above
            doctype = getattr(self, doctype)
        elif doctype != '':
            # anything else is taken as a user-supplied doctype line
            doctype = '%s\n' % doctype
        fa, co = self._xml()
        return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag)
class XHTML(DIV):
    """
    This is XHTML version of the HTML helper.
    There are three predefined document type definitions.
    They can be specified in the 'doctype' parameter:
    -'strict' enables strict doctype
    -'transitional' enables transitional doctype (default)
    -'frameset' enables frameset doctype
    -any other string will be treated as user's own doctype
    'lang' parameter specifies the language of the document and the xml document.
    Defaults to 'en'.
    'xmlns' parameter specifies the xml namespace.
    Defaults to 'http://www.w3.org/1999/xhtml'.
    See also :class:`DIV`
    """
    tag = 'html'
    strict = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
    transitional = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
    frameset = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">\n'
    xmlns = 'http://www.w3.org/1999/xhtml'
    def xml(self):
        # fall back to the class-level namespace when none was supplied
        self.attributes['_xmlns'] = self['xmlns'] or self.xmlns
        # the language goes into both lang and xml:lang
        lang = self['lang'] or 'en'
        self.attributes['_lang'] = lang
        self.attributes['_xml:lang'] = lang
        doctype = self['doctype']
        if not doctype:
            doctype = self.transitional
        elif doctype in ('strict', 'transitional', 'frameset'):
            # named doctypes map onto the class-level constants above
            doctype = getattr(self, doctype)
        else:
            doctype = '%s\n' % doctype
        fa, co = self._xml()
        return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag)
class HEAD(DIV):
    tag = 'head'
class TITLE(DIV):
    tag = 'title'
class META(DIV):
    # trailing '/' marks a stand-alone tag: it cannot contain components
    tag = 'meta/'
class LINK(DIV):
    tag = 'link/'
class SCRIPT(DIV):
    tag = 'script'
    def xml(self):
        fa = self._xml()[0]
        # script text is emitted verbatim (no xml escaping), one component
        # per line, wrapped in an HTML comment for legacy browsers
        # (a CDATA wrapper was used here historically)
        body = '\n'.join(str(component) for component in self.components)
        if not body:
            return DIV.xml(self)
        return '<%s%s><!--\n%s\n//--></%s>' % (self.tag, fa, body, self.tag)
class STYLE(DIV):
    tag = 'style'
    def xml(self):
        fa = self._xml()[0]
        # style text is emitted verbatim (no xml escaping) inside a
        # comment/CDATA sandwich for legacy browsers and XML parsers
        body = '\n'.join(str(component) for component in self.components)
        if not body:
            return DIV.xml(self)
        return '<%s%s><!--/*--><![CDATA[/*><!--*/\n%s\n/*]]>*/--></%s>' % (self.tag, fa, body, self.tag)
class IMG(DIV):
    # stand-alone tag (trailing '/'): no components allowed
    tag = 'img/'
class SPAN(DIV):
    tag = 'span'
class BODY(DIV):
    tag = 'body'
class H1(DIV):
    tag = 'h1'
class H2(DIV):
    tag = 'h2'
class H3(DIV):
    tag = 'h3'
class H4(DIV):
    tag = 'h4'
class H5(DIV):
    tag = 'h5'
class H6(DIV):
    tag = 'h6'
class P(DIV):
    """
    Will replace ``\\n`` by ``<br />`` if the `cr2br` attribute is provided.
    see also :class:`DIV`
    """
    tag = 'p'
    def xml(self):
        rendered = DIV.xml(self)
        if not self['cr2br']:
            return rendered
        return rendered.replace('\n', '<br />')
class STRONG(DIV):
    tag = 'strong'
class B(DIV):
    tag = 'b'
class BR(DIV):
    # stand-alone tags (trailing '/'): no components allowed
    tag = 'br/'
class HR(DIV):
    tag = 'hr/'
class A(DIV):
    """
    Anchor helper. With no components, the href doubles as the link text.
    The special attributes 'component', 'callback' and 'cid' turn the link
    into an ajax action; 'delete', 'target', 'noconfirm' and 'pre_call'
    refine that behavior.
    """
    tag = 'a'
    def xml(self):
        if not self.components and self['_href']:
            # no link text given: display the target url itself
            self.append(self['_href'])
        if self['delete']:
            # js snippet removing the closest ancestor matching the selector
            d = "jQuery(this).closest('%s').remove();" % self['delete']
        else:
            d = ''
        if self['component']:
            # load the component url into the target container via ajax
            self['_onclick'] = "web2py_component('%s','%s');%sreturn false;" % \
                (self['component'], self['target'] or '', d)
            self['_href'] = self['_href'] or '#null'
        elif self['callback']:
            # js that stops the click event so the browser never follows the link
            returnfalse = "var e = arguments[0] || window.event; e.cancelBubble=true; if (e.stopPropagation) {e.stopPropagation(); e.stopImmediatePropagation(); e.preventDefault();}"
            if d and not self['noconfirm']:
                # destructive callback: ask for confirmation first
                self['_onclick'] = "if(confirm(w2p_ajax_confirm_message||'Are you sure you want to delete this object?')){ajax('%s',[],'%s');%s};%s" % \
                    (self['callback'], self['target'] or '', d, returnfalse)
            else:
                self['_onclick'] = "ajax('%s',[],'%s');%sreturn false" % \
                    (self['callback'], self['target'] or '', d)
            self['_href'] = self['_href'] or '#null'
        elif self['cid']:
            # reload the component identified by cid, optionally after pre_call
            pre = self['pre_call'] + ';' if self['pre_call'] else ''
            self['_onclick'] = '%sweb2py_component("%s","%s");%sreturn false;' % \
                (pre,self['_href'], self['cid'], d)
        return DIV.xml(self)
class BUTTON(DIV):
    tag = 'button'
class EM(DIV):
    tag = 'em'
class EMBED(DIV):
    # stand-alone tag (trailing '/'): no components allowed
    tag = 'embed/'
class TT(DIV):
    tag = 'tt'
class PRE(DIV):
    tag = 'pre'
class CENTER(DIV):
    tag = 'center'
class CODE(DIV):
    """
    displays code in HTML with syntax highlighting.
    :param attributes: optional attributes:
      - language: indicates the language, otherwise PYTHON is assumed
      - link: can provide a link
      - styles: for styles
    Example::
    {{=CODE(\"print 'hello world'\", language='python', link=None,
        counter=1, styles={}, highlight_line=None)}}
    supported languages are \"python\", \"html_plain\", \"c\", \"cpp\",
    \"web2py\", \"html\".
    The \"html\" language interprets {{ and }} tags as \"web2py\" code,
    \"html_plain\" doesn't.
    if a link='/examples/global/vars/' is provided web2py keywords are linked to
    the online docs.
    the counter is used for line numbering, counter can be None or a prompt
    string.
    """
    def xml(self):
        # note: counter defaults to 1 only when the key is truly absent;
        # an explicit counter=None disables numbering
        att = self.attributes
        return highlight(
            join(self.components),
            language=self['language'] or 'PYTHON',
            link=self['link'],
            counter=att.get('counter', 1),
            styles=self['styles'] or {},
            attributes=att,
            highlight_line=att.get('highlight_line', None),
            context_lines=att.get('context_lines', None),
        )
class LABEL(DIV):
    tag = 'label'
class LI(DIV):
    tag = 'li'
class UL(DIV):
    """
    UL Component.
    If subcomponents are not LI-components they will be wrapped in a LI
    see also :class:`DIV`
    """
    tag = 'ul'
    def _fixup(self):
        # wrap every non-LI child in a LI
        self._wrap_components(LI, LI)
class OL(UL):
    # ordered list: same LI-wrapping behavior as UL
    tag = 'ol'
class TD(DIV):
    tag = 'td'
class TH(DIV):
    tag = 'th'
class TR(DIV):
    """
    TR Component.
    If subcomponents are not TD/TH-components they will be wrapped in a TD
    see also :class:`DIV`
    """
    tag = 'tr'
    def _fixup(self):
        # accept existing TD or TH cells; wrap anything else in a TD
        self._wrap_components((TD, TH), TD)
class THEAD(DIV):
    tag = 'thead'
    def _fixup(self):
        # table sections may contain rows only: wrap non-TR children in a TR
        self._wrap_components(TR, TR)
class TBODY(DIV):
    tag = 'tbody'
    def _fixup(self):
        self._wrap_components(TR, TR)
class TFOOT(DIV):
    tag = 'tfoot'
    def _fixup(self):
        self._wrap_components(TR, TR)
class COL(DIV):
    # NOTE(review): col is a void element in HTML; the tag lacks the trailing
    # '/' so this renders an explicit closing tag - confirm this is intended
    tag = 'col'
class COLGROUP(DIV):
    tag = 'colgroup'
class TABLE(DIV):
    """
    TABLE Component.
    If subcomponents are not TR/TBODY/THEAD/TFOOT-components
    they will be wrapped in a TR
    see also :class:`DIV`
    """
    tag = 'table'
    def _fixup(self):
        # structural table children pass through; anything else becomes a TR
        self._wrap_components((TR, TBODY, THEAD, TFOOT, COL, COLGROUP), TR)
class I(DIV):
    tag = 'i'
class IFRAME(DIV):
    tag = 'iframe'
class INPUT(DIV):
    """
    INPUT Component
    examples::
    >>> INPUT(_type='text', _name='name', value='Max').xml()
    '<input name=\"name\" type=\"text\" value=\"Max\" />'
    >>> INPUT(_type='checkbox', _name='checkbox', value='on').xml()
    '<input checked=\"checked\" name=\"checkbox\" type=\"checkbox\" value=\"on\" />'
    >>> INPUT(_type='radio', _name='radio', _value='yes', value='yes').xml()
    '<input checked=\"checked\" name=\"radio\" type=\"radio\" value=\"yes\" />'
    >>> INPUT(_type='radio', _name='radio', _value='no', value='yes').xml()
    '<input name=\"radio\" type=\"radio\" value=\"no\" />'
    the input helper takes two special attributes value= and requires=.
    :param value: used to pass the initial value for the input field.
        value differs from _value because it works for checkboxes, radio,
        textarea and select/option too.
        - for a checkbox value should be '' or 'on'.
        - for a radio or select/option value should be the _value
            of the checked/selected item.
    :param requires: should be None, or a validator or a list of validators
        for the value of the field.
    """
    tag = 'input/'
    def _validate(self):
        # # this only changes value, not _value
        name = self['_name']
        if name is None or name == '':
            # unnamed inputs are never validated
            return True
        name = str(name)
        request_vars_get = self.request_vars.get
        if self['_type'] != 'checkbox':
            self['old_value'] = self['value'] or self['_value'] or ''
            value = request_vars_get(name, '')
            # uploaded files (objects with a .file attribute) are not a value
            self['value'] = value if not hasattr(value,'file') else None
        else:
            self['old_value'] = self['value'] or False
            value = request_vars_get(name)
            # a checkbox is "on" when its _value appears among submitted values
            if isinstance(value, (tuple, list)):
                self['value'] = self['_value'] in value
            else:
                self['value'] = self['_value'] == value
        requires = self['requires']
        if requires:
            if not isinstance(requires, (list, tuple)):
                requires = [requires]
            for validator in requires:
                # validators chain: each receives the previous one's output
                (value, errors) = validator(value)
                if not errors is None:
                    self.vars[name] = value
                    self.errors[name] = errors
                    break
        if not name in self.errors:
            self.vars[name] = value
            return True
        return False
    def _postprocessing(self):
        # reconcile 'value' (logical) with '_value'/'_checked' (rendered)
        t = self['_type']
        if not t:
            t = self['_type'] = 'text'
        t = t.lower()
        value = self['value']
        if self['_value'] is None or isinstance(self['_value'],cgi.FieldStorage):
            _value = None
        else:
            _value = str(self['_value'])
        if '_checked' in self.attributes and not 'value' in self.attributes:
            # caller managed _checked explicitly: leave it alone
            pass
        elif t == 'checkbox':
            if not _value:
                _value = self['_value'] = 'on'
            # normalize value to a list of selected entries
            if not value:
                value = []
            elif value is True:
                value = [_value]
            elif not isinstance(value, (list, tuple)):
                value = str(value).split('|')
            self['_checked'] = _value in value and 'checked' or None
        elif t == 'radio':
            if str(value) == str(_value):
                self['_checked'] = 'checked'
            else:
                self['_checked'] = None
        elif not t == 'submit':
            if value is None:
                self['value'] = _value
            elif not isinstance(value, list):
                self['_value'] = value
    def xml(self):
        name = self.attributes.get('_name', None)
        if name and hasattr(self, 'errors') \
                and self.errors.get(name, None) \
                and self['hideerror'] != True:
            # mark the field and append the error message in a wrapper div
            self['_class'] = (self['_class'] and self['_class']
                              + ' ' or '') + 'invalidinput'
            return DIV.xml(self) + DIV(
                DIV(
                    self.errors[name], _class='error',
                    errors=None, _id='%s__error' % name),
                _class='error_wrapper').xml()
        else:
            if self['_class'] and self['_class'].endswith('invalidinput'):
                # strip the 12-char 'invalidinput' marker added above
                # (any preceding classes, including a trailing space, remain)
                self['_class'] = self['_class'][:-12]
                if self['_class'] == '':
                    self['_class'] = None
            return DIV.xml(self)
class TEXTAREA(INPUT):
    """
    example::
    TEXTAREA(_name='sometext', value='blah '*100, requires=IS_NOT_EMPTY())
    'blah blah blah ...' will be the content of the textarea field.
    """
    tag = 'textarea'
    def _postprocessing(self):
        # supply default geometry when the caller did not
        self.attributes.setdefault('_rows', 10)
        self.attributes.setdefault('_cols', 40)
        value = self['value']
        if value is not None:
            # the logical value wins over any inline content
            self.components = [value]
        elif self.components:
            # otherwise adopt the inline content as the value
            self['value'] = self.components[0]
class OPTION(DIV):
    tag = 'option'
    def _fixup(self):
        # default the submit value to the option's first component's text
        if not '_value' in self.attributes:
            self.attributes['_value'] = str(self.components[0])
class OBJECT(DIV):
    tag = 'object'
class OPTGROUP(DIV):
    tag = 'optgroup'
    def _fixup(self):
        # wrap any bare child in an OPTION whose value is its string form
        self.components = [
            c if isinstance(c, OPTION) else OPTION(c, _value=str(c))
            for c in self.components]
class SELECT(INPUT):
    """
    example::
    >>> from validators import IS_IN_SET
    >>> SELECT('yes', 'no', _name='selector', value='yes',
    ...    requires=IS_IN_SET(['yes', 'no'])).xml()
    '<select name=\"selector\"><option selected=\"selected\" value=\"yes\">yes</option><option value=\"no\">no</option></select>'
    """
    tag = 'select'
    def _fixup(self):
        # wrap any bare child in an OPTION; OPTION/OPTGROUP pass through
        self.components = [
            c if isinstance(c, (OPTION, OPTGROUP)) else OPTION(c, _value=str(c))
            for c in self.components]
    def _postprocessing(self):
        # flatten OPTGROUP children into one iterator over all OPTIONs
        groups = [c.components if isinstance(c, OPTGROUP) else [c]
                  for c in self.components]
        options = itertools.chain(*groups)
        value = self['value']
        if value is None:
            return
        if self['_multiple']:
            # multi-select: every option whose _value is listed gets selected
            if isinstance(value, (list, tuple)):
                values = [str(item) for item in value]
            else:
                values = [str(value)]
            for c in options:
                c['_selected'] = 'selected' if str(c['_value']) in values else None
        else:
            # single select: exactly the matching option gets selected
            for c in options:
                c['_selected'] = 'selected' if str(c['_value']) == str(value) else None
class FIELDSET(DIV):
    tag = 'fieldset'
class LEGEND(DIV):
    tag = 'legend'
class FORM(DIV):
    """
    example::

        >>> from validators import IS_NOT_EMPTY
        >>> form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY()))
        >>> form.xml()
        '<form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"test\" type=\"text\" /></form>'

    a FORM is container for INPUT, TEXTAREA, SELECT and other helpers

    form has one important method::

        form.accepts(request.vars, session)

    if form is accepted (and all validators pass) form.vars contains the
    accepted vars, otherwise form.errors contains the errors.
    in case of errors the form is modified to present the errors to the user.
    """

    tag = 'form'

    def __init__(self, *components, **attributes):
        DIV.__init__(self, *components, **attributes)
        self.vars = Storage()    # validated values, filled by accepts()
        self.errors = Storage()  # field name -> error message, filled by accepts()
        self.latest = Storage()
        self.accepted = None  # none for not submitted

    def assert_status(self, status, request_vars):
        # Hook for subclasses (e.g. SQLFORM) to veto or adjust acceptance;
        # the base implementation returns the status unchanged.
        return status

    def accepts(
        self,
        request_vars,
        session=None,
        formname='default',
        keepvalues=False,
        onvalidation=None,
        hideerror=False,
        **kwargs
    ):
        """
        Validate the submitted ``request_vars`` against this form,
        populating ``self.vars`` / ``self.errors`` and returning True on
        success.

        kwargs is not used but allows to specify the same interface for FORM and SQLFORM
        """
        if request_vars.__class__.__name__ == 'Request':
            request_vars = request_vars.post_vars
        self.errors.clear()
        self.request_vars = Storage()
        self.request_vars.update(request_vars)
        self.session = session
        self.formname = formname
        self.keepvalues = keepvalues

        # if this tag is a form and we are in accepting mode (status=True)
        # check formname and formkey
        status = True
        changed = False
        request_vars = self.request_vars
        if session is not None:
            formkey = session.get('_formkey[%s]' % formname, None)
            # check if user tampering with form and void CSRF
            if not formkey or formkey != request_vars._formkey:
                status = False
        if formname != request_vars._formname:
            status = False
        if status and session:
            # check if editing a record that has been modified by the server
            if hasattr(self, 'record_hash') and self.record_hash != formkey:
                status = False
                self.record_changed = changed = True
        # Run every child widget's validators; this also decorates the tree
        # with error markup unless hideerror is set.
        status = self._traverse(status, hideerror)
        status = self.assert_status(status, request_vars)
        if onvalidation:
            if isinstance(onvalidation, dict):
                onsuccess = onvalidation.get('onsuccess', None)
                onfailure = onvalidation.get('onfailure', None)
                onchange = onvalidation.get('onchange', None)
                if [k for k in onvalidation if not k in (
                        'onsuccess', 'onfailure', 'onchange')]:
                    raise RuntimeError('Invalid key in onvalidate dict')
                if onsuccess and status:
                    call_as_list(onsuccess, self)
                if onfailure and request_vars and not status:
                    call_as_list(onfailure, self)
                # Callbacks may have added errors; recompute status.
                status = len(self.errors) == 0
                if changed:
                    if onchange and self.record_changed and \
                            self.detect_record_change:
                        call_as_list(onchange, self)
            elif status:
                call_as_list(onvalidation, self)
        if self.errors:
            status = False
        if not session is None:
            # Issue a fresh one-time form key (CSRF token) for the next
            # render of this form.
            if hasattr(self, 'record_hash'):
                formkey = self.record_hash
            else:
                formkey = web2py_uuid()
            self.formkey = session['_formkey[%s]' % formname] = formkey
        if status and not keepvalues:
            # Clear submitted values from the widgets on success.
            self._traverse(False, hideerror)
        self.accepted = status
        return status

    def _postprocessing(self):
        # Supply sensible defaults for the <form> tag attributes.
        if not '_action' in self.attributes:
            self['_action'] = '#'
        if not '_method' in self.attributes:
            self['_method'] = 'post'
        if not '_enctype' in self.attributes:
            self['_enctype'] = 'multipart/form-data'

    def hidden_fields(self):
        """Return a hidden DIV carrying _formkey/_formname and any extra
        ``hidden`` attributes, so accepts() can verify the submission."""
        c = []
        attr = self.attributes.get('hidden', {})
        if 'hidden' in self.attributes:
            c = [INPUT(_type='hidden', _name=key, _value=value)
                 for (key, value) in attr.iteritems()]
        if hasattr(self, 'formkey') and self.formkey:
            c.append(INPUT(_type='hidden', _name='_formkey',
                           _value=self.formkey))
        if hasattr(self, 'formname') and self.formname:
            c.append(INPUT(_type='hidden', _name='_formname',
                           _value=self.formname))
        return DIV(c, _style="display:none;")

    def xml(self):
        # Serialize a copy so the hidden bookkeeping fields are not
        # permanently appended to this form's component tree.
        newform = FORM(*self.components, **self.attributes)
        hidden_fields = self.hidden_fields()
        if hidden_fields.components:
            newform.append(hidden_fields)
        return DIV.xml(newform)

    def validate(self, **kwargs):
        """
        This function validates the form,
        you can use it instead of directly form.accepts.

        Usage:
        In controller::

            def action():
                form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY()))
                form.validate() #you can pass some args here - see below
                return dict(form=form)

        This can receive a bunch of arguments

        onsuccess = 'flash' - will show message_onsuccess in response.flash
                    None - will do nothing
                    can be a function (lambda form: pass)
        onfailure = 'flash' - will show message_onfailure in response.flash
                    None - will do nothing
                    can be a function (lambda form: pass)
        onchange = 'flash' - will show message_onchange in response.flash
                   None - will do nothing
                   can be a function (lambda form: pass)
        message_onsuccess
        message_onfailure
        message_onchange
        next = where to redirect in case of success
        any other kwargs will be passed for form.accepts(...)
        """
        from gluon import current, redirect
        kwargs['request_vars'] = kwargs.get(
            'request_vars', current.request.post_vars)
        kwargs['session'] = kwargs.get('session', current.session)
        kwargs['dbio'] = kwargs.get('dbio', False)
        # necessary for SQLHTML forms
        onsuccess = kwargs.get('onsuccess', 'flash')
        onfailure = kwargs.get('onfailure', 'flash')
        onchange = kwargs.get('onchange', 'flash')
        message_onsuccess = kwargs.get('message_onsuccess',
                                       current.T("Success!"))
        message_onfailure = kwargs.get('message_onfailure',
                                       current.T("Errors in form, please check it out."))
        message_onchange = kwargs.get('message_onchange',
                                      current.T("Form consecutive submissions not allowed. " +
                                                "Try re-submitting or refreshing the form page."))
        next = kwargs.get('next', None)
        # Strip validate()-only keywords before delegating to accepts().
        for key in ('message_onsuccess', 'message_onfailure', 'onsuccess',
                    'onfailure', 'next', 'message_onchange', 'onchange'):
            if key in kwargs:
                del kwargs[key]
        if self.accepts(**kwargs):
            if onsuccess == 'flash':
                if next:
                    current.session.flash = message_onsuccess
                else:
                    current.response.flash = message_onsuccess
            elif callable(onsuccess):
                onsuccess(self)
            if next:
                # Substitute accepted vars into the redirect target, e.g.
                # next='/app/view/[id]' receives the new record id.
                if self.vars:
                    for key, value in self.vars.iteritems():
                        next = next.replace('[%s]' % key,
                                            urllib.quote(str(value)))
                if not next.startswith('/'):
                    next = URL(next)
                redirect(next)
            return True
        elif self.errors:
            if onfailure == 'flash':
                current.response.flash = message_onfailure
            elif callable(onfailure):
                onfailure(self)
            return False
        elif hasattr(self, "record_changed"):
            if self.record_changed and self.detect_record_change:
                if onchange == 'flash':
                    current.response.flash = message_onchange
                elif callable(onchange):
                    onchange(self)
            return False

    def process(self, **kwargs):
        """
        Perform the .validate() method but returns the form

        Usage in controllers::

            # directly on return
            def action():
                #some code here
                return dict(form=FORM(...).process(...))

        You can use it with FORM, SQLFORM or FORM based plugins

        Examples::

            #response.flash messages
            def action():
                form = SQLFORM(db.table).process(message_onsuccess='Success!')
                return dict(form=form)

            # callback function
            # callback receives True or False as first arg, and a list of args.
            def my_callback(status, msg):
                response.flash = "Success! "+msg if status else "Errors occurred"

            # after argument can be 'flash' to response.flash messages
            # or a function name to use as callback or None to do nothing.
            def action():
                return dict(form=SQLFORM(db.table).process(onsuccess=my_callback)
        """
        kwargs['dbio'] = kwargs.get('dbio', True)
        # necessary for SQLHTML forms
        self.validate(**kwargs)
        return self

    # javascript snippet used by add_button()/confirm() to turn a button
    # click into a client-side redirect instead of a form submission
    REDIRECT_JS = "window.location='%s';return false"

    def add_button(self, value, url, _class=None):
        # Append an extra button next to the existing submit input that
        # redirects the browser to `url`.
        submit = self.element('input[type=submit]')
        submit.parent.append(
            INPUT(_type="button", _value=value, _class=_class,
                  _onclick=self.REDIRECT_JS % url))

    @staticmethod
    def confirm(text='OK', buttons=None, hidden=None):
        """Build and immediately process a confirmation form: one submit
        button labelled *text*, a redirect button per *buttons* entry, and
        a hidden input per *hidden* entry."""
        if not buttons:
            buttons = {}
        if not hidden:
            hidden = {}
        inputs = [INPUT(_type='button',
                        _value=name,
                        _onclick=FORM.REDIRECT_JS % link)
                  for name, link in buttons.iteritems()]
        inputs += [INPUT(_type='hidden',
                         _name=name,
                         _value=value)
                   for name, value in hidden.iteritems()]
        form = FORM(INPUT(_type='submit', _value=text), *inputs)
        form.process()
        return form

    def as_dict(self, flat=False, sanitize=True):
        """EXPERIMENTAL

        Sanitize is naive. It should catch any unsafe value
        for client retrieval.
        """
        SERIALIZABLE = (int, float, bool, basestring, long,
                        set, list, dict, tuple, Storage, type(None))
        UNSAFE = ("PASSWORD", "CRYPT")
        d = self.__dict__

        def sanitizer(obj):
            # Drop any dict entry whose key looks like a credential field.
            if isinstance(obj, dict):
                for k in obj.keys():
                    if any([unsafe in str(k).upper() for
                            unsafe in UNSAFE]):
                        # erase unsafe pair
                        obj.pop(k)
            else:
                # not implemented
                pass
            return obj

        def flatten(obj):
            # Recursively copy/sanitize; when flat=True, anything not in
            # SERIALIZABLE is replaced by its str() representation.
            if isinstance(obj, (dict, Storage)):
                newobj = obj.copy()
            else:
                newobj = obj
            if sanitize:
                newobj = sanitizer(newobj)
            if flat:
                if type(obj) in SERIALIZABLE:
                    if isinstance(newobj, (dict, Storage)):
                        # NOTE(review): keys are re-mapped while iterating
                        # the same dict; relies on Python 2 behavior when a
                        # flattened key differs from the original.
                        for k in newobj:
                            newk = flatten(k)
                            newobj[newk] = flatten(newobj[k])
                            if k != newk:
                                newobj.pop(k)
                        return newobj
                    elif isinstance(newobj, (list, tuple, set)):
                        return [flatten(item) for item in newobj]
                    else:
                        return newobj
                else: return str(newobj)
            else: return newobj
        return flatten(d)

    def as_json(self, sanitize=True):
        # Flat, sanitized dict rendered through gluon.serializers.json.
        d = self.as_dict(flat=True, sanitize=sanitize)
        from serializers import json
        return json(d)

    def as_yaml(self, sanitize=True):
        # Flat, sanitized dict rendered through gluon.serializers.yaml.
        d = self.as_dict(flat=True, sanitize=sanitize)
        from serializers import yaml
        return yaml(d)

    def as_xml(self, sanitize=True):
        # Flat, sanitized dict rendered through gluon.serializers.xml.
        d = self.as_dict(flat=True, sanitize=sanitize)
        from serializers import xml
        return xml(d)
class BEAUTIFY(DIV):
    """
    example::

        >>> BEAUTIFY(['a', 'b', {'hello': 'world'}]).xml()
        '<div><table><tr><td><div>a</div></td></tr><tr><td><div>b</div></td></tr><tr><td><div><table><tr><td style="font-weight:bold;vertical-align:top">hello</td><td valign="top">:</td><td><div>world</div></td></tr></table></div></td></tr></table></div>'

    turns any list, dictionary, etc into decent looking html.

    Two special attributes are
    :sorted: a function that takes the dict and returned sorted keys
    :keyfilter: a function that takes a key and returns its representation
    or None if the key is to be skipped. By default key[:1]=='_' is skipped.
    """

    tag = 'div'

    @staticmethod
    def no_underscore(key):
        # Default keyfilter: hide keys starting with an underscore.
        if key[:1] == '_':
            return None
        return key

    def __init__(self, component, **attributes):
        self.components = [component]
        self.attributes = attributes
        sorter = attributes.get('sorted', sorted)
        keyfilter = attributes.get('keyfilter', BEAUTIFY.no_underscore)
        components = []
        attributes = copy.copy(self.attributes)
        # `level` caps recursion depth at 6 nested containers.
        level = attributes['level'] = attributes.get('level', 6) - 1
        if '_class' in attributes:
            # Mark nested tables with a derived CSS class ('...i', '...ii', ...).
            attributes['_class'] += 'i'
        if level == 0:
            return
        for c in self.components:
            if hasattr(c, 'value') and not callable(c.value):
                # Unwrap objects exposing a plain .value attribute.
                if c.value:
                    components.append(c.value)
            if hasattr(c, 'xml') and callable(c.xml):
                # Already an HTML helper: embed as-is.
                components.append(c)
                continue
            elif hasattr(c, 'keys') and callable(c.keys):
                # dict-like: render as a two-column key/value table.
                rows = []
                try:
                    keys = (sorter and sorter(c)) or c
                    for key in keys:
                        if isinstance(key, (str, unicode)) and keyfilter:
                            filtered_key = keyfilter(key)
                        else:
                            filtered_key = str(key)
                        if filtered_key is None:
                            continue
                        value = c[key]
                        if isinstance(value, types.LambdaType):
                            # skip callables stored in the mapping
                            continue
                        rows.append(
                            TR(
                                TD(filtered_key, _style='font-weight:bold;vertical-align:top'),
                                TD(':', _valign='top'),
                                TD(BEAUTIFY(value, **attributes))))
                    components.append(TABLE(*rows, **attributes))
                    continue
                except:
                    # fall through to the generic representations below
                    pass
            if isinstance(c, str):
                components.append(str(c))
            elif isinstance(c, unicode):
                components.append(c.encode('utf8'))
            elif isinstance(c, (list, tuple)):
                # sequences: one table row per item, recursing per item
                items = [TR(TD(BEAUTIFY(item, **attributes)))
                         for item in c]
                components.append(TABLE(*items, **attributes))
            elif isinstance(c, cgi.FieldStorage):
                components.append('FieldStorage object')
            else:
                components.append(repr(c))
        self.components = components
class MENU(DIV):
    """
    Used to build menus

    Optional arguments
      _class: defaults to 'web2py-menu web2py-menu-vertical'
      ul_class: defaults to 'web2py-menu-vertical'
      li_class: defaults to 'web2py-menu-expand'
      li_first: defaults to 'web2py-menu-first'
      li_last: defaults to 'web2py-menu-last'

    Example:
        menu = MENU([['name', False, URL(...), [submenu]], ...])
        {{=menu}}
    """

    tag = 'ul'

    def __init__(self, data, **args):
        # data: nested list of menu items; each item is
        # [name, active, link, optional sublist, optional visible flag]
        self.data = data
        self.attributes = args
        self.components = []
        if not '_class' in self.attributes:
            self['_class'] = 'web2py-menu web2py-menu-vertical'
        if not 'ul_class' in self.attributes:
            self['ul_class'] = 'web2py-menu-vertical'
        if not 'li_class' in self.attributes:
            self['li_class'] = 'web2py-menu-expand'
        if not 'li_first' in self.attributes:
            self['li_first'] = 'web2py-menu-first'
        if not 'li_last' in self.attributes:
            self['li_last'] = 'web2py-menu-last'
        if not 'li_active' in self.attributes:
            self['li_active'] = 'web2py-menu-active'
        if not 'mobile' in self.attributes:
            self['mobile'] = False

    def serialize(self, data, level=0):
        """Recursively convert the menu item tree into nested UL/LI helpers."""
        if level == 0:
            ul = UL(**self.attributes)
        else:
            ul = UL(_class=self['ul_class'])
        for item in data:
            if isinstance(item, LI):
                # pre-built LI helpers pass through untouched
                ul.append(item)
            else:
                (name, active, link) = item[:3]
                if isinstance(link, DIV):
                    li = LI(link)
                elif 'no_link_url' in self.attributes and self['no_link_url'] == link:
                    li = LI(DIV(name))
                elif isinstance(link, dict):
                    li = LI(A(name, **link))
                elif link:
                    li = LI(A(name, _href=link))
                elif not link and isinstance(name, A):
                    li = LI(name)
                else:
                    li = LI(A(name, _href='#',
                              _onclick='javascript:void(0);return false;'))
                if level == 0 and item == data[0]:
                    li['_class'] = self['li_first']
                elif level == 0 and item == data[-1]:
                    li['_class'] = self['li_last']
                if len(item) > 3 and item[3]:
                    # item has a sublist: recurse one level deeper
                    li['_class'] = self['li_class']
                    li.append(self.serialize(item[3], level + 1))
                if active or ('active_url' in self.attributes and self['active_url'] == link):
                    if li['_class']:
                        li['_class'] = li['_class'] + ' ' + self['li_active']
                    else:
                        li['_class'] = self['li_active']
                if len(item) <= 4 or item[4] == True:
                    # fifth element, when present, is a visibility flag
                    ul.append(li)
        return ul

    def serialize_mobile(self, data, select=None, prefix=''):
        """Render the menu as a <select> that navigates onchange (mobile)."""
        if not select:
            select = SELECT(**self.attributes)
        for item in data:
            if len(item) <= 4 or item[4] == True:
                select.append(OPTION(CAT(prefix, item[0]),
                                     _value=item[2], _selected=item[1]))
                if len(item) > 3 and len(item[3]):
                    # flatten submenus using a path-like 'parent/child' label
                    self.serialize_mobile(
                        item[3], select, prefix=CAT(prefix, item[0], '/'))
        select['_onchange'] = 'window.location=this.value'
        return select

    def xml(self):
        if self['mobile']:
            return self.serialize_mobile(self.data, 0).xml()
        else:
            return self.serialize(self.data, 0).xml()
def embed64(
    filename=None,
    file=None,
    data=None,
    extension='image/gif',
):
    """
    helper to encode the provided (binary) data into base64.

    :param filename: if provided, opens and reads this file in 'rb' mode
    :param file: if provided, reads this file(-like object)
    :param data: if provided, uses the provided data
    :param extension: MIME type embedded in the returned data URI
    :returns: a ``data:<extension>;base64,<payload>`` URI string
    """
    if filename and os.path.exists(filename):
        # BUG FIX: the existence check previously tested `file` instead of
        # `filename`, so a valid filename was silently ignored unless an
        # unrelated `file` argument happened to name an existing path.
        with open(filename, 'rb') as fp:
            data = fp.read()
    elif file:
        # BUG FIX: the documented `file` argument was previously never read.
        data = file.read()
    data = base64.b64encode(data)
    return 'data:%s;base64,%s' % (extension, data)
def test():
    """
    Example:
    >>> from validators import *
    >>> print DIV(A('click me', _href=URL(a='a', c='b', f='c')), BR(), HR(), DIV(SPAN(\"World\"), _class='unknown')).xml()
    <div><a href=\"/a/b/c\">click me</a><br /><hr /><div class=\"unknown\"><span>World</span></div></div>
    >>> print DIV(UL(\"doc\",\"cat\",\"mouse\")).xml()
    <div><ul><li>doc</li><li>cat</li><li>mouse</li></ul></div>
    >>> print DIV(UL(\"doc\", LI(\"cat\", _class='feline'), 18)).xml()
    <div><ul><li>doc</li><li class=\"feline\">cat</li><li>18</li></ul></div>
    >>> print TABLE(['a', 'b', 'c'], TR('d', 'e', 'f'), TR(TD(1), TD(2), TD(3))).xml()
    <table><tr><td>a</td><td>b</td><td>c</td></tr><tr><td>d</td><td>e</td><td>f</td></tr><tr><td>1</td><td>2</td><td>3</td></tr></table>
    >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_EXPR('int(value)<10')))
    >>> print form.xml()
    <form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" /></form>
    >>> print form.accepts({'myvar':'34'}, formname=None)
    False
    >>> print form.xml()
    <form action="#" enctype="multipart/form-data" method="post"><input class="invalidinput" name="myvar" type="text" value="34" /><div class="error_wrapper"><div class="error" id="myvar__error">invalid expression</div></div></form>
    >>> print form.accepts({'myvar':'4'}, formname=None, keepvalues=True)
    True
    >>> print form.xml()
    <form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" value=\"4\" /></form>
    >>> form=FORM(SELECT('cat', 'dog', _name='myvar'))
    >>> print form.accepts({'myvar':'dog'}, formname=None, keepvalues=True)
    True
    >>> print form.xml()
    <form action=\"#\" enctype=\"multipart/form-data\" method=\"post\"><select name=\"myvar\"><option value=\"cat\">cat</option><option selected=\"selected\" value=\"dog\">dog</option></select></form>
    >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_MATCH('^\w+$', 'only alphanumeric!')))
    >>> print form.accepts({'myvar':'as df'}, formname=None)
    False
    >>> print form.xml()
    <form action="#" enctype="multipart/form-data" method="post"><input class="invalidinput" name="myvar" type="text" value="as df" /><div class="error_wrapper"><div class="error" id="myvar__error">only alphanumeric!</div></div></form>
    >>> session={}
    >>> form=FORM(INPUT(value=\"Hello World\", _name=\"var\", requires=IS_MATCH('^\w+$')))
    >>> isinstance(form.as_dict(), dict)
    True
    >>> form.as_dict(flat=True).has_key("vars")
    True
    >>> isinstance(form.as_json(), basestring) and len(form.as_json(sanitize=False)) > 0
    True
    >>> if form.accepts({}, session,formname=None): print 'passed'
    >>> if form.accepts({'var':'test ', '_formkey': session['_formkey[None]']}, session, formname=None): print 'passed'
    """
    # Doctest-only container: all behavior lives in the docstring above,
    # executed via doctest.testmod() in the __main__ guard at module end.
    pass
class web2pyHTMLParser(HTMLParser):
    """
    obj = web2pyHTMLParser(text) parses and html/xml text into web2py helpers.
    obj.tree contains the root of the tree, and tree can be manipulated

    >>> str(web2pyHTMLParser('hello<div a="b" c=3>wor<ld<span>xxx</span>y<script/>yy</div>zzz').tree)
    'hello<div a="b" c="3">wor<ld<span>xxx</span>y<script></script>yy</div>zzz'
    >>> str(web2pyHTMLParser('<div>a<span>b</div>c').tree)
    '<div>a<span>b</span></div>c'
    >>> tree = web2pyHTMLParser('hello<div a="b">world</div>').tree
    >>> tree.element(_a='b')['_c']=5
    >>> str(tree)
    'hello<div a="b" c="5">world</div>'
    """
    def __init__(self, text, closed=('input', 'link')):
        HTMLParser.__init__(self)
        self.tree = self.parent = TAG['']()  # root container helper
        self.closed = closed                 # tag names serialized as self-closing
        # Names exported by this module considered "known" helper tags.
        # NOTE(review): isinstance(eval(x), DIV) is False for class objects;
        # looks like issubclass may have been intended — confirm upstream.
        self.tags = [x for x in __all__ if isinstance(eval(x), DIV)]
        self.last = None
        self.feed(text)

    def handle_starttag(self, tagname, attrs):
        # Known helper tags map to their helper class; everything else gets
        # a generic TAG, marked self-closing when listed in self.closed.
        if tagname.upper() in self.tags:
            tag = eval(tagname.upper())
        else:
            if tagname in self.closed:
                tagname += '/'
            tag = TAG[tagname]()
        for key, value in attrs:
            tag['_' + key] = value
        tag.parent = self.parent
        self.parent.append(tag)
        if not tag.tag.endswith('/'):
            # open tag: descend so children attach under it
            self.parent = tag
        else:
            # self-closing: remember name so a stray end tag is ignored
            self.last = tag.tag[:-1]

    def handle_data(self, data):
        # Normalize text nodes to utf8-encoded str (Python 2 semantics).
        if not isinstance(data, unicode):
            try:
                data = data.decode('utf8')
            except:
                data = data.decode('latin1')
        self.parent.append(data.encode('utf8', 'xmlcharref'))

    def handle_charref(self, name):
        # Numeric character reference: hex (&#x..;) or decimal (&#..;).
        if name.startswith('x'):
            self.parent.append(unichr(int(name[1:], 16)).encode('utf8'))
        else:
            self.parent.append(unichr(int(name)).encode('utf8'))

    def handle_entityref(self, name):
        # Named entity (&amp; etc.): substitute its literal expansion.
        self.parent.append(entitydefs[name])

    def handle_endtag(self, tagname):
        # this deals with unbalanced tags
        if tagname == self.last:
            return
        # Pop ancestors until we close the matching open tag.
        while True:
            try:
                parent_tagname = self.parent.tag
                self.parent = self.parent.parent
            except:
                raise RuntimeError("unable to balance tag %s" % tagname)
            if parent_tagname[:len(tagname)] == tagname: break
def markdown_serializer(text, tag=None, attr=None):
attr = attr or {}
if tag is None:
return re.sub('\s+', ' ', text)
if tag == 'br':
return '\n\n'
if tag == 'h1':
return '#' + text + '\n\n'
if tag == 'h2':
return '#' * 2 + text + '\n\n'
if tag == 'h3':
return '#' * 3 + text + '\n\n'
if tag == 'h4':
return '#' * 4 + text + '\n\n'
if tag == 'p':
return text + '\n\n'
if tag == 'b' or tag == 'strong':
return '**%s**' % text
if tag == 'em' or tag == 'i':
return '*%s*' % text
if tag == 'tt' or tag == 'code':
return '`%s`' % text
if tag == 'a':
return '[%s](%s)' % (text, attr.get('_href', ''))
if tag == 'img':
return '' % (attr.get('_alt', ''), attr.get('_src', ''))
return text
def markmin_serializer(text, tag=None, attr=None):
    """
    Serializer callback for flattening an HTML helper tree to markmin
    wiki syntax.

    :param text: already-serialized content of the element
    :param tag: lowercase tag name, or None for a plain text node
    :param attr: dict of the element's attributes (keys prefixed with '_')
    :returns: the markmin representation of the element
    """
    attr = attr or {}
    if tag == 'br':
        return '\n\n'
    # headings h1..h4 -> '# ', '## ', ... prefixes
    depth = {'h1': 1, 'h2': 2, 'h3': 3, 'h4': 4}.get(tag)
    if depth:
        return '#' * depth + ' ' + text + '\n\n'
    if tag == 'p':
        return text + '\n\n'
    if tag == 'li':
        return '\n- ' + text.replace('\n', ' ')
    if tag == 'tr':
        # drop the leading ' | ' emitted by the first cell, flatten newlines
        return text[3:].replace('\n', ' ') + '\n'
    if tag in ('table', 'blockquote'):
        return '\n-----\n' + text + '\n------\n'
    if tag in ('td', 'th'):
        return ' | ' + text
    # simple inline wrappers
    inline = {'b': '**%s**', 'strong': '**%s**', 'label': '**%s**',
              'em': "''%s''", 'i': "''%s''"}
    if tag in inline:
        return inline[tag] % text
    if tag == 'tt':
        return '``%s``' % text.strip()
    if tag == 'code':
        return '``\n%s``' % text
    if tag == 'a':
        return '[[%s %s]]' % (text, attr.get('_href', ''))
    if tag == 'img':
        return '[[%s %s left]]' % (attr.get('_alt', 'no title'), attr.get('_src', ''))
    # text nodes and unknown tags pass through unchanged
    return text
class MARKMIN(XmlComponent):
    """
    Helper that renders markmin wiki text to HTML on serialization.

    For documentation: http://web2py.com/examples/static/markmin.html
    """
    def __init__(self, text, extra=None, allowed=None, sep='p',
                 url=None, environment=None, latex='google',
                 autolinks='default',
                 protolinks='default',
                 class_prefix='',
                 id_prefix='markmin_'):
        # Store the raw markmin text and rendering options; actual
        # conversion is deferred until xml() is called.
        self.text = text
        self.extra = extra or {}
        self.allowed = allowed or {}
        self.sep = sep
        # url=True means "use the module-level URL helper".
        self.url = URL if url == True else url
        self.environment = environment
        self.latex = latex
        self.autolinks = autolinks
        self.protolinks = protolinks
        self.class_prefix = class_prefix
        self.id_prefix = id_prefix

    def xml(self):
        """
        calls the gluon.contrib.markmin render function to convert the wiki syntax
        """
        from contrib.markmin.markmin2html import render
        return render(self.text, extra=self.extra,
                      allowed=self.allowed, sep=self.sep, latex=self.latex,
                      URL=self.url, environment=self.environment,
                      autolinks=self.autolinks, protolinks=self.protolinks,
                      class_prefix=self.class_prefix, id_prefix=self.id_prefix)

    def __str__(self):
        return self.xml()

    def flatten(self, render=None):
        """
        return the text stored by the MARKMIN object rendered by the render function
        """
        return self.text

    def elements(self, *args, **kargs):
        """
        to be considered experimental since the behavior of this method is questionable
        another options could be TAG(self.text).elements(*args,**kargs)
        """
        return [self.text]
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
skonto/spark
|
refs/heads/master
|
python/pyspark/tests/test_util.py
|
20
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from py4j.protocol import Py4JJavaError
from pyspark import keyword_only
from pyspark.testing.utils import PySparkTestCase
class KeywordOnlyTests(unittest.TestCase):
    """Tests for pyspark's @keyword_only decorator."""

    class Wrapped(object):
        # Minimal fixture: @keyword_only records passed keywords in
        # self._input_kwargs for the decorated method to read.
        @keyword_only
        def set(self, x=None, y=None):
            if "x" in self._input_kwargs:
                self._x = self._input_kwargs["x"]
            if "y" in self._input_kwargs:
                self._y = self._input_kwargs["y"]
            return x, y

    def test_keywords(self):
        # Only explicitly supplied keywords should be recorded as attributes.
        w = self.Wrapped()
        x, y = w.set(y=1)
        self.assertEqual(y, 1)
        self.assertEqual(y, w._y)
        self.assertIsNone(x)
        self.assertFalse(hasattr(w, "_x"))

    def test_non_keywords(self):
        # Positional arguments must be rejected with TypeError.
        w = self.Wrapped()
        self.assertRaises(TypeError, lambda: w.set(0, y=1))

    def test_kwarg_ownership(self):
        # test _input_kwargs is owned by each class instance and not a shared static variable
        class Setter(object):
            @keyword_only
            def set(self, x=None, other=None, other_x=None):
                if "other" in self._input_kwargs:
                    # trigger a nested set() on another instance, which would
                    # clobber a shared _input_kwargs if it were static
                    self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
                self._x = self._input_kwargs["x"]

        a = Setter()
        b = Setter()
        a.set(x=1, other=b, other_x=2)
        self.assertEqual(a._x, 1)
        self.assertEqual(b._x, 2)
class UtilTests(PySparkTestCase):
    """Tests for helpers in pyspark.util."""

    def test_py4j_exception_message(self):
        # _exception_message should surface the Java-side error text.
        from pyspark.util import _exception_message

        with self.assertRaises(Py4JJavaError) as context:
            # This attempts java.lang.String(null) which throws an NPE.
            self.sc._jvm.java.lang.String(None)

        self.assertTrue('NullPointerException' in _exception_message(context.exception))

    def test_parsing_version_string(self):
        # Non-numeric version strings cannot be split into (major, minor).
        from pyspark.util import VersionUtils
        self.assertRaises(ValueError, lambda: VersionUtils.majorMinorVersion("abced"))
if __name__ == "__main__":
    from pyspark.tests.test_util import *

    try:
        # Emit JUnit-style XML reports when xmlrunner is installed (CI runs).
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        # Fall back to the default text test runner locally.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
frew/simpleproto
|
refs/heads/master
|
scons-local-1.1.0/SCons/Tool/dmd.py
|
1
|
"""SCons.Tool.dmd
Tool-specific initialization for the Digital Mars D compiler.
(http://digitalmars.com/d)
Coded by Andy Friesen (andy@ikagames.com)
15 November 2003
There are a number of problems with this script at this point in time.
The one that irritates me the most is the Windows linker setup. The D
linker doesn't have a way to add lib paths on the commandline, as far
as I can see. You have to specify paths relative to the SConscript or
use absolute paths. To hack around it, add '#/blah'. This will link
blah.lib from the directory where SConstruct resides.
Compiler variables:
DC - The name of the D compiler to use. Defaults to dmd or gdmd,
whichever is found.
DPATH - List of paths to search for import modules.
DVERSIONS - List of version tags to enable when compiling.
DDEBUG - List of debug tags to enable when compiling.
Linker related variables:
LIBS - List of library files to link in.
DLINK - Name of the linker to use. Defaults to dmd or gdmd.
DLINKFLAGS - List of linker flags.
Lib tool variables:
DLIB - Name of the lib tool to use. Defaults to lib.
DLIBFLAGS - List of flags to pass to the lib tool.
LIBS - Same as for the linker. (libraries to pull into the .lib)
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dmd.py 3603 2008/10/10 05:46:45 scons"
import os
import string
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner.D
import SCons.Tool
# Adapted from c++.py
# Adapted from c++.py
def isD(source):
    """Return 1 if any node in *source* was built from a ``.d`` file, else 0."""
    for node in source or []:
        if node.sources:
            extension = os.path.splitext(str(node.sources[0]))[1]
            if extension == '.d':
                return 1
    return 0
# Per-process caches mapping an environment's original LINKCOM/ARCOM command
# to the "smart" generator installed by generate(), so repeated generate()
# calls reuse the same wrapper instead of stacking new ones.
smart_link = {}
smart_lib = {}
def generate(env):
    """Add Builders and construction variables for D sources to *env*.

    Detects the dmd or gdmd compiler, registers object builders for ``.d``
    files, and wraps the link/archive command generators so builds that mix
    D sources pick the D toolchain (see the module docstring for caveats).
    """
    global smart_link
    global smart_lib

    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    DAction = SCons.Action.Action('$DCOM', '$DCOMSTR')

    static_obj.add_action('.d', DAction)
    shared_obj.add_action('.d', DAction)
    static_obj.add_emitter('.d', SCons.Defaults.StaticObjectEmitter)
    shared_obj.add_emitter('.d', SCons.Defaults.SharedObjectEmitter)

    dc = env.Detect(['dmd', 'gdmd'])
    env['DC'] = dc
    env['DCOM'] = '$DC $_DINCFLAGS $_DVERFLAGS $_DDEBUGFLAGS $_DFLAGS -c -of$TARGET $SOURCES'
    env['_DINCFLAGS'] = '$( ${_concat(DINCPREFIX, DPATH, DINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
    env['_DVERFLAGS'] = '$( ${_concat(DVERPREFIX, DVERSIONS, DVERSUFFIX, __env__)} $)'
    env['_DDEBUGFLAGS'] = '$( ${_concat(DDEBUGPREFIX, DDEBUG, DDEBUGSUFFIX, __env__)} $)'
    env['_DFLAGS'] = '$( ${_concat(DFLAGPREFIX, DFLAGS, DFLAGSUFFIX, __env__)} $)'

    env['DPATH'] = ['#/']
    env['DFLAGS'] = []
    env['DVERSIONS'] = []
    env['DDEBUG'] = []

    if dc:
        # Add the path to the standard library.
        # This is merely for the convenience of the dependency scanner.
        dmd_path = env.WhereIs(dc)
        if dmd_path:
            # Use the str method instead of the Python-2-only
            # string.rindex() module function.
            x = dmd_path.rindex(dc)
            phobosDir = dmd_path[:x] + '/../src/phobos'
            if os.path.isdir(phobosDir):
                env.Append(DPATH=[phobosDir])

    env['DINCPREFIX'] = '-I'
    env['DINCSUFFIX'] = ''
    env['DVERPREFIX'] = '-version='
    env['DVERSUFFIX'] = ''
    env['DDEBUGPREFIX'] = '-debug='
    env['DDEBUGSUFFIX'] = ''
    env['DFLAGPREFIX'] = '-'
    env['DFLAGSUFFIX'] = ''
    env['DFILESUFFIX'] = '.d'

    # Need to use the Digital Mars linker/lib on windows.
    # *nix can just use GNU link.
    if env['PLATFORM'] == 'win32':
        env['DLINK'] = '$DC'
        env['DLINKCOM'] = '$DLINK -of$TARGET $SOURCES $DFLAGS $DLINKFLAGS $_DLINKLIBFLAGS'
        env['DLIB'] = 'lib'
        env['DLIBCOM'] = '$DLIB $_DLIBFLAGS -c $TARGET $SOURCES $_DLINKLIBFLAGS'

        env['_DLINKLIBFLAGS'] = '$( ${_concat(DLIBLINKPREFIX, LIBS, DLIBLINKSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
        env['_DLIBFLAGS'] = '$( ${_concat(DLIBFLAGPREFIX, DLIBFLAGS, DLIBFLAGSUFFIX, __env__)} $)'
        env['DLINKFLAGS'] = []
        env['DLIBLINKPREFIX'] = ''
        env['DLIBLINKSUFFIX'] = '.lib'
        env['DLIBFLAGPREFIX'] = '-'
        env['DLIBFLAGSUFFIX'] = ''
        env['DLINKFLAGPREFIX'] = '-'
        env['DLINKFLAGSUFFIX'] = ''

        SCons.Tool.createStaticLibBuilder(env)

        # Basically, we hijack the link and ar builders with our own.
        # these builders check for the presence of D source, and swap out
        # the system's defaults for the Digital Mars tools. If there's no D
        # source, then we silently return the previous settings.
        linkcom = env.get('LINKCOM')
        try:
            env['SMART_LINKCOM'] = smart_link[linkcom]
        except KeyError:
            def _smartLink(source, target, env, for_signature,
                           defaultLinker=linkcom):
                if isD(source):
                    # XXX I'm not sure how to add a $DLINKCOMSTR variable
                    # so that it works with this _smartLink() logic,
                    # and I don't have a D compiler/linker to try it out,
                    # so we'll leave it alone for now.
                    return '$DLINKCOM'
                else:
                    return defaultLinker
            env['SMART_LINKCOM'] = smart_link[linkcom] = _smartLink

        arcom = env.get('ARCOM')
        try:
            env['SMART_ARCOM'] = smart_lib[arcom]
        except KeyError:
            def _smartLib(source, target, env, for_signature,
                          defaultLib=arcom):
                if isD(source):
                    # XXX I'm not sure how to add a $DLIBCOMSTR variable
                    # so that it works with this _smartLib() logic, and
                    # I don't have a D compiler/archiver to try it out,
                    # so we'll leave it alone for now.
                    return '$DLIBCOM'
                else:
                    return defaultLib
            env['SMART_ARCOM'] = smart_lib[arcom] = _smartLib

        # It is worth noting that the final space in these strings is
        # absolutely pivotal. SCons sees these as actions and not generators
        # if it is not there. (very bad)
        env['ARCOM'] = '$SMART_ARCOM '
        env['LINKCOM'] = '$SMART_LINKCOM '
    else:  # assuming linux
        linkcom = env.get('LINKCOM')
        try:
            env['SMART_LINKCOM'] = smart_link[linkcom]
        except KeyError:
            def _smartLink(source, target, env, for_signature,
                           defaultLinker=linkcom, dc=dc):
                if isD(source):
                    try:
                        libs = env['LIBS']
                    except KeyError:
                        libs = []
                    # BUG FIX: these comparisons used `is`, which tests
                    # object identity rather than string equality; whether
                    # it ever matched depended on CPython string interning.
                    if 'phobos' not in libs:
                        if dc == 'dmd':
                            env.Append(LIBS=['phobos'])
                        elif dc == 'gdmd':
                            env.Append(LIBS=['gphobos'])
                    if 'pthread' not in libs:
                        env.Append(LIBS=['pthread'])
                    if 'm' not in libs:
                        env.Append(LIBS=['m'])
                return defaultLinker
            env['SMART_LINKCOM'] = smart_link[linkcom] = _smartLink

        env['LINKCOM'] = '$SMART_LINKCOM '
def exists(env):
    # Tool is usable if either the Digital Mars compiler (dmd) or the GDC
    # wrapper (gdmd) can be found on the construction environment's path.
    return env.Detect(['dmd', 'gdmd'])
|
jonathan-beard/edx-platform
|
refs/heads/master
|
lms/djangoapps/discussion_api/serializers.py
|
41
|
"""
Discussion API serializers
"""
from urllib import urlencode
from urlparse import urlunparse
from django.contrib.auth.models import User as DjangoUser
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from rest_framework import serializers
from discussion_api.permissions import (
NON_UPDATABLE_COMMENT_FIELDS,
NON_UPDATABLE_THREAD_FIELDS,
get_editable_fields,
)
from discussion_api.render import render_body
from django_comment_client.utils import is_comment_too_deep
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
Role,
)
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from lms.lib.comment_client.user import User as CommentClientUser
from lms.lib.comment_client.utils import CommentClientRequestError
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_names
from openedx.core.lib.api.fields import NonEmptyCharField
def get_context(course, request, thread=None):
    """
    Returns a context appropriate for use with ThreadSerializer or
    (if thread is provided) CommentSerializer.
    """
    # TODO: cache staff_user_ids and ta_user_ids if we need to improve perf
    staff_user_ids = set()
    staff_roles = Role.objects.filter(
        name__in=[FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR],
        course_id=course.id
    )
    for staff_role in staff_roles:
        for staff_user in staff_role.users.all():
            staff_user_ids.add(staff_user.id)
    ta_user_ids = set()
    for ta_role in Role.objects.filter(name=FORUM_ROLE_COMMUNITY_TA, course_id=course.id):
        for ta_user in ta_role.users.all():
            ta_user_ids.add(ta_user.id)
    requester = request.user
    cc_requester = CommentClientUser.from_django_user(requester).retrieve()
    cc_requester["course_id"] = course.id
    requester_is_privileged = (
        requester.id in staff_user_ids or requester.id in ta_user_ids
    )
    return {
        "course": course,
        "request": request,
        "thread": thread,
        # For now, the only groups are cohorts
        "group_ids_to_names": get_cohort_names(course),
        "is_requester_privileged": requester_is_privileged,
        "staff_user_ids": staff_user_ids,
        "ta_user_ids": ta_user_ids,
        "cc_requester": cc_requester,
    }
class _ContentSerializer(serializers.Serializer):
    """A base class for thread and comment serializers."""
    # "id" is remapped from "id_" in __init__ (see below); all timestamps
    # arrive as strings from the comments service and are passed through.
    id_ = serializers.CharField(read_only=True)
    author = serializers.SerializerMethodField("get_author")
    author_label = serializers.SerializerMethodField("get_author_label")
    created_at = serializers.CharField(read_only=True)
    updated_at = serializers.CharField(read_only=True)
    # The client-facing field is "raw_body"; it maps to the service's "body".
    raw_body = NonEmptyCharField(source="body")
    rendered_body = serializers.SerializerMethodField("get_rendered_body")
    abuse_flagged = serializers.SerializerMethodField("get_abuse_flagged")
    voted = serializers.SerializerMethodField("get_voted")
    vote_count = serializers.SerializerMethodField("get_vote_count")
    editable_fields = serializers.SerializerMethodField("get_editable_fields")
    # Subclasses override this with the set of fields that may only be set
    # at creation time; a validator is installed for each in __init__.
    non_updatable_fields = set()
    def __init__(self, *args, **kwargs):
        super(_ContentSerializer, self).__init__(*args, **kwargs)
        # id is an invalid class attribute name, so we must declare a different
        # name above and modify it here
        self.fields["id"] = self.fields.pop("id_")
        # DRF (this version) picks up validate_<field> methods by name, so
        # attaching them dynamically installs one validator per field.
        for field in self.non_updatable_fields:
            setattr(self, "validate_{}".format(field), self._validate_non_updatable)
    def _validate_non_updatable(self, attrs, _source):
        """Ensure that a field is not edited in an update operation."""
        # self.object is set only when serializing/updating an existing
        # object, so its presence distinguishes update from create.
        if self.object:
            raise ValidationError("This field is not allowed in an update.")
        return attrs
    def _is_user_privileged(self, user_id):
        """
        Returns a boolean indicating whether the given user_id identifies a
        privileged user.
        """
        return user_id in self.context["staff_user_ids"] or user_id in self.context["ta_user_ids"]
    def _is_anonymous(self, obj):
        """
        Returns a boolean indicating whether the content should be anonymous to
        the requester.
        """
        # anonymous_to_peers content is still visible to privileged users;
        # note "and" binds tighter than "or" here.
        return (
            obj["anonymous"] or
            obj["anonymous_to_peers"] and not self.context["is_requester_privileged"]
        )
    def get_author(self, obj):
        """Returns the author's username, or None if the content is anonymous."""
        return None if self._is_anonymous(obj) else obj["username"]
    def _get_user_label(self, user_id):
        """
        Returns the role label (i.e. "staff" or "community_ta") for the user
        with the given id.
        """
        return (
            "staff" if user_id in self.context["staff_user_ids"] else
            "community_ta" if user_id in self.context["ta_user_ids"] else
            None
        )
    def get_author_label(self, obj):
        """Returns the role label for the content author."""
        return None if self._is_anonymous(obj) else self._get_user_label(int(obj["user_id"]))
    def get_rendered_body(self, obj):
        """Returns the rendered body content."""
        return render_body(obj["body"])
    def get_abuse_flagged(self, obj):
        """
        Returns a boolean indicating whether the requester has flagged the
        content as abusive.
        """
        return self.context["cc_requester"]["id"] in obj["abuse_flaggers"]
    def get_voted(self, obj):
        """
        Returns a boolean indicating whether the requester has voted for the
        content.
        """
        return obj["id"] in self.context["cc_requester"]["upvoted_ids"]
    def get_vote_count(self, obj):
        """Returns the number of votes for the content."""
        return obj["votes"]["up_count"]
    def get_editable_fields(self, obj):
        """Return the list of the fields the requester can edit"""
        # Sorted for a stable, predictable API response.
        return sorted(get_editable_fields(obj, self.context))
class ThreadSerializer(_ContentSerializer):
    """
    A serializer for thread data.
    N.B. This should not be used with a comment_client Thread object that has
    not had retrieve() called, because of the interaction between DRF's attempts
    at introspection and Thread's __getattr__.
    """
    course_id = serializers.CharField()
    topic_id = NonEmptyCharField(source="commentable_id")
    group_id = serializers.IntegerField(required=False)
    group_name = serializers.SerializerMethodField("get_group_name")
    # "type" is remapped from "type_" in __init__ (see below).
    type_ = serializers.ChoiceField(
        source="thread_type",
        choices=[(val, val) for val in ["discussion", "question"]]
    )
    title = NonEmptyCharField()
    pinned = serializers.BooleanField(read_only=True)
    closed = serializers.BooleanField(read_only=True)
    following = serializers.SerializerMethodField("get_following")
    comment_count = serializers.IntegerField(source="comments_count", read_only=True)
    unread_comment_count = serializers.IntegerField(source="unread_comments_count", read_only=True)
    comment_list_url = serializers.SerializerMethodField("get_comment_list_url")
    endorsed_comment_list_url = serializers.SerializerMethodField("get_endorsed_comment_list_url")
    non_endorsed_comment_list_url = serializers.SerializerMethodField("get_non_endorsed_comment_list_url")
    read = serializers.BooleanField(read_only=True)
    has_endorsed = serializers.BooleanField(read_only=True, source="endorsed")
    non_updatable_fields = NON_UPDATABLE_THREAD_FIELDS
    def __init__(self, *args, **kwargs):
        super(ThreadSerializer, self).__init__(*args, **kwargs)
        # type is an invalid class attribute name, so we must declare a
        # different name above and modify it here
        self.fields["type"] = self.fields.pop("type_")
        # Compensate for the fact that some threads in the comments service do
        # not have the pinned field set
        if self.object and self.object.get("pinned") is None:
            self.object["pinned"] = False
    def get_group_name(self, obj):
        """Returns the name of the group identified by the thread's group_id."""
        return self.context["group_ids_to_names"].get(obj["group_id"])
    def get_following(self, obj):
        """
        Returns a boolean indicating whether the requester is following the
        thread.
        """
        return obj["id"] in self.context["cc_requester"]["subscribed_thread_ids"]
    def get_comment_list_url(self, obj, endorsed=None):
        """
        Returns the URL to retrieve the thread's comments, optionally including
        the endorsed query parameter.
        """
        # Question threads expose separate endorsed/non-endorsed lists;
        # discussion threads expose a single undivided list. Any other
        # combination has no valid URL.
        if (
            (obj["thread_type"] == "question" and endorsed is None) or
            (obj["thread_type"] == "discussion" and endorsed is not None)
        ):
            return None
        path = reverse("comment-list")
        query_dict = {"thread_id": obj["id"]}
        if endorsed is not None:
            query_dict["endorsed"] = endorsed
        return self.context["request"].build_absolute_uri(
            urlunparse(("", "", path, "", urlencode(query_dict), ""))
        )
    def get_endorsed_comment_list_url(self, obj):
        """Returns the URL to retrieve the thread's endorsed comments."""
        return self.get_comment_list_url(obj, endorsed=True)
    def get_non_endorsed_comment_list_url(self, obj):
        """Returns the URL to retrieve the thread's non-endorsed comments."""
        return self.get_comment_list_url(obj, endorsed=False)
    def restore_object(self, attrs, instance=None):
        # Update in place when an instance is given; otherwise create a new
        # Thread owned by the requesting user.
        if instance:
            for key, val in attrs.items():
                instance[key] = val
            return instance
        else:
            return Thread(user_id=self.context["cc_requester"]["id"], **attrs)
class CommentSerializer(_ContentSerializer):
    """
    A serializer for comment data.
    N.B. This should not be used with a comment_client Comment object that has
    not had retrieve() called, because of the interaction between DRF's attempts
    at introspection and Comment's __getattr__.
    """
    thread_id = serializers.CharField()
    parent_id = serializers.CharField(required=False)
    endorsed = serializers.BooleanField(required=False)
    endorsed_by = serializers.SerializerMethodField("get_endorsed_by")
    endorsed_by_label = serializers.SerializerMethodField("get_endorsed_by_label")
    endorsed_at = serializers.SerializerMethodField("get_endorsed_at")
    children = serializers.SerializerMethodField("get_children")
    non_updatable_fields = NON_UPDATABLE_COMMENT_FIELDS
    def get_endorsed_by(self, obj):
        """
        Returns the username of the endorsing user, if the information is
        available and would not identify the author of an anonymous thread.
        """
        endorsement = obj.get("endorsement")
        if endorsement:
            endorser_id = int(endorsement["user_id"])
            # Avoid revealing the identity of an anonymous non-staff question
            # author who has endorsed a comment in the thread
            if not (
                self._is_anonymous(self.context["thread"]) and
                not self._is_user_privileged(endorser_id)
            ):
                return DjangoUser.objects.get(id=endorser_id).username
        return None
    def get_endorsed_by_label(self, obj):
        """
        Returns the role label (i.e. "staff" or "community_ta") for the
        endorsing user
        """
        endorsement = obj.get("endorsement")
        if endorsement:
            return self._get_user_label(int(endorsement["user_id"]))
        else:
            return None
    def get_endorsed_at(self, obj):
        """Returns the timestamp for the endorsement, if available."""
        endorsement = obj.get("endorsement")
        return endorsement["time"] if endorsement else None
    def get_children(self, obj):
        # Recursively serialize child comments with the same context.
        return [
            CommentSerializer(child, context=self.context).data
            for child in obj.get("children", [])
        ]
    def validate(self, attrs):
        """
        Ensure that parent_id identifies a comment that is actually in the
        thread identified by thread_id and does not violate the configured
        maximum depth.
        """
        parent = None
        parent_id = attrs.get("parent_id")
        if parent_id:
            # A failed retrieve leaves parent as None, which the check
            # below reports as an invalid parent_id.
            try:
                parent = Comment(id=parent_id).retrieve()
            except CommentClientRequestError:
                pass
            if not (parent and parent["thread_id"] == attrs["thread_id"]):
                raise ValidationError(
                    "parent_id does not identify a comment in the thread identified by thread_id."
                )
        if is_comment_too_deep(parent):
            raise ValidationError({"parent_id": ["Comment level is too deep."]})
        return attrs
    def restore_object(self, attrs, instance=None):
        # Update in place when an instance is given; otherwise create a new
        # Comment in the thread's course, owned by the requesting user.
        if instance:
            for key, val in attrs.items():
                instance[key] = val
                # TODO: The comments service doesn't populate the endorsement
                # field on comment creation, so we only provide
                # endorsement_user_id on update
                if key == "endorsed":
                    instance["endorsement_user_id"] = self.context["cc_requester"]["id"]
            return instance
        return Comment(
            course_id=self.context["thread"]["course_id"],
            user_id=self.context["cc_requester"]["id"],
            **attrs
        )
|
nesdis/djongo
|
refs/heads/master
|
tests/django_tests/tests/v22/tests/inline_formsets/models.py
|
133
|
from django.db import models
class School(models.Model):
    # Minimal model referenced by Child; used by the inline formset tests.
    name = models.CharField(max_length=100)
class Parent(models.Model):
    # A Parent can be referenced by Child as either mother or father.
    name = models.CharField(max_length=100)
class Child(models.Model):
    # Two FKs to the same model require distinct related_names so the
    # reverse accessors on Parent do not clash.
    mother = models.ForeignKey(Parent, models.CASCADE, related_name='mothers_children')
    father = models.ForeignKey(Parent, models.CASCADE, related_name='fathers_children')
    school = models.ForeignKey(School, models.CASCADE)
    name = models.CharField(max_length=100)
class Poet(models.Model):
    # Parent side of the Poet/Poem inline formset fixtures.
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
class Poem(models.Model):
    poet = models.ForeignKey(Poet, models.CASCADE)
    name = models.CharField(max_length=100)
    class Meta:
        # A poet cannot have two poems with the same name; exercised by
        # formset uniqueness-validation tests.
        unique_together = ('poet', 'name')
    def __str__(self):
        return self.name
|
collex100/odoo
|
refs/heads/8.0
|
addons/website_forum/__init__.py
|
363
|
# -*- coding: utf-8 -*-
import controllers
import models
import tests
|
praveendath92/fitgoal
|
refs/heads/master
|
migrations/env.py
|
557
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the Alembic context with only the database URL (no Engine),
    so no DBAPI is required; context.execute() calls emit SQL to the script
    output instead of a live connection.
    """
    context.configure(url=config.get_main_option("sqlalchemy.url"))
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.
    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                # Emptying the directives list suppresses the revision file.
                directives[:] = []
                logger.info('No changes in schema detected.')
    # Build the engine from the [sqlalchemy.*] section of the ini file;
    # NullPool because a migration run needs only one short-lived connection.
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)
    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even if a migration fails.
        connection.close()
# Entry point: Alembic runs this module and selects offline (SQL script
# generation) or online (live DB connection) mode from the command line.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
|
rooshilp/CMPUT410Lab6
|
refs/heads/master
|
virt_env/virt1/lib/python2.7/site-packages/django/test/simple.py
|
51
|
"""
This module is pending deprecation as of Django 1.6 and will be removed in
version 1.8.
"""
from importlib import import_module
import json
import re
import unittest as real_unittest
import warnings
from django.apps import apps
from django.test import _doctest as doctest
from django.test import runner
from django.test.utils import compare_xml, strip_quotes
# django.utils.unittest is deprecated, but so is django.test.simple,
# and the latter will be removed before the former.
from django.utils import unittest
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)
warnings.warn(
"The django.test.simple module and DjangoTestSuiteRunner are deprecated; "
"use django.test.runner.DiscoverRunner instead.",
RemovedInDjango18Warning)
# The module name for tests outside models.py
TEST_MODULE = 'tests'
# Normalizers so doctest string comparison ignores Python-2 long suffixes
# ("22L" vs "22") and Decimal repr() differences between Python versions.
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
                                      lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
class OutputChecker(doctest.OutputChecker):
    """Doctest output checker that accepts a match from any of several
    comparison strategies (exact, numeric-normalized, XML, JSON)."""
    def check_output(self, want, got, optionflags):
        """
        The entry method for doctest output checking. Defers to a sequence of
        child checkers
        """
        checks = (self.check_output_default,
                  self.check_output_numeric,
                  self.check_output_xml,
                  self.check_output_json)
        # First checker that reports a match wins.
        for check in checks:
            if check(want, got, optionflags):
                return True
        return False
    def check_output_default(self, want, got, optionflags):
        """
        The default comparator provided by doctest - not perfect, but good for
        most purposes
        """
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
    def check_output_numeric(self, want, got, optionflags):
        """Doctest does an exact string comparison of output, which means that
        some numerically equivalent values aren't equal. This check normalizes
        * long integers (22L) so that they equal normal integers. (22)
        * Decimals so that they are comparable, regardless of the change
          made to __repr__ in Python 2.6.
        """
        return doctest.OutputChecker.check_output(self,
            normalize_decimals(normalize_long_ints(want)),
            normalize_decimals(normalize_long_ints(got)),
            optionflags)
    # NOTE(review): the parameter below is spelled 'optionsflags' (extra 's')
    # and is unused; kept as-is since it is positional-only to doctest.
    def check_output_xml(self, want, got, optionsflags):
        # compare_xml may raise on non-XML input; treat that as "no match".
        try:
            return compare_xml(want, got)
        except Exception:
            return False
    def check_output_json(self, want, got, optionsflags):
        """
        Tries to compare want and got as if they were JSON-encoded data
        """
        want, got = strip_quotes(want, got)
        try:
            want_json = json.loads(want)
            got_json = json.loads(got)
        except Exception:
            # Not valid JSON on one side or the other: no match.
            return False
        return want_json == got_json
class DocTestRunner(doctest.DocTestRunner):
    # Runner that always enables ELLIPSIS so "..." in expected output
    # matches any text.
    def __init__(self, *args, **kwargs):
        doctest.DocTestRunner.__init__(self, *args, **kwargs)
        self.optionflags = doctest.ELLIPSIS
# Shared checker instance used by every doctest suite built in this module.
doctestOutputChecker = OutputChecker()
def get_tests(app_config):
    """Import and return the app's '<app>.tests' module, or None if the
    app has no such submodule. A broken tests module re-raises its error."""
    dotted_name = '%s.%s' % (app_config.name, TEST_MODULE)
    try:
        return import_module(dotted_name)
    except ImportError:
        # Was the ImportError caused by a genuinely missing tests.py, or by
        # an import error inside a tests.py that does exist?
        if module_has_submodule(app_config.module, TEST_MODULE):
            # The module exists, so the import error came from inside it.
            raise
        return None
def make_doctest(module):
    # Build a doctest suite for `module` using this file's lenient output
    # checker and ELLIPSIS-enabled runner.
    return doctest.DocTestSuite(module,
                                checker=doctestOutputChecker,
                                runner=DocTestRunner)
def build_suite(app_config):
    """
    Create a complete Django test suite for the provided application module.
    """
    suite = unittest.TestSuite()
    # Load unit and doctests in the models.py module. If module has
    # a suite() method, use it. Otherwise build the test suite ourselves.
    models_module = app_config.models_module
    if models_module:
        if hasattr(models_module, 'suite'):
            suite.addTest(models_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
                models_module))
            # DocTestSuite raises ValueError when the module has no doctests.
            try:
                suite.addTest(make_doctest(models_module))
            except ValueError:
                # No doc tests in models.py
                pass
    # Check to see if a separate 'tests' module exists parallel to the
    # models module
    tests_module = get_tests(app_config)
    if tests_module:
        # Load unit and doctests in the tests.py module. If module has
        # a suite() method, use it. Otherwise build the test suite ourselves.
        if hasattr(tests_module, 'suite'):
            suite.addTest(tests_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
                tests_module))
            # Same ValueError convention as above for a doctest-free module.
            try:
                suite.addTest(make_doctest(tests_module))
            except ValueError:
                # No doc tests in tests.py
                pass
    return suite
def build_test(label):
    """
    Construct a test case with the specified label. Label should be of the
    form app_label.TestClass or app_label.TestClass.test_method. Returns an
    instantiated test or test suite corresponding to the label provided.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase "
                         "or app.TestCase.test_method" % label)
    app_config = apps.get_app_config(parts[0])
    models_module = app_config.models_module
    tests_module = get_tests(app_config)
    # Search both the models module and the tests module for the class name.
    test_modules = []
    if models_module:
        test_modules.append(models_module)
    if tests_module:
        test_modules.append(tests_module)
    TestClass = None
    for module in test_modules:
        TestClass = getattr(module, parts[1], None)
        if TestClass is not None:
            break
    try:
        # issubclass raises TypeError when TestClass is None or not a class;
        # that case falls through to the doctest search below.
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2:  # label is app.TestCase
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(
                        TestClass)
                except TypeError:
                    raise ValueError(
                        "Test label '%s' does not refer to a test class"
                        % label)
            else:  # label is app.TestCase.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass
    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in test_modules:
        try:
            doctests = make_doctest(module)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                # Doctests can live at module level or inside __test__.
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (
                            module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass
    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)
    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)
class DjangoTestSuiteRunner(runner.DiscoverRunner):
    """Deprecated runner that resolves dotted test labels via build_test()
    and whole-app labels via build_suite()."""
    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        suite = unittest.TestSuite()
        if test_labels:
            for label in test_labels:
                # Dotted labels name a class/method; bare labels name an app.
                if '.' in label:
                    suite.addTest(build_test(label))
                else:
                    suite.addTest(build_suite(apps.get_app_config(label)))
        else:
            # No labels: run everything from every installed app.
            for app_config in apps.get_app_configs():
                suite.addTest(build_suite(app_config))
        for extra in (extra_tests or []):
            suite.addTest(extra)
        return runner.reorder_suite(suite, (unittest.TestCase,))
|
yigitguler/django
|
refs/heads/master
|
django/contrib/gis/geos/prototypes/errcheck.py
|
48
|
"""
Error checking functions for GEOS ctypes prototype functions.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# Getting the `free` routine used to free the memory allocated for
# string pointers returned by GEOS.
free = GEOSFunc('GEOSFree')
free.argtypes = [c_void_p]
free.restype = None
### ctypes error checking routines ###
def last_arg_byref(args):
    "Returns the last C argument's value by reference."
    # ctypes byref() wrappers expose the wrapped ctypes object as ._obj.
    byref_arg = args[-1]
    return byref_arg._obj.value
def check_dbl(result, func, cargs):
    "Checks the status code and returns the double value passed in by reference."
    # GEOS signals success with status code 1; any other status means the
    # output double was never written, so report None.
    if result == 1:
        return last_arg_byref(cargs)
    return None
def check_geom(result, func, cargs):
    "Error checking on routines that return Geometries."
    # A NULL (falsy) pointer from GEOS means the operation failed.
    if result:
        return result
    raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__)
def check_minus_one(result, func, cargs):
    "Error checking on routines that should not return -1."
    # -1 is the GEOS error sentinel for these routines.
    if result != -1:
        return result
    raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
def check_predicate(result, func, cargs):
    "Error checking for unary/binary predicate functions."
    # GEOS predicates return a single char: 0 = false, 1 = true,
    # anything else = error.
    ordinal = ord(result)
    if ordinal == 0:
        return False
    if ordinal == 1:
        return True
    raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__)
def check_sized_string(result, func, cargs):
    """
    Error checking for routines that return explicitly sized strings.
    This frees the memory allocated by GEOS at the result pointer.
    """
    if not result:
        raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__)
    # The last C argument is a c_size_t passed by reference that GEOS fills
    # in with the string's length; use it to bound the read.
    length = last_arg_byref(cargs)
    value = string_at(result, length)
    # Hand the GEOS-allocated buffer back to the GEOS allocator.
    free(result)
    return value
def check_string(result, func, cargs):
    """
    Error checking for routines that return strings.
    This frees the memory allocated by GEOS at the result pointer.
    """
    if not result:
        raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__)
    # Copy the NUL-terminated string out of the GEOS-owned buffer, then
    # return the buffer to the GEOS allocator.
    value = string_at(result)
    free(result)
    return value
def check_zero(result, func, cargs):
    "Error checking on routines that should not return 0."
    # 0 is the GEOS error sentinel for these routines.
    if result != 0:
        return result
    raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
|
boyers/hamlit
|
refs/heads/master
|
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
|
899
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
  # Seed default_variables from generator flags, then from the detected
  # OS flavor; existing entries are never overwritten (setdefault).
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))
  flavor = gyp.common.GetFlavor(params)
  if flavor =='win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    # gyp's input stage reads this module-level flag to decide whether to
    # rewrite static-library dependency chains.
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
  """Writes 'dump.json' mapping every reachable target to the list of
  targets it directly depends on (discovered by a worklist traversal
  starting from target_list)."""
  # Map of target -> list of targets it depends on.
  edges = {}
  # Queue of targets to visit; copied so we can extend it while traversing.
  targets_to_visit = target_list[:]
  while targets_to_visit:
    target = targets_to_visit.pop()
    if target in edges:
      # Already recorded; avoids re-walking shared subgraphs and cycles.
      continue
    edges[target] = []
    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)
  filename = 'dump.json'
  # Context manager closes the file even if json.dump raises (the original
  # leaked the handle on failure). Single-argument print(...) prints the
  # same text under both Python 2 and 3.
  with open(filename, 'w') as f:
    json.dump(edges, f)
  print('Wrote json to %s.' % filename)
|
sssemil/cjdns
|
refs/heads/socket
|
node_build/dependencies/libuv/build/gyp/test/multiple-targets/gyptest-default.py
|
102
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# End-to-end gyp test: generate build files for multiple.gyp, relocate the
# tree (to prove paths are not absolute), build, and check that each
# program prints its own message plus the shared common.c message.
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('multiple.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('multiple.gyp', chdir='relocate/src')
expect1 = """\
hello from prog1.c
hello from common.c
"""
expect2 = """\
hello from prog2.c
hello from common.c
"""
test.run_built_executable('prog1', stdout=expect1, chdir='relocate/src')
test.run_built_executable('prog2', stdout=expect2, chdir='relocate/src')
test.pass_test()
|
RCMRD/geonode
|
refs/heads/master
|
geonode/contrib/geosites/site_template/local_settings_template.py
|
21
|
# flake8: noqa
# -*- coding: utf-8 -*-
###############################################
# Geosite local settings
###############################################
import os
# Outside URL
SITEURL = 'http://$DOMAIN'
# databases unique to site if not defined in site settings
"""
SITE_DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, '../development.db'),
},
}
"""
|
kefatong/ops
|
refs/heads/master
|
app/main/ansible_tasks.py
|
1
|
# coding:utf8
import re
import os
import stat
import json
import Queue
import hashlib
import ansible
import ansible.runner
import ansible.playbook
from ansible import callbacks
from ansible import utils
from flask import flash, redirect, url_for
def command_runner(user, command, inventory, view):
    """Run an ad-hoc shell command on the hosts listed in ``inventory``.

    Destructive commands are rejected up front: if the command contains the
    standalone words ``rm`` or ``mv``, flash a warning and redirect back to
    ``view`` instead of executing. Returns the Ansible runner result dict
    (or the redirect response when the command is rejected).
    """
    # Bug fix: the previous check, re.findall('rm', command), matched 'rm'
    # as a substring and wrongly rejected harmless commands such as
    # 'chmod', 'alarm' or 'format'. Match whole words only.
    if re.search(r'\brm\b', command) or re.search(r'\bmv\b', command):
        flash(u'内容包含删除了移动命令')
        return redirect(url_for(view))
    res = ansible.runner.Runner(
        module_name='shell',  # run the command via Ansible's shell module
        module_args=command,  # the shell command itself
        remote_user=user,
        host_list=inventory,
        pattern='all',
        private_key_file='/root/.ssh/id_rsa'
    ).run()
    return res
def playbook_runner(playbook, inventory):
    # Run a single playbook against the given inventory and return
    # Ansible's per-host result summary.
    # AggregateStats collects per-host ok/changed/failed counts; the two
    # callback objects drive console output at the configured verbosity.
    stats = callbacks.AggregateStats()
    playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
    runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
    res = ansible.playbook.PlayBook(
        playbook=playbook,
        stats=stats,
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        host_list=inventory,
    ).run()
    return res
def task_runner(taskList, inventory):
    """Run every playbook task in taskList against inventory.

    Returns a list of {'name', 'res'} dicts, or False when taskList is
    empty (preserving the original falsy-return contract).
    """
    results = []
    for task in taskList:
        outcome = playbook_runner(task['path'], inventory)
        results.append({
            'name': task['name'],
            'res': outcome,
        })
    return results if results else False
def GenerateInventory(current_app, devices=None):
    """Write an executable dynamic-inventory script for the given devices.

    The generated file (named by the MD5 of its JSON payload, stored under
    <FLASK_TMP_HOME>/tasks/) prints the device list as JSON when executed,
    as Ansible's dynamic inventory protocol expects.

    Returns the path of the generated file, or None when devices is empty,
    a device has no IP address (a flash message is queued), or no usable
    IPs were collected.
    """
    if not devices:
        return None
    app = current_app._get_current_object()
    flask_tmp_home = app.config['FLASK_TMP_HOME']
    tasks_dir = os.path.join(flask_tmp_home, 'tasks')
    # Bug fix: the inventory file is written into <FLASK_TMP_HOME>/tasks/,
    # but only FLASK_TMP_HOME itself used to be created, so the write
    # failed on a fresh tmp dir. Create the full path.
    if not os.path.exists(tasks_dir):
        os.makedirs(tasks_dir)
    inventory = {
        "devices": {
            'hosts': [],
        },
    }
    for device in devices:
        if device.ip is None:
            flash(u'设备{0}IP地址未设置.'.format(device.hostname))
            return None
        inventory['devices']['hosts'].append(device.ip)
    if len(inventory['devices']['hosts']) < 1:
        return None
    inventory_json = json.dumps(inventory)
    # File name = MD5 of the payload, so identical device sets reuse a file.
    md5 = hashlib.md5(inventory_json)
    json_devices = '''#!/usr/bin/env python\n# encoding: utf-8\nimport json\ndevices = json.dumps({0})\nprint devices\n'''.format(
        inventory_json)
    inventory_file = os.path.join(tasks_dir, str(md5.hexdigest()))
    with open(inventory_file, 'w') as f:
        f.write(json_devices)
    # Owner rwx plus group/other read: Ansible must be able to execute it.
    os.chmod(inventory_file, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
    return inventory_file
|
abourget/gevent-socketio
|
refs/heads/master
|
examples/pyramid_backbone_redis_chat_persistence/setup.py
|
11
|
import os
import sys
from setuptools import setup, find_packages, Command
here = os.path.abspath(os.path.dirname(__file__))
def _read(path):
with open(path) as f:
data = f.read()
f.close()
return data
README = ''
CHANGES = ''
requires = ['pyramid', 'gevent', 'gevent-socketio', 'sqlalchemy', 'redis', 'gunicorn']
if sys.version_info[:3] < (2, 5, 0):
requires.append('pysqlite')
class PyTest(Command):
    # setuptools command that shells out to py.test and propagates its
    # exit code ("python setup.py test" style hook).
    user_options = []
    def initialize_options(self):
        # Required by the Command interface; nothing to initialize.
        pass
    def finalize_options(self):
        # Required by the Command interface; nothing to finalize.
        pass
    def run(self):
        import subprocess
        errno = subprocess.call('py.test')
        # Exit with py.test's status so CI sees failures.
        raise SystemExit(errno)
setup(name='chatter4',
version='0.0',
description='chatter4',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='chatter4',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = chatter4:main
""",
paster_plugins=['pyramid'],
)
|
bloomark/python-dogecoinlib
|
refs/heads/master
|
dogecoin/tests/test_key.py
|
1
|
# Copyright (C) 2013-2014 The python-dogecoinlib developers
#
# This file is part of python-dogecoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-dogecoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from dogecoin.core.key import *
from dogecoin.core import x, b2x
class Test_CPubKey(unittest.TestCase):
    """Exercise CPubKey parsing and its validity/compression flags."""

    def test(self):
        def check(hex_pubkey, expect_valid, expect_fullyvalid, expect_compressed):
            # Build the key from raw hex and compare each flag in turn.
            pubkey = CPubKey(x(hex_pubkey))
            self.assertEqual(pubkey.is_valid, expect_valid)
            self.assertEqual(pubkey.is_fullyvalid, expect_fullyvalid)
            self.assertEqual(pubkey.is_compressed, expect_compressed)

        cases = [
            ('', False, False, False),
            ('00', True, True, False),  # why is this valid?
            ('01', True, False, False),
            ('02', True, False, False),
            ('0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71',
             True, True, True),
            ('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71',
             True, False, True),
            ('0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71',
             True, True, True),
            ('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455',
             True, True, False),
        ]
        for case in cases:
            check(*case)
|
daviddrysdale/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/data/region_IS.py
|
1
|
"""Auto-generated file, do not edit by hand. IS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone number metadata for Iceland (IS, +354). This file is produced by the
# metadata generator (see module docstring: "do not edit by hand"), so any
# manual change here will be overwritten on regeneration.
PHONE_METADATA_IS = PhoneMetadata(id='IS', country_code=354, international_prefix='00|1(?:0(?:01|[12]0)|100)',
    general_desc=PhoneNumberDesc(national_number_pattern='(?:38\\d|[4-9])\\d{6}', possible_length=(7, 9)),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:4(?:1[0-24-69]|2[0-7]|[37][0-8]|4[0-245]|5[0-68]|6\\d|8[0-36-8])|5(?:05|[156]\\d|2[02578]|3[0-579]|4[03-7]|7[0-2578]|8[0-35-9]|9[013-689])|872)\\d{4}', example_number='4101234', possible_length=(7,)),
    mobile=PhoneNumberDesc(national_number_pattern='(?:38[589]\\d\\d|6(?:1[1-8]|2[0-6]|3[027-9]|4[014679]|5[0159]|6[0-69]|70|8[06-8]|9\\d)|7(?:5[057]|[6-9]\\d)|8(?:2[0-59]|[3-69]\\d|8[28]))\\d{4}', example_number='6111234', possible_length=(7, 9)),
    toll_free=PhoneNumberDesc(national_number_pattern='80[08]\\d{4}', example_number='8001234', possible_length=(7,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='90(?:0\\d|1[5-79]|2[015-79]|3[135-79]|4[125-7]|5[25-79]|7[1-37]|8[0-35-7])\\d{3}', example_number='9001234', possible_length=(7,)),
    voip=PhoneNumberDesc(national_number_pattern='49[0-24-79]\\d{4}', example_number='4921234', possible_length=(7,)),
    uan=PhoneNumberDesc(national_number_pattern='809\\d{4}', example_number='8091234', possible_length=(7,)),
    voicemail=PhoneNumberDesc(national_number_pattern='(?:689|8(?:7[18]|80)|95[48])\\d{4}', example_number='6891234', possible_length=(7,)),
    preferred_international_prefix='00',
    number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[4-9]']),
        NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['3'])],
    mobile_number_portable_region=True)
|
the-fascinator/fascinator-portal
|
refs/heads/master
|
src/main/config/portal/default/default/scripts/settings.py
|
1
|
from com.googlecode.fascinator.api import PluginManager
from com.googlecode.fascinator.common import JsonSimpleConfig
from com.googlecode.fascinator.portal import Portal
from java.io import File
from java.util import HashMap
class SettingsData:
def __init__(self):
self.sysConfig = JsonSimpleConfig()
def __activate__(self, context):
self.velocityContext = context
self.__portal = None
action = self.vc("formData").get("verb")
if action == "create_view":
fq = [q for q in self.vc("sessionState").get("fq") if q != 'item_type:"object"']
if fq == []:
name = "new"
desc = "New View"
query = ""
else:
name = ""
desc = ""
query = str(" ".join(fq))
newPortal = Portal(name, desc, query)
newPortal.setFacetFields(Services.portalManager.default.facetFields)
newPortal.setQuery(query)
self.__portal = newPortal
else:
portalName = self.vc("formData").get("portalName")
print " * settings.py: portalName=%s, portalId=%s" % (portalName, self.vc("portalId"))
if portalName is None or (self.vc("formData").get("portalAction") == "Cancel"):
self.__portal = Services.portalManager.get(self.vc("portalId"))
else:
self.__portal = Portal()
self.__portal.name = portalName
Services.portalManager.add(self.__portal)
if self.vc("formData").get("portalAction") == "Update":
self.__updatePortal()
if self.vc("formData").get("emailAction") == "Update":
self.__updateEmail()
# Get from velocity context
def vc(self, index):
if self.velocityContext[index] is not None:
return self.velocityContext[index]
else:
log.error("ERROR: Requested context entry '" + index + "' doesn't exist")
return None
def isSelected(self, category):
selected = self.vc("sessionState").get("settingsCategory")
if category == selected:
return "selected"
return ""
def __updatePortal(self):
self.__portal.name = self.vc("formData").get("portalName")
self.__portal.description = self.vc("formData").get("portalDescription")
self.__portal.query = self.vc("formData").get("portalQuery")
self.__portal.recordsPerPage = int(self.vc("formData").get("portalRecordsPerPage"))
self.__portal.facetCount = int(self.vc("formData").get("portalFacetLimit"))
self.__portal.facetSort = self.vc("formData").get("portalFacetSort") is not None
facetFields = self.__portal.facetFields
facetFields.clear()
size = int(self.vc("formData").get("portalFacetSize"))
for i in range(1,size+2):
nameKey = "portalFacet_%s_name" % i
labelKey = "portalFacet_%s_label" % i
name = self.vc("formData").get(nameKey)
label = self.vc("formData").get(labelKey)
print "facet: key: %s, label: %s" % (name, label)
if name is not None and label is not None:
facetFields.put(name, label)
self.__portal.setFacetFields(facetFields)
sortFields = self.__portal.sortFields
sortFields.clear()
size = int(self.vc("formData").get("portalSortSize"))
for i in range(1,size+2):
nameKey = "portalSort_%s_name" % i
labelKey = "portalSort_%s_label" % i
name = self.vc("formData").get(nameKey)
label = self.vc("formData").get(labelKey)
print "sort: key: %s, label: %s" % (name, label)
if name is not None and label is not None:
sortFields.put(name, label)
self.__portal.setSortFields(sortFields)
Services.portalManager.save(self.__portal)
def getPortal(self):
return self.__portal
def getIndexerPlugins(self):
return PluginManager.getIndexerPlugins()
def getStoragePlugins(self):
return PluginManager.getStoragePlugins()
def getHarvesterPlugins(self):
return PluginManager.getHarvesterPlugins()
def getTransformerPlugins(self):
return PluginManager.getTransformerPlugins()
def getWatcherConfig(self):
watcherPath = self.sysConfig.getString("${fascinator.home}/watcher)", ["watcher", "path"])
configFile = File("%s/app/config.json" % watcherPath)
if configFile.exists():
return JsonSimpleConfig(configFile)
return None
def getEmail(self):
return self.sysConfig.getString(None, ["email"])
def getTimeout(self):
return self.sysConfig.getString(None, ["portal", "houseKeeping", "config", "frequency"])
def getFacetDisplays(self):
facetDisplays = self.__portal.getObject(["portal", "facet-displays"])
if facetDisplays is None or facetDisplays.isEmpty():
facetDisplays = HashMap()
facetDisplays.put("list", "List menu")
facetDisplays.put("tree", "Dynamic tree")
return facetDisplays
|
nolanliou/tensorflow
|
refs/heads/master
|
tensorflow/contrib/layers/python/layers/target_column.py
|
125
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TargetColumn abstract a single head in the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
@deprecated(
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def regression_target(label_name=None,
                      weight_column_name=None,
                      label_dimension=1):
  """Builds a `_TargetColumn` head for linear regression (squared loss).

  Args:
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training;
      it is multiplied by the loss of the example.
    label_dimension: dimension of the target for multilabels.

  Returns:
    An instance of _TargetColumn
  """
  head = _RegressionTargetColumn(
      loss_fn=_mean_squared_loss,
      label_name=label_name,
      weight_column_name=weight_column_name,
      label_dimension=label_dimension)
  return head
# TODO(zakaria): Add logistic_regression_target
@deprecated(
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def multi_class_target(n_classes, label_name=None, weight_column_name=None):
  """Builds a `_TargetColumn` head for single-label multi-class problems.

  Uses softmax cross entropy loss (sigmoid cross entropy for the binary
  case, where a single logit column is kept).

  Args:
    n_classes: Integer, number of classes, must be >= 2
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training;
      it is multiplied by the loss of the example.

  Returns:
    An instance of _MultiClassTargetColumn.

  Raises:
    ValueError: if n_classes is < 2
  """
  if n_classes < 2:
    raise ValueError("n_classes must be > 1 for classification.")
  loss_fn = (_log_loss_with_two_classes if n_classes == 2
             else _softmax_cross_entropy_loss)
  return _MultiClassTargetColumn(
      loss_fn=loss_fn,
      n_classes=n_classes,
      label_name=label_name,
      weight_column_name=weight_column_name)
@deprecated(
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def binary_svm_target(label_name=None, weight_column_name=None):
  """Builds a `_TargetColumn` head for binary classification with SVMs.

  The returned head uses binary hinge loss.

  Args:
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training;
      it is multiplied by the loss of the example.

  Returns:
    An instance of _TargetColumn.
  """
  head = _BinarySvmTargetColumn(label_name=label_name,
                                weight_column_name=weight_column_name)
  return head
@deprecated(
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
class ProblemType(object):
  # Enumeration of the problem kinds a _TargetColumn can model; stored on
  # each column as `problem_type`.
  UNSPECIFIED = 0
  CLASSIFICATION = 1
  LINEAR_REGRESSION = 2
  LOGISTIC_REGRESSION = 3
class _TargetColumn(object):
  """_TargetColumn is the abstraction for a single head in a model.

  Args:
    loss_fn: a function that returns the loss tensor.
    num_label_columns: Integer, number of label columns.
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.

  Raises:
    ValueError: if loss_fn or n_classes are missing.
  """

  def __init__(self, loss_fn, num_label_columns, label_name, weight_column_name,
               problem_type):
    if not loss_fn:
      raise ValueError("loss_fn must be provided")
    if num_label_columns is None:  # n_classes can be 0
      raise ValueError("num_label_columns must be provided")
    self._loss_fn = loss_fn
    self._num_label_columns = num_label_columns
    self._label_name = label_name
    self._weight_column_name = weight_column_name
    self._problem_type = problem_type

  def logits_to_predictions(self, logits, proba=False):
    # Abstract; subclasses must implement.
    raise NotImplementedError()

  def get_eval_ops(self, features, logits, labels, metrics=None):
    """Returns eval op."""
    raise NotImplementedError

  @property
  def label_name(self):
    # Key used to pull this head's labels out of a labels dict (None for
    # single-headed models where labels is a plain tensor).
    return self._label_name

  @property
  def weight_column_name(self):
    return self._weight_column_name

  @property
  def num_label_columns(self):
    return self._num_label_columns

  def get_weight_tensor(self, features):
    # Per-example weight tensor flattened to rank 1, or None when no weight
    # column was configured.
    if not self._weight_column_name:
      return None
    else:
      return array_ops.reshape(
          math_ops.to_float(features[self._weight_column_name]), shape=(-1,))

  @property
  def problem_type(self):
    return self._problem_type

  def _weighted_loss(self, loss, weight_tensor):
    """Returns cumulative weighted loss."""
    # Both operands are flattened to rank 1 so the multiply is elementwise
    # per example.
    unweighted_loss = array_ops.reshape(loss, shape=(-1,))
    weighted_loss = math_ops.multiply(unweighted_loss,
                                      array_ops.reshape(
                                          weight_tensor, shape=(-1,)))
    return weighted_loss

  def training_loss(self, logits, target, features, name="training_loss"):
    """Returns training loss tensor for this head.

    Training loss is different from the loss reported on the tensorboard as we
    should respect the example weights when computing the gradient.

      L = sum_{i} w_{i} * l_{i} / B

    where B is the number of examples in the batch, l_{i}, w_{i} are individual
    losses, and example weight.

    Args:
      logits: logits, a float tensor.
      target: either a tensor for labels or in multihead case, a dict of string
        to target tensor.
      features: features dict.
      name: Op name.

    Returns:
      Loss tensor.
    """
    # NOTE(review): `self.name` is not defined on this class (only
    # `label_name` exists), so the dict branch would raise AttributeError --
    # confirm the intended attribute before relying on dict-valued targets.
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)
    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name=name)
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.reduce_mean(loss_weighted, name=name)

  def loss(self, logits, target, features):
    """Returns loss tensor for this head.

    The loss returned is the weighted average.

      L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}

    Args:
      logits: logits, a float tensor.
      target: either a tensor for labels or in multihead case, a dict of string
        to target tensor.
      features: features dict.

    Returns:
      Loss tensor.
    """
    # NOTE(review): same `self.name` concern as in training_loss.
    target = target[self.name] if isinstance(target, dict) else target
    loss_unweighted = self._loss_fn(logits, target)
    weight_tensor = self.get_weight_tensor(features)
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name="loss")
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.div(math_ops.reduce_sum(loss_weighted),
                        math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
                        name="loss")
class _RegressionTargetColumn(_TargetColumn):
  """Head for (possibly multi-dimensional) linear regression."""

  def __init__(self, loss_fn, label_name, weight_column_name, label_dimension):
    super(_RegressionTargetColumn, self).__init__(
        loss_fn=loss_fn,
        num_label_columns=label_dimension,
        label_name=label_name,
        weight_column_name=weight_column_name,
        problem_type=ProblemType.LINEAR_REGRESSION)

  def logits_to_predictions(self, logits, proba=False):
    # Scalar regression: drop the trailing singleton dimension; otherwise
    # the logits already are the predictions.
    if self.num_label_columns != 1:
      return logits
    return array_ops.squeeze(logits, squeeze_dims=[1])

  def get_eval_ops(self, features, logits, labels, metrics=None):
    """Builds the eval-metric ops: loss plus any user-supplied metrics."""
    eval_ops = {"loss": metric_ops.streaming_mean(
        self.loss(logits, labels, features))}
    if metrics:
      preds = self.logits_to_predictions(logits, proba=False)
      eval_ops.update(
          _run_metrics(preds, labels, metrics,
                       self.get_weight_tensor(features)))
    return eval_ops
class _MultiClassTargetColumn(_TargetColumn):
  """_TargetColumn for classification."""

  # TODO(zakaria): support multilabel.
  def __init__(self, loss_fn, n_classes, label_name, weight_column_name):
    if n_classes < 2:
      raise ValueError("n_classes must be >= 2")
    # Binary classification keeps a single logit column; multi-class keeps
    # one logit column per class.
    super(_MultiClassTargetColumn, self).__init__(
        loss_fn=loss_fn,
        num_label_columns=1 if n_classes == 2 else n_classes,
        label_name=label_name,
        weight_column_name=weight_column_name,
        problem_type=ProblemType.CLASSIFICATION)

  def logits_to_predictions(self, logits, proba=False):
    """Converts logits to class probabilities (proba=True) or class ids."""
    if self.num_label_columns == 1:
      # Binary case: expand the single logit to two columns (class-0 logit
      # pinned at zero) so softmax/argmax below work uniformly.
      logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)

  def _default_eval_metrics(self):
    # Threshold-based default metrics only make sense for the binary case.
    if self._num_label_columns == 1:
      return get_default_binary_metrics_for_eval(thresholds=[.5])
    return {}

  def get_eval_ops(self, features, logits, labels, metrics=None):
    """Builds eval metric ops, splitting user metrics by input kind."""
    loss = self.loss(logits, labels, features)
    result = {"loss": metric_ops.streaming_mean(loss)}

    # Adds default metrics.
    if metrics is None:
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}

    predictions = math_ops.sigmoid(logits)
    labels_float = math_ops.to_float(labels)

    default_metrics = self._default_eval_metrics()
    for metric_name, metric_op in default_metrics.items():
      result[metric_name] = metric_op(predictions, labels_float)

    # User metric names may be plain strings (applied to predicted classes)
    # or (name, "classes"|"probabilities") tuples selecting the input kind.
    class_metrics = {}
    proba_metrics = {}
    for name, metric_op in six.iteritems(metrics):
      if isinstance(name, tuple):
        if len(name) != 2:
          raise ValueError("Ignoring metric {}. It returned a tuple with "
                           "len {}, expected 2.".format(name, len(name)))
        else:
          if name[1] not in ["classes", "probabilities"]:
            raise ValueError("Ignoring metric {}. The 2nd element of its "
                             "name should be either 'classes' or "
                             "'probabilities'.".format(name))
          elif name[1] == "classes":
            class_metrics[name[0]] = metric_op
          else:
            proba_metrics[name[0]] = metric_op
      elif isinstance(name, str):
        class_metrics[name] = metric_op
      else:
        raise ValueError("Ignoring metric {}. Its name is not in the correct "
                         "form.".format(name))
    if class_metrics:
      class_predictions = self.logits_to_predictions(logits, proba=False)
      result.update(
          _run_metrics(class_predictions, labels, class_metrics,
                       self.get_weight_tensor(features)))
    if proba_metrics:
      predictions = self.logits_to_predictions(logits, proba=True)
      result.update(
          _run_metrics(predictions, labels, proba_metrics,
                       self.get_weight_tensor(features)))
    return result
class _BinarySvmTargetColumn(_MultiClassTargetColumn):
  """_TargetColumn for binary classification using SVMs."""

  def __init__(self, label_name, weight_column_name):
    def loss_fn(logits, target):
      # Hinge loss expects a [batch_size, 1] target: assert rank <= 2, then
      # reshape before computing the loss.
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(target), 2),
          ["target's shape should be either [batch_size, 1] or [batch_size]"])
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return loss_ops.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name)

  def logits_to_predictions(self, logits, proba=False):
    # SVM margins are not probabilities, so only class predictions exist.
    if proba:
      raise ValueError(
          "logits to probabilities is not supported for _BinarySvmTargetColumn")

    # Expand the single logit into two columns and take the argmax.
    logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
    return math_ops.argmax(logits, 1)
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
  """Per-example squared error between logits and the float-cast target."""
  # Give rank-1 targets an explicit last dimension so "-" cannot broadcast.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  logits.get_shape().assert_is_compatible_with(target.get_shape())
  diff = logits - math_ops.to_float(target)
  return math_ops.square(diff)
def _log_loss_with_two_classes(logits, target):
  """Per-example sigmoid cross-entropy (binary log loss)."""
  # sigmoid_cross_entropy_with_logits requires a [batch_size, 1] target.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  return nn.sigmoid_cross_entropy_with_logits(
      labels=math_ops.to_float(target), logits=logits)
def _softmax_cross_entropy_loss(logits, target):
  """Per-example sparse softmax cross-entropy loss."""
  # Classification targets must be integer class ids.
  if not target.dtype.is_integer:
    raise ValueError("Target's dtype should be integer "
                     "Instead got %s." % target.dtype)
  # sparse_softmax_cross_entropy_with_logits requires a [batch_size] target,
  # so squeeze away a trailing singleton dimension.
  if len(target.get_shape()) == 2:
    target = array_ops.squeeze(target, squeeze_dims=[1])
  return nn.sparse_softmax_cross_entropy_with_logits(
      labels=target, logits=logits)
def _run_metrics(predictions, labels, metrics, weights):
  """Evaluates every metric fn on (predictions, labels[, weights])."""
  labels = math_ops.cast(labels, predictions.dtype)
  results = {}
  for metric_name, metric_fn in six.iteritems(metrics or {}):
    if weights is None:
      results[metric_name] = metric_fn(predictions, labels)
    else:
      results[metric_name] = metric_fn(predictions, labels, weights=weights)
  return results
@deprecated(
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def get_default_binary_metrics_for_eval(thresholds):
  """Builds the standard metric set for logistic regression / binary heads.

  Args:
    thresholds: List of floating point thresholds to use for accuracy,
      precision, and recall metrics. If None, defaults to [0.5].

  Returns:
    Dictionary mapping metrics string names to metrics functions.
  """
  metrics = {
      _MetricKeys.PREDICTION_MEAN: _predictions_streaming_mean,
      _MetricKeys.TARGET_MEAN: _labels_streaming_mean,
      # The label mean doubles as an accuracy baseline, as a reminder to users.
      _MetricKeys.ACCURACY_BASELINE: _labels_streaming_mean,
      _MetricKeys.AUC: _streaming_auc,
  }
  for threshold in thresholds:
    metrics[_MetricKeys.ACCURACY_MEAN % threshold] = (
        _accuracy_at_threshold(threshold))
    # Precision/recall are reported for the positive class only.
    metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_at_threshold(
        metric_ops.streaming_precision_at_thresholds, threshold)
    metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_at_threshold(
        metric_ops.streaming_recall_at_thresholds, threshold)
  return metrics
def _float_weights_or_none(weights):
if weights is None:
return None
return math_ops.to_float(weights)
def _labels_streaming_mean(unused_predictions, labels, weights=None):
  # Streaming mean of the labels; predictions are ignored so the signature
  # matches the common (predictions, labels, weights) metric shape.
  return metric_ops.streaming_mean(labels, weights=weights)
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
  # Streaming mean of the predictions; labels are ignored so the signature
  # matches the common (predictions, labels, weights) metric shape.
  return metric_ops.streaming_mean(predictions, weights=weights)
def _streaming_auc(predictions, labels, weights=None):
  # Streaming AUC with weights coerced to float (None passes through).
  return metric_ops.streaming_auc(
      predictions, labels, weights=_float_weights_or_none(weights))
def _accuracy_at_threshold(threshold):
  """Returns a metric fn measuring accuracy of `predictions >= threshold`."""

  def _accuracy_metric(predictions, labels, weights=None):
    # Binarize the scores at the threshold, then compute streaming accuracy.
    binarized = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold))
    return metric_ops.streaming_accuracy(
        predictions=binarized, labels=labels, weights=weights)

  return _accuracy_metric
def _streaming_at_threshold(streaming_metrics_fn, threshold):
  """Wraps a thresholded streaming metric to report a scalar at `threshold`."""

  def _streaming_metrics(predictions, labels, weights=None):
    value_tensor, update_op = streaming_metrics_fn(
        predictions,
        labels=labels,
        thresholds=[threshold],
        weights=_float_weights_or_none(weights))
    # The wrapped op returns one value per threshold; squeeze to a scalar.
    return array_ops.squeeze(value_tensor), update_op

  return _streaming_metrics
class _MetricKeys(object):
  # Metric name constants/templates; the %f slots are filled in with the
  # threshold value by get_default_binary_metrics_for_eval.
  AUC = "auc"
  PREDICTION_MEAN = "labels/prediction_mean"
  TARGET_MEAN = "labels/actual_target_mean"
  ACCURACY_BASELINE = "accuracy/baseline_target_mean"
  ACCURACY_MEAN = "accuracy/threshold_%f_mean"
  PRECISION_MEAN = "precision/positive_threshold_%f_mean"
  RECALL_MEAN = "recall/positive_threshold_%f_mean"
|
jsoref/django
|
refs/heads/master
|
django/db/models/__init__.py
|
66
|
from functools import wraps
from django.core.exceptions import ObjectDoesNotExist # NOQA
from django.db.models import signals # NOQA
from django.db.models.aggregates import * # NOQA
from django.db.models.deletion import ( # NOQA
CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError,
)
from django.db.models.expressions import ( # NOQA
F, Case, Expression, ExpressionWrapper, Func, Value, When,
)
from django.db.models.fields import * # NOQA
from django.db.models.fields.files import FileField, ImageField # NOQA
from django.db.models.fields.proxy import OrderWrt # NOQA
from django.db.models.lookups import Lookup, Transform # NOQA
from django.db.models.manager import Manager # NOQA
from django.db.models.query import Q, Prefetch, QuerySet # NOQA
# Imports that would create circular imports if sorted
from django.db.models.base import Model # NOQA isort:skip
from django.db.models.fields.related import ( # NOQA isort:skip
ForeignKey, ForeignObject, OneToOneField, ManyToManyField,
ManyToOneRel, ManyToManyRel, OneToOneRel,
)
def permalink(func):
    """Decorator that resolves the decorated function's return value into a
    URL via urlresolvers.reverse().

    "func" should be a function that returns a tuple in one of the
    following formats:
        (viewname, viewargs)
        (viewname, viewargs, viewkwargs)
    """
    from django.core.urlresolvers import reverse

    @wraps(func)
    def inner(*args, **kwargs):
        view_bits = func(*args, **kwargs)
        return reverse(view_bits[0], None, *view_bits[1:3])
    return inner
|
Comunitea/CMNT_00098_2017_JIM_addons
|
refs/heads/master
|
default_partner_by_type/__manifest__.py
|
1
|
# -*- coding: utf-8 -*-
# © 2016 Comunitea - Kiko Sánchez <kiko@comunitea.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
    # Odoo module manifest; keys follow the standard manifest schema.
    'name': 'Default partner by type',
    'version': '10.0.0.0.0',
    'author': 'Comunitea ',
    "category": "Custom",
    'license': 'AGPL-3',
    'depends': [
        'base'
    ],
    'contributors': [
        "Comunitea ",
        "Kiko Sánchez <kiko@comunitea.com>"
    ],
    # Data files loaded on install/update.
    "data": [
        'views/res_partner_view.xml',
    ],
    "installable": True
}
|
robmyers/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/image.py
|
203
|
import os
from .. import constants, logger
from . import base_classes, io, api
class Image(base_classes.BaseNode):
    """Node wrapper for an image resource — the actual file on disk."""

    def __init__(self, node, parent):
        logger.debug("Image().__init__(%s)", node)
        base_classes.BaseNode.__init__(self, node, parent, constants.IMAGE)

        # Record the relative URL: optional texture folder + image file name.
        folder = self.scene.options.get(constants.TEXTURE_FOLDER, "")
        self[constants.URL] = os.path.join(
            folder, api.image.file_name(self.node))

    @property
    def destination(self):
        """Full path the image occupies once copied next to the scene file."""
        scene_dir = os.path.dirname(self.scene.filepath)
        return os.path.join(scene_dir, self[constants.URL])

    @property
    def filepath(self):
        """Source path of the image file."""
        return api.image.file_path(self.node)

    def copy_texture(self, func=io.copy):
        """Copy the texture from self.filepath to self.destination.

        :param func: Optional function override (Default value = io.copy)
                     arguments are (<source>, <destination>)
        :return: path the texture was copied to
        """
        logger.debug("Image().copy_texture()")
        func(self.filepath, self.destination)
        return self.destination
|
miyuchina/mistletoe
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
import mistletoe
# Distribution metadata for mistletoe, a pure-Python Markdown parser.
setup(name='mistletoe',
      version=mistletoe.__version__,
      description='A fast, extensible Markdown parser in pure Python.',
      url='https://github.com/miyuchina/mistletoe',
      author='Mi Yu',
      author_email='hello@afteryu.me',
      license='MIT',
      packages=['mistletoe'],
      # Installs a `mistletoe` CLI wrapping mistletoe.__main__:main.
      entry_points={'console_scripts': ['mistletoe = mistletoe.__main__:main']},
      classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Markup',
      ],
      keywords='markdown lexer parser development',
      python_requires='~=3.3',
      zip_safe=False)
|
ryandougherty/mwa-capstone
|
refs/heads/heroku
|
MWA_Tools/mwapy/find_external.py
|
2
|
import logging, sys, os, glob, subprocess, string, re, urllib, math, time
# configure the logging
logging.basicConfig(format='# %(levelname)s:%(name)s: %(message)s')
logger=logging.getLogger('find_external')
logger.setLevel(logging.WARNING)
def find_external(external_programs):
    """Locate each named external program via `which` or $MWA_PATH.

    ``external_programs`` maps program name -> bool (True means required).
    Returns a dict mapping program name -> full path, or None if not found.
    Exits the process when a required program cannot be located.
    """
    # Candidate directories: $MWA_PATH entries plus this script's own dir.
    mwa_path = os.environ.get('MWA_PATH')
    if (mwa_path is not None):
        search_dirs = mwa_path.split(':')
    else:
        search_dirs = []
    search_dirs.append(os.path.abspath(os.path.dirname(sys.argv[0])))
    found = {}
    for program in external_programs.keys():
        found[program] = None
        # First try the shell's `which`; it respects the user's $PATH.
        proc = subprocess.Popen('which %s' % program, shell=True,
                                stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE, close_fds=True)
        (result, result_error) = proc.communicate()
        if (len(result) > 0):
            found[program] = result.rstrip("\n")
        else:
            # Fall back to scanning the explicit search directories.
            for directory in search_dirs:
                if (os.path.exists(directory + '/' + program)):
                    found[program] = directory + '/' + program
        if (found[program] is None):
            logger.warning('Unable to find external program %s; please set your MWA_PATH environment variable accordingly', program)
            if (external_programs[program]):
                logger.error('External program %s is required; exiting', program)
                sys.exit(1)
        else:
            logger.debug('Found %s=%s', program, found[program])
    return found
|
jcrugzz/lpvisualization
|
refs/heads/master
|
django/views/decorators/vary.py
|
307
|
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
    """A view decorator that patches the given header names into the
    response's Vary header. Header names are not case-sensitive.

    Usage::

        @vary_on_headers('Cookie', 'Accept-language')
        def index(request):
            ...
    """
    def decorator(view_func):
        def wrapped_view(*args, **kwargs):
            response = view_func(*args, **kwargs)
            patch_vary_headers(response, headers)
            return response
        return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
    return decorator
def vary_on_cookie(func):
    """A view decorator that adds "Cookie" to the response's Vary header,
    marking the page's contents as cookie-dependent.

    Usage::

        @vary_on_cookie
        def index(request):
            ...
    """
    def wrapped_view(*args, **kwargs):
        response = func(*args, **kwargs)
        patch_vary_headers(response, ('Cookie',))
        return response
    return wraps(func, assigned=available_attrs(func))(wrapped_view)
|
andrewleech/SickRage
|
refs/heads/master
|
lib/js2py/translators/translating_nodes.py
|
27
|
from __future__ import unicode_literals
from .pyjsparserdata import *
from .friendly_nodes import *
import random
import six
# Python 3 compatibility: restore the Python 2 builtins this module uses.
if six.PY3:
    from functools import reduce
    xrange = range
    unicode = str

# Number of characters above which an expression is split across multiple
# lines to avoid overflowing the Python parser's stack. Still experimental;
# 400 is the suggested value to avoid common errors -- lower it only if you
# actually hit parser stack overflows.
LINE_LEN_LIMIT = 400 # 200 # or any other value - the larger the smaller probability of errors :)
class ForController:
    """Tracks whether translation is currently inside a ``for`` statement.

    A stack of booleans mirrors scope nesting; the top of the stack tells
    whether the innermost scope is a for-loop, and ``update`` remembers the
    most recent loop's update expression.
    """

    def __init__(self):
        self.inside = [False]
        self.update = ''

    def enter_for(self, update):
        # Entering a for-loop scope; remember its update expression.
        self.update = update
        self.inside.append(True)

    def leave_for(self):
        self.inside.pop()

    def enter_other(self):
        # Entering a non-loop scope (function body, etc.).
        self.inside.append(False)

    def leave_other(self):
        self.inside.pop()

    def is_inside(self):
        # Top of the stack describes the innermost scope.
        return self.inside[-1]
class InlineStack:
    """Collects hoisted definitions (functions, objects, long expressions)
    that must be injected into the generated source just before the line
    that references them.

    ``names`` preserves definition order (FIFO matters for injection) and
    ``reps`` maps each generated lval name to its definition code.
    """
    NAME = 'PyJs_%s_%d_'

    def __init__(self):
        self.reps = {}
        self.names = []

    def inject_inlines(self, source):
        """Insert every recorded definition before its first use in *source*."""
        for lval in self.names:  # first in first out! Its important by the way
            source = inject_before_lval(source, lval, self.reps[lval])
        return source

    def require(self, typ):
        """Reserve and return a fresh, unique lval name tagged with *typ*."""
        name = self.NAME % (typ, len(self.names))
        self.names.append(name)
        return name

    def define(self, name, val):
        """Record *val* as the definition code for the reserved *name*."""
        self.reps[name] = val

    def reset(self):
        # BUG FIX: this previously assigned to ``self.rel`` (a typo), so
        # definitions in ``reps`` survived a reset and leaked between runs.
        self.reps = {}
        self.names = []
class ContextStack:
    """Per-scope registry of variable names to declare and hoisted
    function definitions to emit at the top of the scope."""

    def __init__(self):
        self.to_register = set([])
        self.to_define = {}

    def reset(self):
        self.to_register = set([])
        self.to_define = {}

    def register(self, var):
        """Remember that *var* must be registered in this scope."""
        self.to_register.add(var)

    def define(self, name, code):
        """Record hoisted definition *code* for *name*; the name is registered too."""
        self.to_define[name] = code
        self.register(name)

    def get_code(self):
        """Return the scope prologue: var registrations followed by hoisted defs."""
        code = 'var.registers([%s])\n' % ', '.join(repr(e) for e in self.to_register)
        for func_code in self.to_define.values():
            code += func_code
        return code
def clean_stacks():
    """Reset the module-level translation state (fresh scope context and
    inline-definition stack) before translating a new program."""
    global Context, inline_stack
    Context = ContextStack()
    inline_stack = InlineStack()
def to_key(literal_or_identifier):
    """Return the JS property-key string for an Identifier or Literal node.

    Identifiers map to their name; literals are converted per JS semantics
    (JS float formatting, regex source, 'true'/'false', 'null', or plain
    string conversion). Any other node type yields None.
    """
    node_type = literal_or_identifier['type']
    if node_type == 'Identifier':
        return literal_or_identifier['name']
    if node_type == 'Literal':
        value = literal_or_identifier['value']
        if isinstance(value, float):
            # JS number formatting differs from Python's float repr
            return unicode(float_repr(value))
        if 'regex' in literal_or_identifier:
            return compose_regex(value)
        if isinstance(value, bool):
            return 'true' if value else 'false'
        if value is None:
            return 'null'
        return unicode(value)
def trans(ele, standard=False):
    """Translate an esprima AST node to Python source.

    Dispatches on ``ele['type']`` to the translating function of the same
    name defined in this module. With ``standard=True`` the undecorated
    (non line-length-limited) translator is used when one exists.

    Raises NotImplementedError for node types without a translator.

    FIX: removed a bare ``except: raise`` wrapper that was a pure no-op
    (it re-raised every exception unchanged).
    """
    node = globals().get(ele['type'])
    if not node:
        raise NotImplementedError('%s is not supported!' % ele['type'])
    if standard:
        # @limited stores the raw translator under the 'standard' attribute
        node = node.__dict__['standard'] if 'standard' in node.__dict__ else node
    return node(**ele)
def limited(func):
    '''Decorator limiting resulting line length in order to avoid python parser stack overflow -
    If expression longer than LINE_LEN_LIMIT characters then it will be moved to upper line
    USE ONLY ON EXPRESSIONS!!! '''
    def f(standard=False, **args):
        insert_pos = len(inline_stack.names) # in case line is longer than limit we will have to insert the lval at current position
        # this is because calling func will change inline_stack.
        # we cant use inline_stack.require here because we dont know whether line overflows yet
        res = func(**args)
        if len(res)>LINE_LEN_LIMIT:
            # overflow: hoist the whole expression into a helper function that is
            # injected just above the current line, and emit a call to it instead
            name = inline_stack.require('LONG')
            inline_stack.names.pop()
            inline_stack.names.insert(insert_pos, name)
            res = 'def %s(var=var):\n return %s\n' % (name, res)
            inline_stack.define(name, res)
            return name+'()'
        else:
            return res
    # keep the raw translator reachable for trans(..., standard=True)
    f.__dict__['standard'] = func
    return f
# ==== IDENTIFIERS AND LITERALS =======

inf = float('inf')

def Literal(type, value, raw, regex=None):
    """Translate a literal node (regex, null, string, bool or number)."""
    if regex:  # regex literal
        return 'JsRegExp(%s)' % repr(compose_regex(value))
    if value is None:  # null literal
        return 'var.get(u"null")'
    # Todo template
    # String, Bool, Float
    if value == inf:
        return 'Js(float("inf"))'
    return 'Js(%s)' % repr(value)
def Identifier(type, name):
    """Translate an identifier reference to a scope lookup."""
    lookup = repr(name)
    return 'var.get(%s)' % lookup
@limited
def MemberExpression(type, computed, object, property):
    """Translate property access: ``obj.prop`` or ``obj[prop]``."""
    target = trans(object)
    if computed:
        # obj[expr]; a literal subscript is folded to a constant key,
        # saving a runtime conversion
        if property['type'] == 'Literal':
            prop = repr(to_key(property))
        else:
            prop = trans(property)
    else:
        # dot access is always a fixed key
        prop = repr(to_key(property))
    return target + '.get(%s)' % prop
def ThisExpression(type):
    """Translate ``this`` to a lookup of the bound this value."""
    return 'var.get(u"this")'
@limited
def CallExpression(type, callee, arguments):
    """Translate a call; method calls go through .callprop so ``this`` binds."""
    args = [trans(e) for e in arguments]
    if callee['type'] != 'MemberExpression':
        # standard function call
        return trans(callee) + '(%s)' % ', '.join(args)
    target = trans(callee['object'])
    if callee['computed']:
        # obj[expr](...); a literal property name is folded to a constant key
        if callee['property']['type'] == 'Literal':
            prop = repr(to_key(callee['property']))
        else:
            prop = trans(callee['property'])  # its not a string literal! so no repr
    else:
        # obj.prop(...) is always a fixed key
        prop = repr(to_key(callee['property']))
    args.insert(0, prop)
    return target + '.callprop(%s)' % ', '.join(args)
# ========== ARRAYS ============

def ArrayExpression(type, elements):  # todo fix null inside problem
    """Translate an array literal; elisions (holes) become None."""
    items = ', '.join(trans(e) if e else 'None' for e in elements)
    return 'Js([%s])' % items
# ========== OBJECTS =============

def ObjectExpression(type, properties):
    """Translate an object literal.

    Plain ``init`` properties go straight into the Js({...}) constructor;
    getters/setters are attached afterwards via define_own_property. The
    whole definition is hoisted through inline_stack and referenced by its
    generated name.
    """
    name = inline_stack.require('Object')
    elems = []
    after = ''
    for p in properties:
        if p['kind'] == 'init':
            elems.append('%s:%s' % Property(**p))
        elif p['kind'] == 'set':
            # setter is just a lval referring to that function, it will be defined in InlineStack automatically
            k, setter = Property(**p)
            after += '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n' % (name, k, setter)
        elif p['kind'] == 'get':
            k, getter = Property(**p)
            after += '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n' % (name, k, getter)
        else:
            # FIX: message previously read "propery"
            raise RuntimeError('Unexpected object property kind')
    obj = '%s = Js({%s})\n' % (name, ','.join(elems))
    inline_stack.define(name, obj + after)
    return name
def Property(type, kind, key, computed, value, method, shorthand):
    """Translate one object-literal property to a (repr(key), value_code) pair."""
    if shorthand or computed:
        raise NotImplementedError('Shorthand and Computed properties not implemented!')
    prop_key = to_key(key)
    if prop_key is None:
        raise SyntaxError('Invalid key in dictionary! Or bug in Js2Py')
    return repr(prop_key), trans(value)
# ========== EXPRESSIONS ============

@limited
def UnaryExpression(type, operator, argument, prefix):
    """Translate a unary expression (delete / typeof / the UNARY table ops)."""
    # unary ops involve some complex operations so we cant use line shorteners here
    operand = trans(argument, standard=True)
    if operator == 'delete':
        if argument['type'] in {'Identifier', 'MemberExpression'}:
            # deleting a reference is the only valid form
            return js_delete(operand)
        # otherwise not valid: just evaluate the expression and yield true
        return 'PyJsComma(%s, Js(True))' % operand
    if operator == 'typeof':
        return js_typeof(operand)
    return UNARY[operator](operand)
@limited
def BinaryExpression(type, operator, left, right):
    """Translate a binary expression by delegating to the BINARY op table."""
    lhs = trans(left)
    rhs = trans(right)
    return BINARY[operator](lhs, rhs)
@limited
def UpdateExpression(type, operator, argument, prefix):
    """Translate ++/-- in both prefix and postfix positions."""
    # complex operation involving parsing of the result, so no line length reducing here
    target = trans(argument, standard=True)
    return js_postfix(target, operator == '++', not prefix)
@limited
def AssignmentExpression(type, operator, left, right):
    """Translate assignment (=, +=, -=, ...) to var.put / object .put calls.

    The trailing '=' is stripped from *operator*; an empty remainder means
    plain assignment, otherwise the compound operator is forwarded to put.
    """
    operator = operator[:-1]
    if left['type'] == 'Identifier':
        if operator:
            return 'var.put(%s, %s, %s)' % (repr(to_key(left)), trans(right), repr(operator))
        else:
            return 'var.put(%s, %s)' % (repr(to_key(left)), trans(right))
    elif left['type'] == 'MemberExpression':
        far_left = trans(left['object'])
        if left['computed']:  # obj[prop] type accessor
            # a literal subscript is folded to a constant key
            if left['property']['type'] == 'Literal':
                prop = repr(to_key(left['property']))
            else:  # worst case
                prop = trans(left['property'])  # its not a string literal! so no repr
        else:  # always the same since not computed (obj.prop accessor)
            prop = repr(to_key(left['property']))
        if operator:
            return far_left + '.put(%s, %s, %s)' % (prop, trans(right), repr(operator))
        else:
            return far_left + '.put(%s, %s)' % (prop, trans(right))
    else:
        raise SyntaxError('Invalid left hand side in assignment!')
# FIX: removed a stray no-op ``six`` expression statement that was left here.
@limited
def SequenceExpression(type, expressions):
    """Translate the comma operator: evaluate all operands, yield the last."""
    translated = (trans(e) for e in expressions)
    return reduce(js_comma, translated)
@limited
def NewExpression(type, callee, arguments):
    """Translate ``new Callee(args)`` to a .create(...) call."""
    args = ', '.join(trans(e) for e in arguments)
    return trans(callee) + '.create(%s)' % args
@limited
def ConditionalExpression(type, test, consequent, alternate):
    """Translate ``test ? consequent : alternate``.

    Note the operand order change: Python's conditional expression puts
    the condition in the middle.
    """
    return '(%s if %s else %s)' % (trans(consequent), trans(test), trans(alternate))
# =========== STATEMENTS =============

def BlockStatement(type, body):
    """Translate a { ... } statement block."""
    return StatementList(body)  # never returns empty string! In the worst case returns pass\n

def ExpressionStatement(type, expression):
    """Translate a bare expression statement."""
    return trans(expression) + '\n'  # end expression space with new line

def BreakStatement(type, label):
    """Translate break; a labeled break raises the label's exception class."""
    if label:
        return 'raise %s("Breaked")\n' % (get_break_label(label['name']))
    else:
        return 'break\n'

def ContinueStatement(type, label):
    """Translate continue; a labeled continue raises the label's exception class."""
    if label:
        return 'raise %s("Continued")\n' % (get_continue_label(label['name']))
    else:
        return 'continue\n'

def ReturnStatement(type, argument):
    """Translate return; a bare return yields JS undefined."""
    return 'return %s\n' % (trans(argument) if argument else "var.get('undefined')")

def EmptyStatement(type):
    """Translate the empty statement ';' to a no-op."""
    return 'pass\n'

def DebuggerStatement(type):
    """The debugger statement has no Python equivalent; emit a no-op."""
    return 'pass\n'
def DoWhileStatement(type, body, test):
    """Translate do/while: run the body, then break when the test fails."""
    inside = trans(body) + 'if not %s:\n' % trans(test) + indent('break\n')
    result = 'while 1:\n' + indent(inside)
    return result

def ForStatement(type, init, test, update, body):
    """Translate a C-style for loop.

    When an update expression exists, the body is wrapped in try/finally
    with the update in the finally clause so it runs even on continue.
    """
    update = indent(trans(update)) if update else ''
    init = trans(init) if init else ''
    if not init.endswith('\n'):
        init += '\n'
    test = trans(test) if test else '1'
    if not update:
        result = '#for JS loop\n%swhile %s:\n%s%s\n' % (init, test, indent(trans(body)), update)
    else:
        result = '#for JS loop\n%swhile %s:\n' % (init, test)
        body = 'try:\n%sfinally:\n %s\n' % (indent(trans(body)), update)
        result += indent(body)
    return result
def ForInStatement(type, left, right, body, each):
    """Translate for-in: iterate property names, binding each to the lhs var."""
    res = 'for PyJsTemp in %s:\n' % trans(right)
    if left['type'] == "VariableDeclaration":
        addon = trans(left)  # make sure variable is registered
        if addon != 'pass\n':
            res = addon + res  # we have to execute this expression :(
        # now extract the name
        try:
            name = left['declarations'][0]['id']['name']
        except:
            raise RuntimeError('Unusual ForIn loop')
    elif left['type'] == 'Identifier':
        name = left['name']
    else:
        raise RuntimeError('Unusual ForIn loop')
    res += indent('var.put(%s, PyJsTemp)\n' % repr(name) + trans(body))
    return res
def IfStatement(type, test, consequent, alternate):
    """Translate if/else.

    We never emit ``elif`` because a function definition inside an elif
    branch would not be possible in the generated code.
    """
    code = 'if %s:\n' % trans(test) + indent(trans(consequent))
    if alternate:
        code += 'else:\n' + indent(trans(alternate))
    return code
def LabeledStatement(type, label, body):
    """Translate a labeled statement.

    Labeled break/continue are implemented with per-label exception
    classes: the body is wrapped in try/except blocks catching the
    label's break exception (and, for loops, its continue exception).
    """
    # todo consider using smarter approach!
    inside = trans(body)
    defs = ''
    if inside.startswith('while ') or inside.startswith('for ') or inside.startswith('#for'):
        # we have to add contine label as well...
        # 3 or 1 since #for loop type has more lines before real for.
        sep = 1 if not inside.startswith('#for') else 3
        cont_label = get_continue_label(label['name'])
        temp = inside.split('\n')
        injected = 'try:\n'+'\n'.join(temp[sep:])
        injected += 'except %s:\n pass\n'%cont_label
        inside = '\n'.join(temp[:sep])+'\n'+indent(injected)
        defs += 'class %s(Exception): pass\n'%cont_label
    break_label = get_break_label(label['name'])
    inside = 'try:\n%sexcept %s:\n pass\n'% (indent(inside), break_label)
    defs += 'class %s(Exception): pass\n'%break_label
    return defs + inside
def StatementList(lis):
    """Translate a list of statements.

    Never returns an empty string (that would ruin the generated
    indentation); the worst case yields 'pass\\n'.
    """
    if not lis:
        return 'pass\n'
    code = ''.join(trans(e) for e in lis)
    return code or 'pass\n'
def PyimportStatement(type, imp):
    """Translate the js2py 'pyimport' extension statement.

    Imports the named Python module under an aliased name and exposes it
    to JS code via var.pyimport.
    """
    lib = imp['name']
    jlib = 'PyImport_%s' % lib
    code = 'import %s as %s\n' % (lib, jlib)
    #check whether valid lib name...
    try:
        compile(code, '', 'exec')
    except:
        raise SyntaxError('Invalid Python module name (%s) in pyimport statement'%lib)
    # var.pyimport will handle module conversion to PyJs object
    code += 'var.pyimport(%s, %s)\n' % (repr(lib), jlib)
    return code
def SwitchStatement(type, discriminant, cases):
    """Translate switch via a single-pass while loop with a fall-through flag.

    SWITCHED becomes True once a case matches, so every subsequent case
    body also runs (JS fall-through) until a break leaves the while loop.
    """
    #TODO there will be a problem with continue in a switch statement.... FIX IT
    code = 'while 1:\n' + indent('SWITCHED = False\nCONDITION = (%s)\n')
    code = code % trans(discriminant)
    for case in cases:
        case_code = None
        if case['test']: # case (x):
            case_code = 'if SWITCHED or PyJsStrictEq(CONDITION, %s):\n' % (trans(case['test']))
        else: # default:
            case_code = 'if True:\n'
        case_code += indent('SWITCHED = True\n')
        case_code += indent(StatementList(case['consequent']))
        # one more indent for whole
        code += indent(case_code)
    # prevent infinite loop and sort out nested switch...
    code += indent('SWITCHED = True\nbreak\n')
    return code
def ThrowStatement(type, argument):
    """Translate throw: convert the JS value to a Python exception, raise it."""
    converted = trans(argument)
    return 'PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n' % converted
def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer):
    """Translate try/catch/finally.

    The catch variable is scoped through a uniquely named holder variable
    (TRY_CATCH template) so the enclosing scope's binding can be restored
    after the handler runs.
    """
    result = 'try:\n%s' % indent(trans(block))
    # complicated catch statement...
    if handler:
        identifier = handler['param']['name']
        holder = 'PyJsHolder_%s_%d'%(to_hex(identifier), random.randrange(1e8))
        identifier = repr(identifier)
        result += 'except PyJsException as PyJsTempException:\n'
        # fill in except ( catch ) block and remember to recover holder variable to its previous state
        result += indent(TRY_CATCH.replace('HOLDER', holder).replace('NAME', identifier).replace('BLOCK', indent(trans(handler['body']))))
    # translate finally statement if present
    if finalizer:
        result += 'finally:\n%s' % indent(trans(finalizer))
    return result
def LexicalDeclaration(type, declarations, kind):
    """let/const are unsupported; fail loudly instead of mistranslating."""
    raise NotImplementedError('let and const not implemented yet but they will be soon! Check github for updates.')
def VariableDeclarator(type, id, init):
    """Translate one declarator: register the name; emit a put if initialized."""
    name = id['name']
    Context.register(name)  # hoist the declaration into the current scope
    if not init:
        return ''
    return 'var.put(%s, %s)\n' % (repr(name), trans(init))
def VariableDeclaration(type, declarations, kind):
    """Translate a var statement; empty output becomes pass to keep blocks valid."""
    code = ''.join(trans(d) for d in declarations)
    return code or 'pass\n'
def WhileStatement(type, test, body):
    """Translate a while loop."""
    header = 'while %s:\n' % trans(test)
    return header + indent(trans(body))
def WithStatement(type, object, body):
    """The with statement is unsupported; fail loudly."""
    raise NotImplementedError('With statement not implemented!')
def Program(type, body):
    """Translate the top-level program: body plus hoisted definitions plus
    injected inline definitions."""
    inline_stack.reset()
    code = ''.join(trans(e) for e in body)
    # here add hoisted elements (register variables and define functions)
    code = Context.get_code() + code
    # replace all inline variables
    code = inline_stack.inject_inlines(code)
    return code
# ======== FUNCTIONS ============

def FunctionDeclaration(type, id, params, defaults, body, generator, expression):
    """Translate a hoisted function declaration.

    The body is translated inside its own ContextStack; the resulting
    Python def is hoisted into the enclosing scope via Context.define,
    so this translator itself just emits 'pass'.
    """
    if generator:
        raise NotImplementedError('Generators not supported')
    if defaults:
        raise NotImplementedError('Defaults not supported')
    if not id:
        return FunctionExpression(type, id, params, defaults, body, generator, expression)
    JsName = id['name']
    PyName = 'PyJsHoisted_%s_' % JsName
    PyName = PyName if is_valid_py_name(PyName) else 'PyJsHoistedNonPyName'
    # this is quite complicated
    global Context
    previous_context = Context
    # change context to the context of this function
    Context = ContextStack()
    # translate body within current context
    code = trans(body)
    # get arg names
    vars = [v['name'] for v in params]
    # args are automaticaly registered variables
    Context.to_register.update(vars)
    # add all hoisted elements inside function
    code = Context.get_code() + code
    # check whether args are valid python names:
    used_vars = []
    for v in vars:
        if is_valid_py_name(v):
            used_vars.append(v)
        else: # invalid arg in python, for example $, replace with alternatice arg
            used_vars.append('PyJsArg_%s_' % to_hex(v))
    header = '@Js\n'
    header+= 'def %s(%sthis, arguments, var=var):\n' % (PyName, ', '.join(used_vars) +(', ' if vars else ''))
    # transfer names from Py scope to Js scope
    arg_map = dict(zip(vars, used_vars))
    arg_map.update({'this':'this', 'arguments':'arguments'})
    arg_conv = 'var = Scope({%s}, var)\n' % ', '.join(repr(k)+':'+v for k,v in six.iteritems(arg_map))
    # and finally set the name of the function to its real name:
    footer = '%s.func_name = %s\n' % (PyName, repr(JsName))
    footer+= 'var.put(%s, %s)\n' % (repr(JsName), PyName)
    whole_code = header + indent(arg_conv+code) + footer
    # restore context
    Context = previous_context
    # define in upper context
    Context.define(JsName, whole_code)
    return 'pass\n'
def FunctionExpression(type, id, params, defaults, body, generator, expression):
    """Translate a function expression.

    Like FunctionDeclaration, but the generated def is stored on the
    inline stack and referenced by its generated name instead of being
    hoisted into the enclosing scope. A named function expression also
    maps its own name into its scope so it can call itself.
    """
    if generator:
        raise NotImplementedError('Generators not supported')
    if defaults:
        raise NotImplementedError('Defaults not supported')
    JsName = id['name'] if id else 'anonymous'
    if not is_valid_py_name(JsName):
        ScriptName = 'InlineNonPyName'
    else:
        ScriptName = JsName
    PyName = inline_stack.require(ScriptName) # this is unique
    # again quite complicated
    global Context
    previous_context = Context
    # change context to the context of this function
    Context = ContextStack()
    # translate body within current context
    code = trans(body)
    # get arg names
    vars = [v['name'] for v in params]
    # args are automaticaly registered variables
    Context.to_register.update(vars)
    # add all hoisted elements inside function
    code = Context.get_code() + code
    # check whether args are valid python names:
    used_vars = []
    for v in vars:
        if is_valid_py_name(v):
            used_vars.append(v)
        else: # invalid arg in python, for example $, replace with alternatice arg
            used_vars.append('PyJsArg_%s_' % to_hex(v))
    header = '@Js\n'
    header+= 'def %s(%sthis, arguments, var=var):\n' % (PyName, ', '.join(used_vars) +(', ' if vars else ''))
    # transfer names from Py scope to Js scope
    arg_map = dict(zip(vars, used_vars))
    arg_map.update({'this':'this', 'arguments':'arguments'})
    if id: # make self available from inside...
        if id['name'] not in arg_map:
            arg_map[id['name']] = PyName
    arg_conv = 'var = Scope({%s}, var)\n' % ', '.join(repr(k)+':'+v for k,v in six.iteritems(arg_map))
    # and finally set the name of the function to its real name:
    footer = '%s._set_name(%s)\n' % (PyName, repr(JsName))
    whole_code = header + indent(arg_conv+code) + footer
    # restore context
    Context = previous_context
    # define in upper context
    inline_stack.define(PyName, whole_code)
    return PyName
# LogicalExpression (&&, ||) shares its translation with BinaryExpression.
LogicalExpression = BinaryExpression
# Legacy esprima node name for UpdateExpression.
PostfixExpression = UpdateExpression
# Initialise the module-level translation state.
clean_stacks()
if __name__=='__main__':
    # Ad-hoc benchmark: translate esp.js and dump the generated Python source.
    import codecs
    import time
    import pyjsparser
    c = None#'''`ijfdij`'''
    if not c:
        with codecs.open("esp.js", "r", "utf-8") as f:
            c = f.read()
    print('Started')
    t = time.time()
    res = trans(pyjsparser.PyJsParser().parse(c))
    dt = time.time() - t+ 0.000000001  # tiny epsilon avoids division by zero below
    print('Translated everyting in', round(dt,5), 'seconds.')
    print('Thats %d characters per second' % int(len(c)/dt))
    with open('res.py', 'w') as f:
        f.write(res)
|
alkyl1978/gnuradio
|
refs/heads/master
|
gr-filter/examples/synth_filter.py
|
58
|
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
    """Synthesize several sine sources into one wideband signal with a PFB
    synthesizer, then plot the time series and PSD of the result.

    NOTE(review): this file is Python 2 (print statement below).
    """
    N = 1000000                         # number of output samples to collect
    fs = 8000                           # per-channel sample rate
    freqs = [100, 200, 300, 400, 500]   # one sine per occupied channel
    nchans = 7
    sigs = list()
    for fi in freqs:
        s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
        sigs.append(s)
    # Prototype lowpass filter shared by all channels of the synthesizer.
    taps = filter.firdes.low_pass_2(len(freqs), fs,
                                    fs/float(nchans)/2, 100, 100)
    print "Num. Taps = %d (taps per filter = %d)" % (len(taps),
                                                     len(taps)/nchans)
    filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
    head = blocks.head(gr.sizeof_gr_complex, N)
    snk = blocks.vector_sink_c()
    tb = gr.top_block()
    tb.connect(filtbank, head, snk)
    for i,si in enumerate(sigs):
        tb.connect(si, (filtbank, i))
    tb.run()
    if 1:
        # Time-domain plot and power spectral density of the synthesized signal.
        f1 = pylab.figure(1)
        s1 = f1.add_subplot(1,1,1)
        s1.plot(snk.data()[1000:])
        fftlen = 2048
        f2 = pylab.figure(2)
        s2 = f2.add_subplot(1,1,1)
        winfunc = scipy.blackman
        s2.psd(snk.data()[10000:], NFFT=fftlen,
               Fs = nchans*fs,
               noverlap=fftlen/4,
               window = lambda d: d*winfunc(fftlen))
        pylab.show()

if __name__ == "__main__":
    main()
|
Technocaveman/There-is-no-Third-Step
|
refs/heads/master
|
node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/emacs.py
|
364
|
# -*- coding: utf-8 -*-
"""
pygments.styles.emacs
~~~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by Emacs.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class EmacsStyle(Style):
    """
    The default style (inspired by Emacs 22).
    """

    # page background used by formatters that render full documents
    background_color = "#f8f8f8"
    default_style = ""

    # token -> style-definition string: optional bold/italic flags plus an
    # optional #RRGGBB foreground colour (see pygments.style for the syntax)
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "italic #008800",
        Comment.Preproc:           "noitalic",
        Comment.Special:           "noitalic bold",

        Keyword:                   "bold #AA22FF",
        Keyword.Pseudo:            "nobold",
        Keyword.Type:              "bold #00BB00",

        Operator:                  "#666666",
        Operator.Word:             "bold #AA22FF",

        Name.Builtin:              "#AA22FF",
        Name.Function:             "#00A000",
        Name.Class:                "#0000FF",
        Name.Namespace:            "bold #0000FF",
        Name.Exception:            "bold #D2413A",
        Name.Variable:             "#B8860B",
        Name.Constant:             "#880000",
        Name.Label:                "#A0A000",
        Name.Entity:               "bold #999999",
        Name.Attribute:            "#BB4444",
        Name.Tag:                  "bold #008000",
        Name.Decorator:            "#AA22FF",

        String:                    "#BB4444",
        String.Doc:                "italic",
        String.Interpol:           "bold #BB6688",
        String.Escape:             "bold #BB6622",
        String.Regex:              "#BB6688",
        String.Symbol:             "#B8860B",
        String.Other:              "#008000",

        Number:                    "#666666",

        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #000080",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",

        Error:                     "border:#FF0000"
    }
|
denovator/myfriki
|
refs/heads/master
|
lib/flask/flask/testsuite/test_apps/blueprintapp/apps/frontend/__init__.py
|
629
|
from flask import Blueprint, render_template

# Blueprint grouping the app's public-facing pages; templates live under
# this package's templates/ directory.
frontend = Blueprint('frontend', __name__, template_folder='templates')

@frontend.route('/')
def index():
    """Render the frontend landing page."""
    return render_template('frontend/index.html')
|
dongritengfei/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Source/ThirdParty/gtest/test/run_tests_util_test.py
|
233
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for run_tests_util.py test runner script."""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import re
import sets
import unittest
import run_tests_util
GTEST_DBG_DIR = 'scons/build/dbg/gtest/scons'
GTEST_OPT_DIR = 'scons/build/opt/gtest/scons'
GTEST_OTHER_DIR = 'scons/build/other/gtest/scons'
def AddExeExtension(path):
    """Appends .exe to the path on Windows or Cygwin."""
    needs_exe = run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN
    return path + '.exe' if needs_exe else path
class FakePath(object):
    """A fake os.path module for testing.

    The file system is modelled as a nested dict (``tree``): a dict value
    is a directory, the literal 1 marks a file leaf.
    """

    def __init__(self, current_dir=os.getcwd(), known_paths=None):
        # NOTE(review): the os.getcwd() default is evaluated once at class
        # definition time, not per call — presumably intentional for tests.
        self.current_dir = current_dir
        self.tree = {}
        self.path_separator = os.sep
        # known_paths contains either absolute or relative paths. Relative paths
        # are absolutized with self.current_dir.
        if known_paths:
            self._AddPaths(known_paths)

    def _AddPath(self, path):
        """Records one path; a trailing slash forces a directory entry."""
        ends_with_slash = path.endswith('/')
        path = self.abspath(path)
        if ends_with_slash:
            path += self.path_separator
        name_list = path.split(self.path_separator)
        tree = self.tree
        for name in name_list[:-1]:
            if not name:
                continue
            if name in tree:
                tree = tree[name]
            else:
                tree[name] = {}
                tree = tree[name]
        name = name_list[-1]
        if name:
            if name in tree:
                # an existing entry must already be a file leaf
                assert tree[name] == 1
            else:
                tree[name] = 1

    def _AddPaths(self, paths):
        for path in paths:
            self._AddPath(path)

    def PathElement(self, path):
        """Returns an internal representation of directory tree entry for path."""
        tree = self.tree
        name_list = self.abspath(path).split(self.path_separator)
        for name in name_list:
            if not name:
                continue
            tree = tree.get(name, None)
            if tree is None:
                break
        return tree

    # Silences pylint warning about using standard names.
    # pylint: disable-msg=C6409
    def normpath(self, path):
        return os.path.normpath(path)

    def abspath(self, path):
        # absolutize relative paths against the fake current directory
        return self.normpath(os.path.join(self.current_dir, path))

    def isfile(self, path):
        return self.PathElement(self.abspath(path)) == 1

    def isdir(self, path):
        return type(self.PathElement(self.abspath(path))) == type(dict())

    def basename(self, path):
        return os.path.basename(path)

    def dirname(self, path):
        return os.path.dirname(path)

    def join(self, *kargs):
        return os.path.join(*kargs)
class FakeOs(object):
    """A fake os module for testing."""
    P_WAIT = os.P_WAIT

    def __init__(self, fake_path_module):
        self.path = fake_path_module
        # Some methods/attributes are delegated to the real os module.
        self.environ = os.environ

    # pylint: disable-msg=C6409
    def listdir(self, path):
        """Returns the entry names of the fake directory *path*."""
        assert self.path.isdir(path)
        return self.path.PathElement(path).iterkeys()

    def spawnv(self, wait, executable, *kargs):
        """Delegates to spawn_impl (assigned by tests); only P_WAIT is supported."""
        assert wait == FakeOs.P_WAIT
        return self.spawn_impl(executable, kargs)
class GetTestsToRunTest(unittest.TestCase):
    """Exercises TestRunner.GetTestsToRun."""

    def NormalizeGetTestsToRunResults(self, results):
        """Normalizes path data returned from GetTestsToRun for comparison.

        Returns a pair of sets (python tests, binary tests) so ordering
        differences do not affect equality.
        """
        def NormalizePythonTestPair(pair):
            """Normalizes path data in the (directory, python_script) pair."""
            return (os.path.normpath(pair[0]), os.path.normpath(pair[1]))

        def NormalizeBinaryTestPair(pair):
            """Normalizes path data in the (directory, binary_executable) pair."""
            directory, executable = map(os.path.normpath, pair)
            # On Windows and Cygwin, the test file names have the .exe extension, but
            # they can be invoked either by name or by name+extension. Our test must
            # accommodate both situations.
            if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
                executable = re.sub(r'\.exe$', '', executable)
            return (directory, executable)

        python_tests = sets.Set(map(NormalizePythonTestPair, results[0]))
        binary_tests = sets.Set(map(NormalizeBinaryTestPair, results[1]))
        return (python_tests, binary_tests)

    def AssertResultsEqual(self, results, expected):
        """Asserts results returned by GetTestsToRun equal to expected results."""
        self.assertEqual(self.NormalizeGetTestsToRunResults(results),
                         self.NormalizeGetTestsToRunResults(expected),
                         'Incorrect set of tests returned:\n%s\nexpected:\n%s' %
                         (results, expected))

    def setUp(self):
        # Fake file system: gtest_unittest built in both dbg and opt, plus
        # one Python test script.
        self.fake_os = FakeOs(FakePath(
            current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
            known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
                         AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
                         'test/gtest_color_test.py']))
        self.fake_configurations = ['dbg', 'opt']
        self.test_runner = run_tests_util.TestRunner(script_dir='.',
                                                     injected_os=self.fake_os,
                                                     injected_subprocess=None)

    def testBinaryTestsOnly(self):
        """Exercises GetTestsToRun with parameters designating binary tests only."""
        # A default build.
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                ['gtest_unittest'],
                '',
                False,
                available_configurations=self.fake_configurations),
            ([],
             [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
        # An explicitly specified directory.
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                [GTEST_DBG_DIR, 'gtest_unittest'],
                '',
                False,
                available_configurations=self.fake_configurations),
            ([],
             [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
        # A particular configuration.
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                ['gtest_unittest'],
                'other',
                False,
                available_configurations=self.fake_configurations),
            ([],
             [(GTEST_OTHER_DIR, GTEST_OTHER_DIR + '/gtest_unittest')]))
        # All available configurations
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                ['gtest_unittest'],
                'all',
                False,
                available_configurations=self.fake_configurations),
            ([],
             [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
              (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
        # All built configurations (unbuilt don't cause failure).
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                ['gtest_unittest'],
                '',
                True,
                available_configurations=self.fake_configurations + ['unbuilt']),
            ([],
             [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
              (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
        # A combination of an explicit directory and a configuration.
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                [GTEST_DBG_DIR, 'gtest_unittest'],
                'opt',
                False,
                available_configurations=self.fake_configurations),
            ([],
             [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
              (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
        # Same test specified in an explicit directory and via a configuration.
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                [GTEST_DBG_DIR, 'gtest_unittest'],
                'dbg',
                False,
                available_configurations=self.fake_configurations),
            ([],
             [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
        # All built configurations + explicit directory + explicit configuration.
        self.AssertResultsEqual(
            self.test_runner.GetTestsToRun(
                [GTEST_DBG_DIR, 'gtest_unittest'],
                'opt',
                True,
                available_configurations=self.fake_configurations),
            ([],
             [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
              (GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
def testPythonTestsOnly(self):
"""Exercises GetTestsToRun with parameters designating Python tests only."""
# A default build.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# An explicitly specified directory.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'test/gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# A particular configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'other',
False,
available_configurations=self.fake_configurations),
([(GTEST_OTHER_DIR, 'test/gtest_color_test.py')],
[]))
# All available configurations
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['test/gtest_color_test.py'],
'all',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# All built configurations (unbuilt don't cause failure).
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
True,
available_configurations=self.fake_configurations + ['unbuilt']),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# A combination of an explicit directory and a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'opt',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# Same test specified in an explicit directory and via a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'dbg',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# All built configurations + explicit directory + explicit configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'opt',
True,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
def testCombinationOfBinaryAndPythonTests(self):
  """Exercises GetTestsToRun with mixed binary/Python tests."""
  # Use only default configuration for this test.

  # Neither binary nor Python tests are specified so find all.
  self.AssertResultsEqual(
      self.test_runner.GetTestsToRun(
          [],
          '',
          False,
          available_configurations=self.fake_configurations),
      ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
       [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))

  # Specifying both binary and Python tests: each kind is found in the
  # default (dbg) configuration.
  self.AssertResultsEqual(
      self.test_runner.GetTestsToRun(
          ['gtest_unittest', 'gtest_color_test.py'],
          '',
          False,
          available_configurations=self.fake_configurations),
      ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
       [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))

  # Specifying binary tests suppresses Python tests.
  self.AssertResultsEqual(
      self.test_runner.GetTestsToRun(
          ['gtest_unittest'],
          '',
          False,
          available_configurations=self.fake_configurations),
      ([],
       [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))

  # Specifying Python tests suppresses binary tests.
  self.AssertResultsEqual(
      self.test_runner.GetTestsToRun(
          ['gtest_color_test.py'],
          '',
          False,
          available_configurations=self.fake_configurations),
      ([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
       []))
def testIgnoresNonTestFiles(self):
  """Verifies that GetTestsToRun ignores non-test files in the filesystem."""
  # Only a 'gtest_nontest' binary exists; its name does not match the
  # recognized test pattern, so nothing should be found even when all
  # built configurations are requested.
  self.fake_os = FakeOs(FakePath(
      current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
      known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_nontest'),
                   'test/']))
  self.test_runner = run_tests_util.TestRunner(script_dir='.',
                                               injected_os=self.fake_os,
                                               injected_subprocess=None)
  self.AssertResultsEqual(
      self.test_runner.GetTestsToRun(
          [],
          '',
          True,
          available_configurations=self.fake_configurations),
      ([], []))
def testWorksFromDifferentDir(self):
"""Exercises GetTestsToRun from a directory different from run_test.py's."""
# Here we simulate an test script in directory /d/ called from the
# directory /a/b/c/.
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath('/a/b/c'),
known_paths=[
'/a/b/c/',
AddExeExtension('/d/' + GTEST_DBG_DIR + '/gtest_unittest'),
AddExeExtension('/d/' + GTEST_OPT_DIR + '/gtest_unittest'),
'/d/test/gtest_color_test.py']))
self.fake_configurations = ['dbg', 'opt']
self.test_runner = run_tests_util.TestRunner(script_dir='/d/',
injected_os=self.fake_os,
injected_subprocess=None)
# A binary test.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[('/d/' + GTEST_DBG_DIR, '/d/' + GTEST_DBG_DIR + '/gtest_unittest')]))
# A Python test.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([('/d/' + GTEST_DBG_DIR, '/d/test/gtest_color_test.py')], []))
def testNonTestBinary(self):
  """Exercises GetTestsToRun with a non-test parameter.

  GetTestsToRun is expected to return a falsy value when the requested
  name does not match any known test.
  """
  # assertFalse replaces the deprecated assert_(not ...) idiom and states
  # the intent directly.
  self.assertFalse(
      self.test_runner.GetTestsToRun(
          ['gtest_unittest_not_really'],
          '',
          False,
          available_configurations=self.fake_configurations))
def testNonExistingPythonTest(self):
  """Exercises GetTestsToRun with a non-existent Python test parameter.

  GetTestsToRun is expected to return a falsy value when the named Python
  test file does not exist.
  """
  # assertFalse replaces the deprecated assert_(not ...) idiom.
  self.assertFalse(
      self.test_runner.GetTestsToRun(
          ['nonexistent_test.py'],
          '',
          False,
          available_configurations=self.fake_configurations))
if run_tests_util.IS_WINDOWS or run_tests_util.IS_CYGWIN:
def testDoesNotPickNonExeFilesOnWindows(self):
"""Verifies that GetTestsToRun does not find _test files on Windows."""
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
known_paths=['/d/' + GTEST_DBG_DIR + '/gtest_test', 'test/']))
self.test_runner = run_tests_util.TestRunner(script_dir='.',
injected_os=self.fake_os,
injected_subprocess=None)
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[],
'',
True,
available_configurations=self.fake_configurations),
([], []))
class RunTestsTest(unittest.TestCase):
  """Exercises TestRunner.RunTests."""

  def SpawnSuccess(self, unused_executable, unused_argv):
    """Fakes test success by returning 0 as an exit code."""
    self.num_spawn_calls += 1
    return 0

  def SpawnFailure(self, unused_executable, unused_argv):
    """Fakes test failure by returning 1 as an exit code."""
    self.num_spawn_calls += 1
    return 1

  def setUp(self):
    """Builds a fake filesystem with one binary test and one Python test."""
    self.fake_os = FakeOs(FakePath(
        current_dir=os.path.abspath(os.path.dirname(run_tests_util.__file__)),
        known_paths=[
            AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
            AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
            'test/gtest_color_test.py']))
    self.fake_configurations = ['dbg', 'opt']
    self.test_runner = run_tests_util.TestRunner(
        script_dir=os.path.dirname(__file__) or '.',
        injected_os=self.fake_os,
        injected_subprocess=None)
    self.num_spawn_calls = 0  # A number of calls to spawn.

  def testRunPythonTestSuccess(self):
    """Exercises RunTests to handle a Python test success."""
    self.fake_os.spawn_impl = self.SpawnSuccess
    self.assertEqual(
        self.test_runner.RunTests(
            [(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
            []),
        0)
    self.assertEqual(self.num_spawn_calls, 1)

  def testRunBinaryTestSuccess(self):
    """Exercises RunTests to handle a binary test success."""
    self.fake_os.spawn_impl = self.SpawnSuccess
    self.assertEqual(
        self.test_runner.RunTests(
            [],
            [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
        0)
    self.assertEqual(self.num_spawn_calls, 1)

  # NOTE(review): "Fauilure" is a typo in the method name; kept unchanged
  # to avoid altering the test id unittest reports.
  def testRunPythonTestFauilure(self):
    """Exercises RunTests to handle a Python test failure."""
    self.fake_os.spawn_impl = self.SpawnFailure
    self.assertEqual(
        self.test_runner.RunTests(
            [(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
            []),
        1)
    self.assertEqual(self.num_spawn_calls, 1)

  def testRunBinaryTestFailure(self):
    """Exercises RunTests to handle a binary test failure."""
    self.fake_os.spawn_impl = self.SpawnFailure
    self.assertEqual(
        self.test_runner.RunTests(
            [],
            [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
        1)
    self.assertEqual(self.num_spawn_calls, 1)

  def testCombinedTestSuccess(self):
    """Exercises RunTests to handle a success of both Python and binary test."""
    # NOTE(review): the Python-test list is given the binary's path here,
    # not a .py file — presumably intentional fixture reuse; verify.
    self.fake_os.spawn_impl = self.SpawnSuccess
    self.assertEqual(
        self.test_runner.RunTests(
            [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
            [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
        0)
    self.assertEqual(self.num_spawn_calls, 2)

  def testCombinedTestSuccessAndFailure(self):
    """Exercises RunTests with a spawn fake that fails only Python tests."""
    def SpawnImpl(executable, argv):
      self.num_spawn_calls += 1
      # Simulates failure of a Python test and success of a binary test.
      if '.py' in executable or '.py' in argv[0]:
        return 1
      else:
        return 0

    # NOTE(review): both entries point at the binary path, so SpawnImpl
    # never sees a '.py' path and the combined run succeeds (0) — verify
    # that this is the intended scenario for the test's name.
    self.fake_os.spawn_impl = SpawnImpl
    self.assertEqual(
        self.test_runner.RunTests(
            [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
            [(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
        0)
    self.assertEqual(self.num_spawn_calls, 2)
class ParseArgsTest(unittest.TestCase):
  """Exercises ParseArgs.

  The deprecated TestCase.assert_ alias is replaced with assertTrue
  throughout; the checked behavior is unchanged.
  """

  def testNoOptions(self):
    """Verifies the option defaults when no flags are passed."""
    options, args = run_tests_util.ParseArgs('gtest', argv=['script.py'])
    self.assertEqual(args, ['script.py'])
    self.assertTrue(options.configurations is None)
    self.assertFalse(options.built_configurations)

  def testOptionC(self):
    """-c selects an explicit configuration."""
    options, args = run_tests_util.ParseArgs(
        'gtest', argv=['script.py', '-c', 'dbg'])
    self.assertEqual(args, ['script.py'])
    self.assertEqual(options.configurations, 'dbg')
    self.assertFalse(options.built_configurations)

  def testOptionA(self):
    """-a selects all configurations."""
    options, args = run_tests_util.ParseArgs('gtest', argv=['script.py', '-a'])
    self.assertEqual(args, ['script.py'])
    self.assertEqual(options.configurations, 'all')
    self.assertFalse(options.built_configurations)

  def testOptionB(self):
    """-b selects the built configurations."""
    options, args = run_tests_util.ParseArgs('gtest', argv=['script.py', '-b'])
    self.assertEqual(args, ['script.py'])
    self.assertTrue(options.configurations is None)
    self.assertTrue(options.built_configurations)

  def testOptionCAndOptionB(self):
    """-c and -b may be combined."""
    options, args = run_tests_util.ParseArgs(
        'gtest', argv=['script.py', '-c', 'dbg', '-b'])
    self.assertEqual(args, ['script.py'])
    self.assertEqual(options.configurations, 'dbg')
    self.assertTrue(options.built_configurations)

  def testOptionH(self):
    """Verifies that both -h and --help invoke the help callback."""
    help_called = [False]

    # Suppresses lint warning on unused arguments.  These arguments are
    # required by optparse, even though they are unused.
    # pylint: disable-msg=W0613
    def VerifyHelp(option, opt, value, parser):
      help_called[0] = True

    # Verifies that -h causes the help callback to be called.
    help_called[0] = False
    _, args = run_tests_util.ParseArgs(
        'gtest', argv=['script.py', '-h'], help_callback=VerifyHelp)
    self.assertEqual(args, ['script.py'])
    self.assertTrue(help_called[0])

    # Verifies that --help causes the help callback to be called.
    help_called[0] = False
    _, args = run_tests_util.ParseArgs(
        'gtest', argv=['script.py', '--help'], help_callback=VerifyHelp)
    self.assertEqual(args, ['script.py'])
    self.assertTrue(help_called[0])
if __name__ == '__main__':
unittest.main()
|
ondra-novak/chromium.src
|
refs/heads/nw
|
build/android/pylib/utils/time_profile.py
|
166
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
class TimeProfile(object):
  """Simple wall-clock profiler that logs how long an action took."""

  def __init__(self, description):
    # The description is interpolated into the log line emitted by Stop().
    self._description = description
    self._starttime = None
    self.Start()

  def Start(self):
    """(Re)starts the timer from the current moment."""
    self._starttime = time.time()

  def Stop(self):
    """Stops profiling and logs the elapsed time; no-op if not started."""
    if not self._starttime:
      return
    elapsed = time.time() - self._starttime
    logging.info('%fsec to perform %s', elapsed, self._description)
    self._starttime = None
|
hanfang/scikit-ribo
|
refs/heads/master
|
scripts/NProt2013/Enrichment_efficiency.py
|
1
|
"""
Supplementary Note 14: Enrichment efficiency
Author: Annemarie Becker
inputFile1:
RPM-normalized read densities along the whole genome or in protein coding regions on plus or minus strand from sample 1 (Supplementary Note 7 or 8)
col0: position along genome
col1: RPM-normalized read density at that position
inputFile2:
RPM-normalized read densities along the whole genome or in protein coding regions on plus or minus strand from sample 2 (Supplementary Note 7 or 8)
col0: position along genome
col1: RPM-normalized read density at that position
outputFile:
ratio of RPM-normalized read densities for protein coding regions on plus or minus strand from samples 1 and 2
col0: position along genome
col1: ratio of RPM-normalized read densities from samples 1 and 2
"""
def ratio(inputFile1, inputFile2, outputFile, end=4578159):
    """Write the per-position ratio of read densities of two samples.

    For every genome position, the RPM-normalized read densities are summed
    over a sliding window of +/-20 positions (truncated at the genome edges)
    in each sample, and sum1/sum2 is written to *outputFile* as
    "position<TAB>ratio".  Positions where the sample-2 window sum is zero
    get a ratio of 0.0.

    Args:
        inputFile1: path to whitespace-separated "position density" lines
            for sample 1.
        inputFile2: same format, for sample 2.
        outputFile: destination path for the tab-separated output.
        end: last genome position (default is the value hard-coded by the
            original script; pass a different value for other genomes).

    Fixes over the original: input/output files are now closed (they were
    opened and never closed), the genome length is a parameter instead of a
    hard-coded constant, and the local result variable no longer shadows
    the function name.
    """
    def load_densities(path):
        # Parse "position density" lines into {position: density}.
        densities = {}
        with open(path, 'r') as in_file:
            for line in in_file:
                fields = line.split()
                if not fields:  # tolerate blank lines
                    continue
                densities[int(fields[0])] = float(fields[1])
        return densities

    dict1 = load_densities(inputFile1)
    dict2 = load_densities(inputFile2)

    start = 1
    start_sum = start + 20  # first position with a full left half-window
    end_sum = end - 20      # last position with a full right half-window

    with open(outputFile, 'w') as out_file:
        def write_window(position, lo, hi):
            # Sum both samples over [lo, hi] and emit "position<TAB>ratio".
            sum1 = sum(dict1[x] for x in range(lo, hi + 1))
            sum2 = sum(dict2[x] for x in range(lo, hi + 1))
            value = sum1 / sum2 if sum2 != 0 else 0.0
            out_file.write(str(position) + '\t' + str(value) + '\n')

        # Left edge: windows truncated at the genome start.
        for j in range(start, start_sum + 1):
            write_window(j, start, j + 20)
        # Interior: full +/-20 windows.
        for k in range(start_sum + 1, end_sum + 1):
            write_window(k, k - 20, k + 20)
        # Right edge: windows truncated at the genome end.
        for l in range(end_sum + 1, end + 1):
            write_window(l, l - 20, end)
if __name__ == '__main__':
inputFile1 = ''
inputFile2 = ''
outputFile = ''
ratio(inputFile1, inputFile2, outputFile)
|
google-code-export/los-cocos
|
refs/heads/master
|
samples/jumping_lens.py
|
3
|
#
# cocos2d
# http://cocos2d.org
#
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pyglet
from cocos.director import director
from cocos.scene import Scene
from cocos.layer import Layer
from cocos.actions import JumpBy, Lens3D, Reverse
# create a layer with an image
# create a layer with an image
class BackgroundLayer( Layer ):
    """Cocos layer that blits a static image ('flag.png') every frame."""

    def __init__( self ):
        # always call super()
        super( BackgroundLayer, self).__init__()

        # load the image from file via pyglet's resource loader
        self.image = pyglet.resource.image('flag.png')

    def draw( self ):
        # blit the image at the origin on every frame
        self.image.blit(0,0)
if __name__ == "__main__":
# initialize the director,
# enabling to resize the main window
director.init( resizable=True)
# enable opengl depth test
# since we are using z-values
director.set_depth_test()
# create an Scene with 1 layer: BackgroundLayer
scene = Scene( BackgroundLayer() )
# create a Lens effect action
# radius: 150 pixels
# lens_effect: 0.7, a strong "lens". 0 means no effect at all. 1 means very strong
# center: center of the lens
# grid=(20,16): create a grid of 20 tiles x 16 tiles. More tiles will
# look better but the performance will decraese
# duration=10: 10 seconds
lens = Lens3D( radius=150, lens_effect=0.7, center=(150,150), grid=(20,16), duration=50)
# create a Jump action
# Jump to the right 360 pixels doing:
# 3 jumps
# of height 170 pixels
# in 4 seconds
jump = JumpBy((360,0),170,3,4 )
# do and get the cloned action of lens'
action = scene.do( lens )
# perform the Jump action using as target the lens effect.
# the Jump action will modify the 'position' attribute, and
# the lens action uses the 'position' attribute as the center of the lens
#
# The action Jump + Reverse(Jump) will be repeated 5 times
scene.do( (jump + Reverse(jump)) * 5, target=action)
# Run!
director.run( scene )
|
rajanandakumar/DIRAC
|
refs/heads/integration
|
FrameworkSystem/private/logging/backends/PrintBackend.py
|
7
|
# $HeadURL$
__RCSID__ = "$Id$"
""" This backend just print the log messages through the standar output
"""
from DIRAC.FrameworkSystem.private.logging.backends.BaseBackend import BaseBackend
from DIRAC.Core.Utilities import LogColoring
class PrintBackend( BaseBackend ):
  """Logging backend that writes composed log messages to standard output."""

  def doMessage( self, messageObject ):
    # Build the textual representation of the message first.
    msg = self.composeString( messageObject )
    if not self._optionsDictionary[ 'Color' ]:
      print( msg )
    else:
      # Colorize the output according to the message's log level.
      # NOTE(review): Python 2 print statement below — this module predates
      # a Python 3 port and will not compile under Python 3.
      print LogColoring.colorMessage( messageObject.getLevel(), msg )
|
lazaronixon/enigma2
|
refs/heads/master
|
lib/python/Plugins/Extensions/TuxboxPlugins/pluginrunner.py
|
73
|
from enigma import eDBoxLCD, eRCInput, fbClass, eConsoleAppContainer
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
class PluginRunner(Screen):
skin = """
<screen position="1,1" size="1,1" title="Plugin" >
</screen>"""
def __init__(self, session, pluginname, args = None):
self.skin = PluginRunner.skin
Screen.__init__(self, session)
self.container = eConsoleAppContainer()
self.container.appClosed.append(self.finishedExecution)
self.runPlugin(pluginname)
def runPlugin(self, pluginname):
eDBoxLCD.getInstance().lock()
eRCInput.getInstance().lock()
fbClass.getInstance().lock()
print "executing:", ("pluginlauncher -x %s" % pluginname)
if self.container.execute("pluginlauncher -x %s" % pluginname):
self.finishedExecution(None)
def finishedExecution(self, retval = 1):
print "PluginRunner retval:", retval
fbClass.getInstance().unlock()
eRCInput.getInstance().unlock()
eDBoxLCD.getInstance().unlock()
if retval is None or retval != 1:
self.session.openWithCallback(
self.finishedExecution,
MessageBox,
_("Error executing plugin") % (param)
)
else:
self.close()
|
cybojenix/SlimBot
|
refs/heads/slimbot
|
disabled_stuff/steam.py
|
6
|
import re
from bs4 import BeautifulSoup, NavigableString, Tag
from util import hook, http, web
from util.text import truncate_str
steam_re = (r'(.*:)//(store.steampowered.com)(:[0-9]+)?(.*)', re.I)
def get_steam_info(url):
page = http.get(url)
soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")
data = {}
data["name"] = soup.find('div', {'class': 'apphub_AppName'}).text
data["desc"] = truncate_str(soup.find('meta', {'name': 'description'})['content'].strip(), 80)
# get the element details_block
details = soup.find('div', {'class': 'details_block'})
# loop over every <b></b> tag in details_block
for b in details.findAll('b'):
# get the contents of the <b></b> tag, which is our title
title = b.text.lower().replace(":", "")
if title == "languages":
# we have all we need!
break
# find the next element directly after the <b></b> tag
next_element = b.nextSibling
if next_element:
# if the element is some text
if isinstance(next_element, NavigableString):
text = next_element.string.strip()
if text:
# we found valid text, save it and continue the loop
data[title] = text
continue
else:
# the text is blank - sometimes this means there are
# useless spaces or tabs between the <b> and <a> tags.
# so we find the next <a> tag and carry on to the next
# bit of code below
next_element = next_element.find_next('a', href=True)
# if the element is an <a></a> tag
if isinstance(next_element, Tag) and next_element.name == 'a':
text = next_element.string.strip()
if text:
# we found valid text (in the <a></a> tag),
# save it and continue the loop
data[title] = text
continue
data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()
return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
u" \x02Price\x02: {price}".format(**data)
@hook.regex(*steam_re)
def steam_url(match):
return get_steam_info("http://store.steampowered.com" + match.group(4))
@hook.command
def steam(inp):
"""steam [search] - Search for specified game/trailer/DLC"""
page = http.get("http://store.steampowered.com/search/?term=" + inp)
soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")
result = soup.find('a', {'class': 'search_result_row'})
return get_steam_info(result['href']) + " - " + web.isgd(result['href'])
|
ebu/radiodns-plugit
|
refs/heads/develop
|
RadioDns-PlugIt/alembic/versions/33bc6fe131c7_added_shortdescripti.py
|
1
|
"""Added shortDescription
Revision ID: 33bc6fe131c7
Revises: 314a9041017e
Create Date: 2014-06-03 17:25:44.348465
"""
# revision identifiers, used by Alembic.
revision = '33bc6fe131c7'
down_revision = '314a9041017e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable short_description (String(180)) column to station."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('station', sa.Column('short_description', sa.String(length=180), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the short_description column from the station table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('station', 'short_description')
    ### end Alembic commands ###
|
sivel/ansible
|
refs/heads/devel
|
test/units/module_utils/common/validation/test_check_mutually_exclusive.py
|
44
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils._text import to_native
from ansible.module_utils.common.validation import check_mutually_exclusive
@pytest.fixture
def mutually_exclusive_terms():
return [
('string1', 'string2',),
('box', 'fox', 'socks'),
]
def test_check_mutually_exclusive(mutually_exclusive_terms):
params = {
'string1': 'cat',
'fox': 'hat',
}
assert check_mutually_exclusive(mutually_exclusive_terms, params) == []
def test_check_mutually_exclusive_found(mutually_exclusive_terms):
params = {
'string1': 'cat',
'string2': 'hat',
'fox': 'red',
'socks': 'blue',
}
expected = "parameters are mutually exclusive: string1|string2, box|fox|socks"
with pytest.raises(TypeError) as e:
check_mutually_exclusive(mutually_exclusive_terms, params)
assert to_native(e.value) == expected
def test_check_mutually_exclusive_none():
terms = None
params = {
'string1': 'cat',
'fox': 'hat',
}
assert check_mutually_exclusive(terms, params) == []
def test_check_mutually_exclusive_no_params(mutually_exclusive_terms):
with pytest.raises(TypeError) as te:
check_mutually_exclusive(mutually_exclusive_terms, None)
assert "'NoneType' object is not iterable" in to_native(te.value)
|
wweiradio/django-guardian
|
refs/heads/master
|
example_project/core/models.py
|
42
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
    """Custom user model extending Django's AbstractUser with a birth date."""

    # Optional: NULL-able in the database and blank-able in forms.
    birth_date = models.DateField(null=True, blank=True)
|
drmateo/ecto
|
refs/heads/master
|
test/scripts/throw_in_interpreter_thread.py
|
4
|
#!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto.ecto_test as ecto_test
try:
ecto_test.should_throw_in_interpreter_thread()
except ecto.EctoException, e:
print "OK:\n", e
except RuntimeError as e:
print e
print "Not so good -1"
try:
ecto_test.should_rethrow_in_interpreter_thread()
print "spinlock..."
while True:
pass
print "FAIL that should have thrown"
except ecto.EctoException, e:
print "OK:\n", e
except RuntimeError as e:
print e
print "Not so good appears in lucid boost 1.40 FIXME"
try:
ecto_test.should_rethrow_stdexcept_in_interpreter_thread()
print "spinlock..."
while True:
pass
print "FAIL that should have thrown"
except RuntimeError, e:
print "OK:\n", e
print "yay"
|
Anccerson/PyAlgorithm
|
refs/heads/master
|
ugly_number.py
|
1
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import deque
def ugly(n):
    """Return the n-th ugly number (positive integers whose only prime
    factors are 2, 3 and 5); ugly(1) == 1.

    Uses the classic three-queue construction: each deque holds pending
    multiples, with the oldest candidate at the right end (index -1) and
    new candidates pushed on the left.  Popping from the 2-queue pushes
    2x/3x/5x, from the 3-queue pushes 3x/5x, and from the 5-queue pushes
    only 5x, which guarantees every value is generated exactly once.
    """
    twos, threes, fives = deque([2]), deque([3]), deque([5])
    current = 1
    for _ in range(n - 1):
        current = min(twos[-1], threes[-1], fives[-1])
        if current == twos[-1]:
            twos.pop()
            twos.appendleft(2 * current)
            threes.appendleft(3 * current)
            fives.appendleft(5 * current)
        elif current == threes[-1]:
            threes.pop()
            threes.appendleft(3 * current)
            fives.appendleft(5 * current)
        else:
            fives.pop()
            fives.appendleft(5 * current)
    return current
if __name__ == "__main__":
print(ugly(1500) == 859963392)
|
b-me/cmsplugin-contact
|
refs/heads/master
|
cmsplugin_contact/migrations/0005_auto__del_field_contact_content_label__del_field_contact_subject_label.py
|
9
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Contact.content_label'
db.delete_column(u'cmsplugin_contact', 'content_label')
# Deleting field 'Contact.subject_label'
db.delete_column(u'cmsplugin_contact', 'subject_label')
# Deleting field 'Contact.email_label'
db.delete_column(u'cmsplugin_contact', 'email_label')
def backwards(self, orm):
# Adding field 'Contact.content_label'
db.add_column(u'cmsplugin_contact', 'content_label',
self.gf('django.db.models.fields.CharField')(default=u'Message', max_length=100),
keep_default=False)
# Adding field 'Contact.subject_label'
db.add_column(u'cmsplugin_contact', 'subject_label',
self.gf('django.db.models.fields.CharField')(default=u'Subject', max_length=200),
keep_default=False)
# Adding field 'Contact.email_label'
db.add_column(u'cmsplugin_contact', 'email_label',
self.gf('django.db.models.fields.CharField')(default=u'Your email address', max_length=100),
keep_default=False)
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'cmsplugin_contact.contact': {
'Meta': {'object_name': 'Contact', 'db_table': "u'cmsplugin_contact'"},
'akismet_api_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'form_layout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'form_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'recaptcha_private_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'recaptcha_public_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'recaptcha_theme': ('django.db.models.fields.CharField', [], {'default': "'clean'", 'max_length': '20'}),
'site_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'spam_protection_method': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'submit': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '30'}),
'thanks': ('django.db.models.fields.TextField', [], {'default': "u'Thank you for your message.'", 'max_length': '200'})
}
}
complete_apps = ['cmsplugin_contact']
|
brianwoo/django-tutorial
|
refs/heads/master
|
build/Django/build/lib.linux-x86_64-2.7/django/contrib/gis/db/backends/postgis/creation.py
|
87
|
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_ops = 'GIST_GEOMETRY_OPS'
geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography or self.connection.ops.geometry:
# Geography and Geometry (PostGIS 2.0+) columns are
# created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
# PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
# we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
# which are fast on multidimensional cases, or just plain
# gist index for the 2d case.
if f.geography:
index_ops = ''
elif self.connection.ops.geometry:
if f.dim > 2:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
else:
index_ops = ''
else:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
return output
def sql_table_creation_suffix(self):
if self.connection.template_postgis is not None:
return ' TEMPLATE %s' % (
self.connection.ops.quote_name(self.connection.template_postgis),)
return ''
|
hkff/FodtlMon
|
refs/heads/master
|
fodtlmon/ltl/ltl.py
|
1
|
"""
ltl
Copyright (C) 2015 Walid Benghabrit
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
__author__ = 'walid'
from enum import Enum
#############################
# Abstract operators
#############################
class Formula:
    """
    Abstract base class for every node of the formula AST.

    Class attributes overridden by subclasses control printing:
      symbol : operator spelling used by __str__
      tspass : operator spelling for the TSPASS syntax
      ltlfo  : operator spelling for the LTL-FO syntax
      code   : code-generation spelling (not referenced in this file)
    """
    symbol = ""
    tspass = ""
    ltlfo = ""
    code = ""

    def toTSPASS(self):
        # Default: fall back to the generic string rendering.
        return str(self)

    def toLTLFO(self):
        # Default: fall back to the generic string rendering.
        return str(self)

    def prefix_print(self):
        # Leaves print the same in prefix and infix form.
        return str(self)

    def toCODE(self):
        # Python expression that reconstructs this node.
        return self.__class__.__name__ + "()"

    def reduce(self):
        # Hook for subclasses; no-op by default.
        pass

    def eval(self):
        # Default evaluation: the formula makes no progress and stands for itself.
        return self

    def clos(self):
        # Hook for subclasses; no-op by default.
        pass

    def nnf(self):
        # Hook for subclasses (negation normal form); no-op by default.
        pass

    def and_(self, exp):
        # Conjunction of self with exp.
        return And(self, exp)

    def or_(self, exp):
        # Disjunction of self with exp.
        return Or(self, exp)

    def size(self):
        # Number of nodes in the subtree rooted at this formula.
        return 1 + sum([s.size() for s in self.children()])

    def children(self):
        # Leaves have no children; composite nodes override this.
        return []

    def walk(self, filters: str=None, filter_type: type=None, pprint=False, depth=-1):
        """
        Iterate tree in pre-order wide-first search order
        :param filters: filter by python expression
        :param filter_type: Filter by class
        :param pprint: when True, return a newline-joined string dump instead of a list
        :param depth: maximum recursion depth; -1 means unlimited
        :return: list of matching nodes (or their string dump when pprint is True)
        """
        children = self.children()
        if children is None:
            children = []
        res = []
        if depth == 0:
            return res
        elif depth != -1:
            depth -= 1
        # Recurse into sub-formulas first (children contribute before self).
        for child in children:
            if isinstance(child, Formula):
                tmp = child.walk(filters=filters, filter_type=filter_type, pprint=pprint, depth=depth)
                if tmp:
                    res.extend(tmp)
        # SECURITY NOTE(review): `filters` is executed with eval() below and
        # must therefore never come from untrusted input.
        if filter_type is None:
            if filters is not None:
                if eval(filters) is True:
                    res.append(self)
            else:
                res.append(self)
        elif isinstance(self, filter_type):
            if filters is not None:
                if eval(filters) is True:
                    res.append(self)
            else:
                res.append(self)
        if pprint:
            res = [str(x) + " " for x in res]
            res = "\n".join(res)
        return res
class Exp(Formula):
    """Marker base class separating expressions from other formulas."""
    pass
class Atom(Exp):
    """
    Atom: a leaf expression rendered through its class-level ``symbol``.
    """
    symbol = ""

    def __str__(self):
        return "%s" % self.symbol
class true(Atom):
    """
    Propositional constant: truth.
    """
    symbol = "true"

    def eval(self):
        # Evaluating truth yields a fresh truth instance.
        return true()
class false(Atom):
    """
    Propositional constant: falsity.
    """
    symbol = "false"

    def eval(self):
        # Evaluating falsity yields a fresh falsity instance.
        return false()
class Parameter(Exp):
    """
    Parameter: a named argument of a predicate (variable or constant).
    """

    def __init__(self, name=""):
        self.name = str(name)

    def __str__(self):
        return "%s" % self.name

    def equal(self, o):
        # Parameters compare equal when the other exists, shares the class
        # hierarchy root Parameter and carries the same name.
        if o is None:
            return False
        return isinstance(o, Parameter) and o.name == self.name

    def toCODE(self):
        return "%s('%s')" % (self.__class__.__name__, self.name)

    @staticmethod
    def parse(string: str, cts=False):
        """Parse one textual parameter.

        Quoted text always becomes a Constant; unquoted text becomes a
        Constant when cts=True (trace events) and a Variable otherwise.
        """
        text = string.strip()
        single_quoted = text.startswith("'") and text.endswith("'")
        double_quoted = text.startswith('"') and text.endswith('"')
        if single_quoted or double_quoted:
            return Constant(text[1:-1])
        if cts:
            return Constant(text)
        return Variable(text)
class Variable(Parameter):
    """
    Data variable: a parameter standing for a yet-unbound value.
    """

    def equal(self, o):
        # Variables only equal other Variables with the same name.
        if not isinstance(o, Variable):
            return False
        return o.name == self.name

    def toLTLFO(self):
        return "%s" % self.name


# Short alias used by formula-building code.
V = Variable
class Constant(Parameter):
    """
    Constant: a parameter with a fixed value, rendered quoted ('name').
    """
    def __init__(self, name=""):
        super().__init__(name=name)
        # Strip one level of surrounding single quotes so Constant("'a'")
        # and Constant("a") denote the same value.
        # NOTE(review): double quotes are not stripped here (only in
        # Parameter.parse) — confirm that is intentional.
        if self.name.startswith("'") and self.name.endswith("'"):
            self.name = self.name[1:-1]
    def equal(self, o):
        # Comparison against a Regexp delegates to pattern matching.
        if isinstance(o, Regexp):
            return o.equal(self)
        return (o is not None) and (isinstance(o, Constant) and (str(o.name) == str(self.name)))
    def toLTLFO(self):
        return "'%s'" % self.name
    def __str__(self):
        return "'%s'" % self.name
C = Constant  # short alias
class Regexp(Constant):
    """
    Constant whose name is a regular expression: equality against another
    parameter means the pattern matches (from the start, per re.match) the
    other parameter's name.
    """
    def equal(self, o):
        """Return True iff the pattern matches o.name.

        Always returns a bool: the original fell through and returned None
        when ``o`` was None, and swallowed every exception with a bare
        ``except:``. Now None yields False explicitly and only pattern /
        operand errors are silenced.
        """
        if o is None:
            return False
        try:
            pattern = re.compile(str(self.name))
            return pattern.match(o.name) is not None
        except (re.error, AttributeError, TypeError):
            # Invalid regex, or an operand without a usable .name: not equal.
            return False
class Predicate(Exp):
    """
    Predicate: a named relation applied to a list of parameters,
    e.g. p(x, 'a').
    """
    def __init__(self, name="", args=None):
        # When args is omitted, `name` is treated as the full textual form
        # (e.g. "p(x)") and parsed.
        if args is None:
            p = Predicate.parse(name)
            self.name = p.name
            self.args = p.args
        else:
            self.name = name
            self.args = args
    def __str__(self):
        args = ",".join([str(p) for p in self.args])
        return "%s(%s)" % (self.name, args)
    @staticmethod
    def parse(string: str, cts=False):
        """Parse "name(a1,...,an)" into a Predicate.

        :param cts: when True, unquoted arguments become Constants (trace
                    events) instead of Variables (formulae).
        Returns None (after printing an error) when an argument is empty.
        NOTE(review): input not ending with ")" also falls through and
        returns None implicitly — confirm callers handle that.
        """
        string = string.strip()
        if string.endswith(")"):
            name = string[0: string.find("(")]
            args = string[string.find("(")+1:-1].split(",")
            arguments = []
            for ar in args:
                if ar != '':
                    arguments.append(Parameter.parse(ar, cts=cts))
                else:
                    print("Invalid predicate format !")
                    return
            return Predicate(name, arguments)
    def equal(self, p):
        # Structural equality: same name, same arity, pairwise-equal args.
        res = False
        if isinstance(p, Predicate):
            res = (p.name == self.name) and (len(p.args) == len(self.args))
            if res:
                for a1, a2 in zip(self.args, p.args):
                    if not a1.equal(a2):
                        return False
        return res
    def toLTLFO(self):
        args = ",".join([p.toLTLFO() for p in self.args])
        return "%s(%s)" % (self.name, args)
    def toCODE(self):
        args = ",".join([p.toCODE() for p in self.args])
        return "%s('%s', %s)" % (self.__class__.__name__, self.name, "[" + args + "]")
    def children(self):
        # Arguments are the predicate's sub-nodes (used by Formula.walk/size).
        return self.args
    def isIn(self, preds):
        # Membership by structural equality rather than identity.
        for x in preds:
            if self.equal(x):
                return True
        return False
    def instantiate(self, valuation):
        """Substitute variables using `valuation` (items exposing .var/.value).

        Returns a ground Predicate, or None when any variable is unbound.
        NOTE(review): arguments that are neither Variable nor Constant are
        silently dropped from the result — confirm this is intended.
        """
        p = Predicate(name=self.name, args=[])
        for x in self.args:
            if isinstance(x, Variable):
                # Lookup in valuation
                found = False
                for v in valuation:
                    if str(v.var) == x.name:
                        p.args.append(Constant(str(v.value.name)))
                        found = True
                        break
                if not found:
                    # raise Exception("Predicate instantiation failed : missing vars")
                    # p.args.append(Variable(str(x.name)))
                    return None
            elif isinstance(x, Constant):
                p.args.append(x)
        return p
P = Predicate  # short alias
class UExp(Exp):
    """
    Unary expression: one operator applied to a single sub-formula.
    """
    symbol = ""

    def __init__(self, inner=None):
        self.inner = inner

    def __str__(self):
        return "{}({})".format(self.symbol, self.inner)

    def prefix_print(self):
        return "({} {})".format(self.symbol, self.inner.prefix_print())

    def toTSPASS(self):
        return "({} {})".format(self.tspass, self.inner.toTSPASS())

    def toLTLFO(self):
        return "({} {})".format(self.ltlfo, self.inner.toLTLFO())

    def toCODE(self):
        return "{}({})".format(self.__class__.__name__, self.inner.toCODE())

    def children(self):
        return [self.inner]
class BExp(Exp):
    """
    Binary expression: one operator applied to a left and a right sub-formula.
    """
    symbol = ""

    def __init__(self, left=None, right=None):
        self.left = left
        self.right = right

    def __str__(self):
        return "(%s %s %s)" % (self.left, self.symbol, self.right)

    def prefix_print(self):
        # Fixed: recurse with prefix_print() on both children (previously
        # str() was used, so nested sub-formulas were printed in infix form,
        # inconsistent with UExp.prefix_print).
        return "(%s %s %s)" % (self.symbol, self.left.prefix_print(), self.right.prefix_print())

    def toTSPASS(self):
        return "(%s %s %s)" % (self.left.toTSPASS(), self.tspass, self.right.toTSPASS())

    def toLTLFO(self):
        return "(%s %s %s)" % (self.left.toLTLFO(), self.ltlfo, self.right.toLTLFO())

    def toCODE(self):
        return "%s(%s,%s)" % (self.__class__.__name__, self.left.toCODE(), self.right.toCODE())

    def children(self):
        return [self.left, self.right]
#############################
# LTL Operators
#############################
##
# Propositional operators
##
class And(BExp):
    """Logical conjunction."""
    symbol = "and"
    tspass = "&&"
    ltlfo = "/\\"

    def eval(self):
        """Simplify one level using the truth-table identities of conjunction."""
        left_true = isinstance(self.left, true) or self.left is Boolean3.Top
        left_false = isinstance(self.left, false) or self.left is Boolean3.Bottom
        if left_true:
            return self.right
        if left_false:
            return false()
        right_true = isinstance(self.right, true) or self.right is Boolean3.Top
        right_false = isinstance(self.right, false) or self.right is Boolean3.Bottom
        if right_true:
            return self.left
        if right_false:
            return false()
        return self
class Or(BExp):
    """Logical disjunction."""
    symbol = "or"
    tspass = "||"
    ltlfo = "\\/"

    def eval(self):
        """Simplify one level using the truth-table identities of disjunction."""
        if isinstance(self.left, true) or self.left is Boolean3.Top:
            return true()
        if isinstance(self.left, false) or self.left is Boolean3.Bottom:
            return self.right
        if isinstance(self.right, true) or self.right is Boolean3.Top:
            return true()
        if isinstance(self.right, false) or self.right is Boolean3.Bottom:
            return self.left
        return self
class Neg(UExp):
    """Logical negation."""
    symbol = "not"
    tspass = "~"
    ltlfo = "~"

    def eval(self):
        """Simplify one level: ~true -> false, ~false -> true, ~~f -> f."""
        inner = self.inner
        if isinstance(inner, true) or inner is Boolean3.Top:
            return false()
        if isinstance(inner, false) or inner is Boolean3.Bottom:
            return true()
        if isinstance(inner, Neg):
            return inner.inner
        return self


# Alternative spelling of negation.
Not = Neg
class Imply(Or):
    """
    Implication, stored in disjunctive form: a => b is kept as ~a \\/ b,
    i.e. self.left holds Neg(original_left).
    """
    symbol = "=>"
    tspass = "=>"
    ltlfo = "=>"
    def __init__(self, left=None, right=None):
        # Encode the implication as Or(Neg(left), right).
        super().__init__(Neg(left), right)
    def __str__(self):
        # Print the original (un-negated) left operand.
        return "(%s %s %s)" % (self.left.inner, self.symbol, self.right)
    def toCODE(self):
        # Reconstruct with the original left operand, not the stored Neg.
        return "%s(%s,%s)" % (self.__class__.__name__, self.left.inner.toCODE(), self.right.toCODE())
##
# Temporal operators
##
# Always
class Always(UExp):
    """Temporal 'always' (globally) operator."""
    symbol = "always"
    tspass = "always"
    ltlfo = "G"
class G(Always):
    symbol = "G"  # single-letter alias
# Future
class Future(UExp):
    """Temporal 'future' (eventually) operator."""
    symbol = "future"
    tspass = "sometime"
    ltlfo = "F"
class F(Future):
    symbol = "F"  # single-letter alias
# Next
class Next(UExp):
    """Temporal 'next step' operator."""
    symbol = "next"
    tspass = "next"
    ltlfo = "X"
class X(Next):
    symbol = "X"  # single-letter alias
# Until
class Until(BExp):
    """Temporal 'until' operator."""
    symbol = "until"
    tspass = "until"
    ltlfo = "U"
class U(Until):
    symbol = "U"  # single-letter alias
# Release
class Release(BExp):
    """Temporal 'release' operator (no native LTL-FO spelling)."""
    symbol = "release"
    tspass = "unless"
    ltlfo = ""
    def toLTLFO(self):
        """ Change to until form """
        # LTL-FO has no release operator; emit the duality a R b == ~(~a U ~b).
        return "~(~(%s) U ~(%s))" % (self.left.toLTLFO(), self.right.toLTLFO())
class R(Release):
    symbol = "R"  # single-letter alias
#############################
# Trace / Events
#############################
class Event:
    """
    A trace event: the set of predicates observed at one step.
    """

    def __init__(self, predicates=None, step="0"):
        # None default avoids sharing one mutable list across instances.
        self.predicates = [] if predicates is None else predicates
        self.step = step

    def __str__(self):
        return "{" + " | ".join([str(p) for p in self.predicates]) + "}"

    @staticmethod
    def parse(string):
        """Parse "{p(a) | q(b)}" into an Event.

        Returns None (after printing an error) on input not wrapped in {}.
        """
        string = string.strip()
        predicates = []
        if string.startswith("{") and string.endswith("}"):
            prs = string[1:-1].split("|")
            # Fixed: compare string contents with ==, not identity (`is`),
            # which only worked by accident of CPython string interning.
            if len(prs) == 1 and prs[0] == "":
                return Event()
            for p in prs:
                predicates.append(Predicate.parse(p, cts=True))
        else:
            print("Invalid event format ! A trace should be between {}")
            return
        return Event(predicates)

    def push_predicate(self, predicate):
        """Append a predicate; returns self to allow chaining."""
        self.predicates.append(predicate)
        return self

    def contains(self, predicate):
        """True when a structurally equal predicate is in this event."""
        for p in self.predicates:
            if isinstance(p, Predicate):
                if p.equal(predicate):
                    return True
        return False

    # Short alias used by trace-building code.
    p = push_predicate

    def toLTLFO(self):
        return "{" + ",".join([p.toLTLFO() for p in self.predicates]) + "}"
class Trace:
    """
    A trace: the ordered sequence of events of one monitored run.
    """

    def __init__(self, events=None):
        # None default avoids sharing one mutable list across instances.
        self.events = [] if events is None else events

    def __str__(self):
        return ";".join([str(e) for e in self.events])

    @staticmethod
    def parse(string):
        """Parse "ev;ev;..." into a Trace, skipping empty segments."""
        string = string.strip()
        events = []
        # Fixed idiom: plain loop instead of a list comprehension executed
        # purely for its side effects.
        for e in string.split(";"):
            if e != "":
                events.append(Event.parse(e))
        return Trace(events)

    def push_event(self, event):
        """Append an event; returns self to allow chaining."""
        self.events.append(event)
        return self

    def contains(self, f):
        """Membership: an Event must be in the trace; a Predicate may occur in any event."""
        if isinstance(f, Event):
            return f in self.events
        elif isinstance(f, Predicate):
            for e in self.events:
                if e.contains(f):
                    return True
            return False
        else:
            return False

    # Short alias used by trace-building code.
    e = push_event

    def toLTLFO(self):
        return ",".join([e.toLTLFO() for e in self.events])
#############################
# Three valued boolean
#############################
class Boolean3(Enum):
    """
    Three-valued monitoring verdict: Top (true), Bottom (false),
    Unknown (undecided yet). Values are the display symbols.
    """
    Top = "\u22A4"
    Bottom = "\u22A5"
    Unknown = "?"
    def __str__(self):
        # Render the verdict as its symbol.
        return self.value
def B3(formula):
    """
    Rewrite formula eval result into Boolean3
    :param formula: a formula/eval result, a bool, or a Boolean3 name/value string
    :return: Boolean3
    """
    is_top = (isinstance(formula, true) or formula is True
              or formula == Boolean3.Top.name or formula == Boolean3.Top.value)
    if is_top:
        return Boolean3.Top
    is_bottom = (isinstance(formula, false) or formula is False
                 or formula == Boolean3.Bottom.name or formula == Boolean3.Bottom.value)
    if is_bottom:
        return Boolean3.Bottom
    return Boolean3.Unknown
|
mloesch/tablib
|
refs/heads/develop
|
tablib/packages/openpyxl3/writer/styles.py
|
55
|
# file openpyxl/writer/styles.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write the shared style table."""
# package imports
from ..shared.xmltools import Element, SubElement
from ..shared.xmltools import get_document_content
from .. import style
class StyleWriter(object):
    """Serialize the distinct cell styles of a workbook into styles.xml content."""

    def __init__(self, workbook):
        # Distinct styles used anywhere in the workbook, in id order.
        self._style_list = self._get_style_list(workbook)
        self._root = Element('styleSheet',
            {'xmlns':'http://schemas.openxmlformats.org/spreadsheetml/2006/main'})

    def _get_style_list(self, workbook):
        """Collect distinct styles (deduplicated by hash) across worksheets.

        Side effect: populates ``self.style_table`` mapping style -> 1-based
        index (index 0 is the implicit default style).
        """
        crc = {}
        for worksheet in workbook.worksheets:
            # NOTE(review): the loop variable shadows the imported `style`
            # module inside this method; harmless here, but fragile.
            for style in list(worksheet._styles.values()):
                crc[hash(style)] = style
        self.style_table = dict([(style, i+1) \
                for i, style in enumerate(list(crc.values()))])
        sorted_styles = sorted(iter(self.style_table.items()), \
                key = lambda pair:pair[1])
        return [s[0] for s in sorted_styles]

    def get_style_by_hash(self):
        # Invert style_table into hash(style) -> index.
        # NOTE(review): `id` shadows the builtin here.
        return dict([(hash(style), id) \
                for style, id in self.style_table.items()])

    def write_table(self):
        """Emit every style sub-table into the root and return the XML string."""
        number_format_table = self._write_number_formats()
        fonts_table = self._write_fonts()
        fills_table = self._write_fills()
        borders_table = self._write_borders()
        self._write_cell_style_xfs()
        self._write_cell_xfs(number_format_table, fonts_table, fills_table, borders_table)
        self._write_cell_style()
        self._write_dxfs()
        self._write_table_styles()
        return get_document_content(xml_node=self._root)

    def _write_fonts(self):
        """ add fonts part to root
            return {font.crc => index}
        """
        fonts = SubElement(self._root, 'fonts')
        # default font record (index 0)
        font_node = SubElement(fonts, 'font')
        SubElement(font_node, 'sz', {'val':'11'})
        SubElement(font_node, 'color', {'theme':'1'})
        SubElement(font_node, 'name', {'val':'Calibri'})
        SubElement(font_node, 'family', {'val':'2'})
        SubElement(font_node, 'scheme', {'val':'minor'})
        # others: one record per distinct non-default font
        table = {}
        index = 1
        for st in self._style_list:
            if hash(st.font) != hash(style.DEFAULTS.font) and hash(st.font) not in table:
                table[hash(st.font)] = str(index)
                font_node = SubElement(fonts, 'font')
                SubElement(font_node, 'sz', {'val':str(st.font.size)})
                SubElement(font_node, 'color', {'rgb':str(st.font.color.index)})
                SubElement(font_node, 'name', {'val':st.font.name})
                SubElement(font_node, 'family', {'val':'2'})
                SubElement(font_node, 'scheme', {'val':'minor'})
                if st.font.bold:
                    SubElement(font_node, 'b')
                if st.font.italic:
                    SubElement(font_node, 'i')
                index += 1
        fonts.attrib["count"] = str(index)
        return table

    def _write_fills(self):
        """Emit the fills table; returns {hash(fill) => index (as str)}."""
        fills = SubElement(self._root, 'fills', {'count':'2'})
        # The first two fills are the mandated defaults (none / gray125).
        fill = SubElement(fills, 'fill')
        SubElement(fill, 'patternFill', {'patternType':'none'})
        fill = SubElement(fills, 'fill')
        SubElement(fill, 'patternFill', {'patternType':'gray125'})
        table = {}
        index = 2
        for st in self._style_list:
            if hash(st.fill) != hash(style.DEFAULTS.fill) and hash(st.fill) not in table:
                table[hash(st.fill)] = str(index)
                fill = SubElement(fills, 'fill')
                if hash(st.fill.fill_type) != hash(style.DEFAULTS.fill.fill_type):
                    node = SubElement(fill,'patternFill', {'patternType':st.fill.fill_type})
                    if hash(st.fill.start_color) != hash(style.DEFAULTS.fill.start_color):
                        SubElement(node, 'fgColor', {'rgb':str(st.fill.start_color.index)})
                    if hash(st.fill.end_color) != hash(style.DEFAULTS.fill.end_color):
                        # Fixed: bgColor must carry the END color; the original
                        # wrote start_color.index here (copy-paste defect),
                        # although the guard checks end_color.
                        SubElement(node, 'bgColor', {'rgb':str(st.fill.end_color.index)})
                index += 1
        fills.attrib["count"] = str(index)
        return table

    def _write_borders(self):
        """Emit the borders table; returns {hash(borders) => index (as str)}."""
        borders = SubElement(self._root, 'borders')
        # default (empty) border record
        border = SubElement(borders, 'border')
        SubElement(border, 'left')
        SubElement(border, 'right')
        SubElement(border, 'top')
        SubElement(border, 'bottom')
        SubElement(border, 'diagonal')
        # others: one record per distinct non-default border set
        table = {}
        index = 1
        for st in self._style_list:
            if hash(st.borders) != hash(style.DEFAULTS.borders) and hash(st.borders) not in table:
                table[hash(st.borders)] = str(index)
                border = SubElement(borders, 'border')
                # caution: respect this order
                for side in ('left','right','top','bottom','diagonal'):
                    obj = getattr(st.borders, side)
                    node = SubElement(border, side, {'style':obj.border_style})
                    SubElement(node, 'color', {'rgb':str(obj.color.index)})
                index += 1
        borders.attrib["count"] = str(index)
        return table

    def _write_cell_style_xfs(self):
        # Single default cell-style format record.
        cell_style_xfs = SubElement(self._root, 'cellStyleXfs', {'count':'1'})
        xf = SubElement(cell_style_xfs, 'xf',
            {'numFmtId':"0", 'fontId':"0", 'fillId':"0", 'borderId':"0"})

    def _write_cell_xfs(self, number_format_table, fonts_table, fills_table, borders_table):
        """ write styles combinations based on ids found in tables """
        # writing the cellXfs
        cell_xfs = SubElement(self._root, 'cellXfs',
            {'count':'%d' % (len(self._style_list) + 1)})
        # default xf record (index 0)
        def _get_default_vals():
            return dict(numFmtId='0', fontId='0', fillId='0',
                    xfId='0', borderId='0')
        SubElement(cell_xfs, 'xf', _get_default_vals())
        for st in self._style_list:
            vals = _get_default_vals()
            if hash(st.font) != hash(style.DEFAULTS.font):
                vals['fontId'] = fonts_table[hash(st.font)]
                vals['applyFont'] = '1'
            if hash(st.borders) != hash(style.DEFAULTS.borders):
                vals['borderId'] = borders_table[hash(st.borders)]
                vals['applyBorder'] = '1'
            if hash(st.fill) != hash(style.DEFAULTS.fill):
                vals['fillId'] = fills_table[hash(st.fill)]
                vals['applyFillId'] = '1'
            if st.number_format != style.DEFAULTS.number_format:
                vals['numFmtId'] = '%d' % number_format_table[st.number_format]
                vals['applyNumberFormat'] = '1'
            if hash(st.alignment) != hash(style.DEFAULTS.alignment):
                vals['applyAlignment'] = '1'
            node = SubElement(cell_xfs, 'xf', vals)
            if hash(st.alignment) != hash(style.DEFAULTS.alignment):
                # Only write the alignment attributes that differ from default.
                alignments = {}
                for align_attr in ['horizontal','vertical']:
                    if hash(getattr(st.alignment, align_attr)) != hash(getattr(style.DEFAULTS.alignment, align_attr)):
                        alignments[align_attr] = getattr(st.alignment, align_attr)
                SubElement(node, 'alignment', alignments)

    def _write_cell_style(self):
        # Single "Normal" built-in cell style.
        cell_styles = SubElement(self._root, 'cellStyles', {'count':'1'})
        cell_style = SubElement(cell_styles, 'cellStyle',
            {'name':"Normal", 'xfId':"0", 'builtinId':"0"})

    def _write_dxfs(self):
        # No differential formats are emitted.
        dxfs = SubElement(self._root, 'dxfs', {'count':'0'})

    def _write_table_styles(self):
        # Default table/pivot style names with no custom entries.
        table_styles = SubElement(self._root, 'tableStyles',
            {'count':'0', 'defaultTableStyle':'TableStyleMedium9',
            'defaultPivotStyle':'PivotStyleLight16'})

    def _write_number_formats(self):
        """Emit custom number formats; returns {number_format => numeric id}."""
        number_format_table = {}
        number_format_list = []
        exceptions_list = []
        num_fmt_id = 165 # start at a greatly higher value as any builtin can go
        num_fmt_offset = 0
        # NOTE(review): the loop variable shadows the imported `style` module.
        for style in self._style_list:
            if not style.number_format in number_format_list :
                number_format_list.append(style.number_format)
        for number_format in number_format_list:
            if number_format.is_builtin():
                # Builtins reuse their reserved id and need no <numFmt> entry.
                btin = number_format.builtin_format_id(number_format.format_code)
                number_format_table[number_format] = btin
            else:
                number_format_table[number_format] = num_fmt_id + num_fmt_offset
                num_fmt_offset += 1
                exceptions_list.append(number_format)
        num_fmts = SubElement(self._root, 'numFmts',
            {'count':'%d' % len(exceptions_list)})
        for number_format in exceptions_list :
            SubElement(num_fmts, 'numFmt',
                {'numFmtId':'%d' % number_format_table[number_format],
                'formatCode':'%s' % number_format.format_code})
        return number_format_table
|
jonasjberg/autonameow
|
refs/heads/master
|
autonameow/vendor/prompt_toolkit/filters/utils.py
|
23
|
from __future__ import unicode_literals
from .base import Always, Never
from .types import SimpleFilter, CLIFilter
# Public API of this module.
__all__ = (
    'to_cli_filter',
    'to_simple_filter',
)
# Module-level singletons shared by the bool-to-filter converters.
_always = Always()
_never = Never()
def to_simple_filter(bool_or_filter):
    """
    Accept both booleans and SimpleFilters as input and
    turn it into a SimpleFilter (True -> Always, False -> Never,
    filters pass through unchanged).
    """
    accepted = isinstance(bool_or_filter, (bool, SimpleFilter))
    if not accepted:
        raise TypeError('Expecting a bool or a SimpleFilter instance. Got %r' % bool_or_filter)
    replacements = {
        True: _always,
        False: _never,
    }
    return replacements.get(bool_or_filter, bool_or_filter)
def to_cli_filter(bool_or_filter):
    """
    Accept both booleans and CLIFilters as input and
    turn it into a CLIFilter (True -> Always, False -> Never,
    filters pass through unchanged).
    """
    accepted = isinstance(bool_or_filter, (bool, CLIFilter))
    if not accepted:
        raise TypeError('Expecting a bool or a CLIFilter instance. Got %r' % bool_or_filter)
    replacements = {
        True: _always,
        False: _never,
    }
    return replacements.get(bool_or_filter, bool_or_filter)
|
jpope777/searx
|
refs/heads/master
|
searx/tests/test_webapp.py
|
4
|
# -*- coding: utf-8 -*-
import json
from urlparse import ParseResult
from searx import webapp
from searx.testing import SearxTestCase
class ViewsTestCase(SearxTestCase):
    """View-level tests of the Flask app, with Search.search monkeypatched
    to return two fixed results."""

    def setUp(self):
        webapp.app.config['TESTING'] = True  # to get better error messages
        self.app = webapp.app.test_client()
        webapp.default_theme = 'default'
        # set some defaults
        self.test_results = [
            {
                'content': 'first test content',
                'title': 'First Test',
                'url': 'http://first.test.xyz',
                'engines': ['youtube', 'startpage'],
                'engine': 'startpage',
                'parsed_url': ParseResult(scheme='http', netloc='first.test.xyz', path='/', params='', query='', fragment=''),  # noqa
            }, {
                'content': 'second test content',
                'title': 'Second Test',
                'url': 'http://second.test.xyz',
                'engines': ['youtube', 'startpage'],
                'engine': 'youtube',
                'parsed_url': ParseResult(scheme='http', netloc='second.test.xyz', path='/', params='', query='', fragment=''),  # noqa
            },
        ]

        def search_mock(search_self, *args):
            # Replace the real engine fan-out with the fixed results above.
            search_self.results = self.test_results

        # NOTE(review): this patches the class attribute globally and is not
        # restored in tearDown, so it leaks into subsequent test cases.
        webapp.Search.search = search_mock
        self.maxDiff = None  # to see full diffs

    def test_index_empty(self):
        # A query-less POST still renders the landing page.
        result = self.app.post('/')
        self.assertEqual(result.status_code, 200)
        self.assertIn('<div class="title"><h1>searx</h1></div>', result.data)

    def test_index_html(self):
        # Default (HTML) rendering: titles are linked, query terms highlighted.
        result = self.app.post('/', data={'q': 'test'})
        self.assertIn(
            '<h3 class="result_title"><img width="14" height="14" class="favicon" src="/static/themes/default/img/icons/icon_youtube.ico" alt="youtube" /><a href="http://second.test.xyz" rel="noreferrer">Second <span class="highlight">Test</span></a></h3>',  # noqa
            result.data
        )
        self.assertIn(
            '<p class="content">first <span class="highlight">test</span> content<br class="last"/></p>',  # noqa
            result.data
        )

    def test_index_json(self):
        # JSON output carries the query and the raw result fields.
        result = self.app.post('/', data={'q': 'test', 'format': 'json'})
        result_dict = json.loads(result.data)
        self.assertEqual('test', result_dict['query'])
        self.assertEqual(
            result_dict['results'][0]['content'], 'first test content')
        self.assertEqual(
            result_dict['results'][0]['url'], 'http://first.test.xyz')

    def test_index_csv(self):
        # CSV output: header row plus one line per result.
        result = self.app.post('/', data={'q': 'test', 'format': 'csv'})
        self.assertEqual(
            'title,url,content,host,engine,score\r\n'
            'First Test,http://first.test.xyz,first test content,first.test.xyz,startpage,\r\n'  # noqa
            'Second Test,http://second.test.xyz,second test content,second.test.xyz,youtube,\r\n',  # noqa
            result.data
        )

    def test_index_rss(self):
        # RSS output: channel metadata plus one item per result.
        result = self.app.post('/', data={'q': 'test', 'format': 'rss'})
        self.assertIn(
            '<description>Search results for "test" - searx</description>',
            result.data
        )
        self.assertIn(
            '<opensearch:totalResults>2</opensearch:totalResults>',
            result.data
        )
        self.assertIn(
            '<title>First Test</title>',
            result.data
        )
        self.assertIn(
            '<link>http://first.test.xyz</link>',
            result.data
        )
        self.assertIn(
            '<description>first test content</description>',
            result.data
        )

    def test_about(self):
        result = self.app.get('/about')
        self.assertEqual(result.status_code, 200)
        self.assertIn('<h1>About <a href="/">searx</a></h1>', result.data)

    def test_preferences(self):
        result = self.app.get('/preferences')
        self.assertEqual(result.status_code, 200)
        self.assertIn(
            '<form method="post" action="/preferences" id="search_form">',
            result.data
        )
        self.assertIn(
            '<legend>Default categories</legend>',
            result.data
        )
        self.assertIn(
            '<legend>Interface language</legend>',
            result.data
        )

    def test_stats(self):
        result = self.app.get('/stats')
        self.assertEqual(result.status_code, 200)
        self.assertIn('<h2>Engine stats</h2>', result.data)

    def test_robots_txt(self):
        result = self.app.get('/robots.txt')
        self.assertEqual(result.status_code, 200)
        self.assertIn('Allow: /', result.data)

    def test_opensearch_xml(self):
        result = self.app.get('/opensearch.xml')
        self.assertEqual(result.status_code, 200)
        self.assertIn('<Description>Search searx</Description>', result.data)

    def test_favicon(self):
        result = self.app.get('/favicon.ico')
        self.assertEqual(result.status_code, 200)
|
Microvellum/Fluid-Designer
|
refs/heads/master
|
win64-vc/2.78/Python/bin/2.78/scripts/addons/add_mesh_extra_objects/__init__.py
|
1
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Contributed to by:
# Pontiac, Fourmadmen, varkenvarken, tuga3d, meta-androcto, metalliandy #
# dreampainter, cotejrp1, liero, Kayo Phoenix, sugiany, dommetysk #
# Phymec, Anthony D'Agostino, Pablo Vazquez, Richard Wilks, lijenstina #
# xyz presets by elfnor
# Add-on metadata read and displayed by Blender's add-on manager.
bl_info = {
    "name": "Extra Objects",
    "author": "Multiple Authors",
    "version": (0, 3, 1),
    "blender": (2, 74, 5),
    "location": "View3D > Add > Mesh",
    "description": "Add extra mesh object types",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Add_Mesh/Add_Extra",
    "category": "Add Mesh",
}
from .geodesic_domes import __init__
from .geodesic_domes import add_shape_geodesic
from .geodesic_domes import forms_271
from .geodesic_domes import geodesic_classes_271
from .geodesic_domes import third_domes_panel_271
from .geodesic_domes import vefm_271
if "bpy" in locals():
import importlib
importlib.reload(add_mesh_star)
importlib.reload(add_mesh_twisted_torus)
importlib.reload(add_mesh_gemstones)
importlib.reload(add_mesh_gears)
importlib.reload(add_mesh_3d_function_surface)
importlib.reload(add_mesh_round_cube)
importlib.reload(add_mesh_supertoroid)
importlib.reload(add_mesh_pyramid)
importlib.reload(add_mesh_torusknot)
importlib.reload(add_mesh_honeycomb)
importlib.reload(add_mesh_teapot)
importlib.reload(add_mesh_pipe_joint)
importlib.reload(add_mesh_solid)
importlib.reload(add_mesh_round_brilliant)
importlib.reload(add_mesh_menger_sponge)
importlib.reload(add_mesh_vertex)
importlib.reload(add_empty_as_parent)
importlib.reload(mesh_discombobulator)
importlib.reload(add_mesh_beam_builder)
importlib.reload(Wallfactory)
importlib.reload(Blocks)
else:
from . import add_mesh_star
from . import add_mesh_twisted_torus
from . import add_mesh_gemstones
from . import add_mesh_gears
from . import add_mesh_3d_function_surface
from . import add_mesh_round_cube
from . import add_mesh_supertoroid
from . import add_mesh_pyramid
from . import add_mesh_torusknot
from . import add_mesh_honeycomb
from . import add_mesh_teapot
from . import add_mesh_pipe_joint
from . import add_mesh_solid
from . import add_mesh_round_brilliant
from . import add_mesh_menger_sponge
from . import add_mesh_vertex
from . import add_empty_as_parent
from . import mesh_discombobulator
from . import add_mesh_beam_builder
from . import Wallfactory
from . import Blocks
import bpy
from bpy.types import Menu
from bpy.props import (
BoolProperty,
IntProperty,
FloatProperty,
StringProperty,
)
class INFO_MT_mesh_vert_add(Menu):
    # "Single Vert" submenu (no docstring: Blender would show it as a tooltip).
    bl_idname = "INFO_MT_mesh_vert_add"
    bl_label = "Single Vert"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator("mesh.primitive_vert_add", text="Add Single Vert")
        layout.separator()
        # Mirrored/origin-only variants, grouped below the separator.
        for op_id, caption in (
                ("mesh.primitive_emptyvert_add", "Object Origin Only"),
                ("mesh.primitive_symmetrical_vert_add", "Origin & Vert Mirrored"),
                ("mesh.primitive_symmetrical_empty_add", "Object Origin Mirrored"),
        ):
            layout.operator(op_id, text=caption)
class INFO_MT_mesh_gears_add(Menu):
    # "Gears" submenu.
    bl_idname = "INFO_MT_mesh_gears_add"
    bl_label = "Gears"

    def draw(self, context):
        menu = self.layout
        menu.operator_context = 'INVOKE_REGION_WIN'
        menu.operator("mesh.primitive_gear", text="Gear")
        menu.operator("mesh.primitive_worm_gear", text="Worm")
class INFO_MT_mesh_diamonds_add(Menu):
    # "Diamonds" submenu.
    bl_idname = "INFO_MT_mesh_diamonds_add"
    bl_label = "Diamonds"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        for op_id, caption in (
                ("mesh.primitive_brilliant_add", "Brilliant Diamond"),
                ("mesh.primitive_diamond_add", "Diamond"),
                ("mesh.primitive_gem_add", "Gem"),
        ):
            layout.operator(op_id, text=caption)
class INFO_MT_mesh_math_add(Menu):
    # "Math Functions" submenu.
    bl_idname = "INFO_MT_mesh_math_add"
    bl_label = "Math Functions"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.operator("mesh.primitive_z_function_surface",
                        text="Z Math Surface")
        layout.operator("mesh.primitive_xyz_function_surface",
                        text="XYZ Math Surface")
        # Consistency fix: use the local `layout` alias like the other entries
        # (was `self.layout.operator(...)`) — identical behavior.
        layout.operator("mesh.primitive_solid_add", text="Regular Solid")
class INFO_MT_mesh_mech(Menu):
    # "Mechanical" submenu grouping the pipe-joint and gear submenus.
    bl_idname = "INFO_MT_mesh_mech_add"
    bl_label = "Mechanical"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        for menu_id, caption, icon_name in (
                ("INFO_MT_mesh_pipe_joints_add", "Pipe Joints", "SNAP_PEEL_OBJECT"),
                ("INFO_MT_mesh_gears_add", "Gears", "SCRIPTWIN"),
        ):
            layout.menu(menu_id, text=caption, icon=icon_name)
class INFO_MT_mesh_extras_add(Menu):
    # "Extras" submenu: diamonds submenu, builders, misc primitives.
    bl_idname = "INFO_MT_mesh_extras_add"
    bl_label = "Extras"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.menu("INFO_MT_mesh_diamonds_add", text="Diamonds",
                    icon="PMARKER_SEL")
        layout.separator()
        layout.operator("mesh.add_beam", text="Beam Builder")
        layout.operator("mesh.wall_add", text="Wall Factory")
        layout.separator()
        for op_id, caption in (
                ("mesh.primitive_star_add", "Simple Star"),
                ("mesh.primitive_steppyramid_add", "Step Pyramid"),
                ("mesh.honeycomb_add", "Honeycomb"),
                ("mesh.primitive_teapot_add", "Teapot+"),
                ("mesh.menger_sponge_add", "Menger Sponge"),
        ):
            layout.operator(op_id, text=caption)
class INFO_MT_mesh_torus_add(Menu):
    # "Torus Objects" submenu.
    bl_idname = "INFO_MT_mesh_torus_add"
    bl_label = "Torus Objects"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        entries = (
            ("mesh.primitive_twisted_torus_add", "Twisted Torus"),
            ("mesh.primitive_supertoroid_add", "Supertoroid"),
            ("mesh.primitive_torusknot_add", "Torus Knot"),
        )
        for op_id, caption in entries:
            layout.operator(op_id, text=caption)
class INFO_MT_mesh_pipe_joints_add(Menu):
    # Define the "Pipe Joints" menu
    bl_idname = "INFO_MT_mesh_pipe_joints_add"
    bl_label = "Pipe Joints"

    def draw(self, context):
        """List all pipe-joint primitive operators."""
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        for op_id, label in (
                ("mesh.primitive_elbow_joint_add", "Pipe Elbow"),
                ("mesh.primitive_tee_joint_add", "Pipe T-Joint"),
                ("mesh.primitive_wye_joint_add", "Pipe Y-Joint"),
                ("mesh.primitive_cross_joint_add", "Pipe Cross-Joint"),
                ("mesh.primitive_n_joint_add", "Pipe N-Joint"),
                ):
            layout.operator(op_id, text=label)
class discombobulator_scene_props(bpy.types.PropertyGroup):
    """Scene-level settings consumed by the Discombobulator operator."""

    # Doodad mesh names; a class attribute on purpose so it acts as
    # shared storage across the add-on.
    DISC_doodads = []

    # Protusions Buttons:
    repeatprot = IntProperty(
        name="Repeat protusions",
        description=("Make several layers of protusion \n"
                     "Use carefully, runs recursively the discombulator"),
        default=1, min=1, max=4  # set to 4 because it's 2**n reqursive
    )
    doprots = BoolProperty(
        name="Make protusions",
        description="Check if we want to add protusions to the mesh",
        default=True
    )
    # Which face subdivision counts may be picked for protusions
    subpolygon1 = BoolProperty(
        name="1",
        default=True
    )
    subpolygon2 = BoolProperty(
        name="2",
        default=True
    )
    subpolygon3 = BoolProperty(
        name="3",
        default=True
    )
    subpolygon4 = BoolProperty(
        name="4",
        default=True
    )
    polygonschangedpercent = FloatProperty(
        name="Polygon %",
        description="Percentage of changed polygons",
        default=1.0
    )
    minHeight = FloatProperty(
        name="Min height",
        description="Minimal height of the protusions",
        default=0.2
    )
    maxHeight = FloatProperty(
        name="Max height",
        description="Maximal height of the protusions",
        default=0.4
    )
    # NOTE: the two taper descriptions previously said "height" -- a
    # copy-paste error from min/maxHeight; fixed so the UI tooltips
    # describe the right setting.
    minTaper = FloatProperty(
        name="Min taper",
        description="Minimal taper of the protusions",
        default=0.15, min=0.0, max=1.0,
        subtype='PERCENTAGE'
    )
    maxTaper = FloatProperty(
        name="Max taper",
        description="Maximal taper of the protusions",
        default=0.35, min=0.0, max=1.0,
        subtype='PERCENTAGE'
    )
    # Doodads buttons:
    dodoodads = BoolProperty(
        name="Make doodads",
        description="Check if we want to generate doodads",
        default=False
    )
    mindoodads = IntProperty(
        name="Minimum doodads number",
        description="Ask for the minimum number of doodads to generate per polygon",
        default=1, min=0, max=50
    )
    maxdoodads = IntProperty(
        name="Maximum doodads number",
        description="Ask for the maximum number of doodads to generate per polygon",
        default=6, min=1, max=50
    )
    doodMinScale = FloatProperty(
        name="Scale min", description="Minimum scaling of doodad",
        default=0.5, min=0.0, max=1.0,
        subtype='PERCENTAGE'
    )
    doodMaxScale = FloatProperty(
        name="Scale max",
        description="Maximum scaling of doodad",
        default=1.0, min=0.0, max=1.0,
        subtype='PERCENTAGE'
    )
    # Materials buttons:
    sideProtMat = IntProperty(
        name="Side's prot mat",
        description="Material of protusion's sides",
        default=0, min=0
    )
    topProtMat = IntProperty(
        name="Prot's top mat",
        description="Material of protusion's top",
        default=0, min=0
    )
# Menu wiring and add-on (un)registration helpers follow.
# Define "Extras" menu
def menu_func(self, context):
    """Append the extra mesh-object entries to the Add Mesh menu."""
    layout = self.layout
    layout.operator_context = 'INVOKE_REGION_WIN'
    layout.separator()
    layout.menu("INFO_MT_mesh_vert_add",
                text="Single Vert", icon="LAYER_ACTIVE")
    layout.operator("mesh.primitive_round_cube_add",
                    text="Round Cube", icon="MOD_SUBSURF")
    # Submenus grouping related object types
    for menu_id, label, icon in (
            ("INFO_MT_mesh_math_add", "Math Function", "PACKAGE"),
            ("INFO_MT_mesh_mech_add", "Mechanical", "SCRIPTWIN"),
            ("INFO_MT_mesh_torus_add", "Torus Objects", "MESH_TORUS"),
            ):
        layout.menu(menu_id, text=label, icon=icon)
    layout.separator()
    layout.operator("mesh.generate_geodesic_dome",
                    text="Geodesic Dome", icon="MESH_ICOSPHERE")
    layout.operator("discombobulate.ops",
                    text="Discombobulator", icon="RETOPO")
    layout.separator()
    layout.menu("INFO_MT_mesh_extras_add",
                text="Extras", icon="MESH_DATA")
    layout.separator()
    layout.operator("object.parent_to_empty",
                    text="Parent To Empty", icon="LINK_AREA")
def register():
    """Register every class of this module and attach the Extras menu.

    Must run before any Scene pointer property is created, because
    PointerProperty needs discombobulator_scene_props registered first.
    """
    bpy.utils.register_module(__name__)

    # Register Discombobulator properties
    bpy.types.Scene.discomb = bpy.props.PointerProperty(
        type=discombobulator_scene_props
    )

    # Error messages for Geodesic Domes
    bpy.types.Scene.error_message = StringProperty(
        name="actual error",
        default=""
    )
    # True until the geodesic dome operator has been invoked once
    bpy.types.Scene.geodesic_not_yet_called = BoolProperty(
        name="geodesic_not_called",
        default=True
    )
    # Wrap width for the geodesic-dome help text
    bpy.types.Scene.gd_help_text_width = IntProperty(
        name="Text Width",
        description="The width above which the text wraps",
        default=60,
        max=180, min=20
    )

    # Add "Extras" menu to the "Add Mesh" menu
    bpy.types.INFO_MT_mesh_add.append(menu_func)
def unregister():
    """Detach the Extras menu and undo everything register() added."""
    # Remove "Extras" menu from the "Add Mesh" menu.
    bpy.types.INFO_MT_mesh_add.remove(menu_func)
    # Delete the Scene properties created in register()
    del bpy.types.Scene.discomb
    del bpy.types.Scene.error_message
    del bpy.types.Scene.geodesic_not_yet_called
    del bpy.types.Scene.gd_help_text_width
    bpy.utils.unregister_module(__name__)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
|
sqlalchemy-redshift/sqlalchemy-redshift
|
refs/heads/master
|
redshift_sqlalchemy/__init__.py
|
5
|
"""
Compatibility module for projects referencing sqlalchemy_redshift
by its old name "redshift_sqlalchemy".
"""
import sys
import warnings
import sqlalchemy_redshift
# Warning text emitted once whenever this compatibility shim is imported.
DEPRECATION_MESSAGE = """\
redshift_sqlalchemy has been renamed to sqlalchemy_redshift.
The redshift_sqlalchemy compatibility package will be removed in
a future release, so it is recommended to update all package references.
"""

warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning)

# All references to module redshift_sqlalchemy will map to sqlalchemy_redshift
sys.modules['redshift_sqlalchemy'] = sqlalchemy_redshift
|
dksteele/Senior-Project
|
refs/heads/master
|
catkin_ws/src/sensors/src/sensors/sensing_register.py
|
1
|
#!/usr/bin/env python
import rospy
from sensors.srv import *
import datetime
import threading
from python_qt_binding import QtCore
from python_qt_binding.QtCore import QThread
def output_debug_message(msg):
    """Print *msg* prefixed with the current timestamp.

    Rewritten from the Python-2-only ``print a, b`` statement to a
    single-argument call that produces identical output on Python 2
    (prints one pre-formatted string) and also works on Python 3.
    """
    print("%s %s" % (datetime.datetime.now(), msg))
class SensingRegister(QThread):
    """Qt thread running the ROS service that registers sensor nodes.

    Each registering sensor gets a unique topic name and is recorded in
    the shared ``registered_nodes`` mapping passed to the constructor.
    """

    __registered_nodes__ = None   # dict: node index -> (platform, sensor type, topic)
    __register_service__ = None   # handle of the rospy registration service
    __widget__ = None             # placeholder for an owning widget (unused here)
    # Emitted with the new node's index so the GUI thread can add a tree entry.
    __add_tree_node__ = QtCore.pyqtSignal(object)

    def __init__(self, registered_nodes):
        QThread.__init__(self)
        self.__registered_nodes__ = registered_nodes

    def register_callback(self, req):
        """Service callback: allocate a topic for a newly registered sensor.

        NOTE(review): removed two dead ``global`` declarations from the
        original -- only instance state is used here, and no such module
        globals exist.
        """
        num_nodes = len(self.__registered_nodes__)
        topic = "sensor_node" + str(num_nodes)
        self.__registered_nodes__[num_nodes] = (req.platform_name, req.sensor_type, topic)
        output_debug_message("[INFO] : Registered " + req.platform_name + "->" + req.sensor_type + " On Topic " + topic)
        # Notify the GUI (cross-thread) that a node was added.
        self.__add_tree_node__.emit(num_nodes)
        return RegistrationServiceResponse(topic)

    # Setup service to register sensors on topics
    def run(self):
        output_debug_message("[INFO] : Starting Registration Service")
        # Keep the service handle on the instance (the original assigned a
        # local, leaving the class attribute permanently None).
        self.__register_service__ = rospy.Service(
            'sensors_register', RegistrationService, self.register_callback)
        output_debug_message("[INFO] : Starting Sensing Thread")
|
christianblunden/googmuze
|
refs/heads/master
|
resources/lib/google/protobuf/compiler/plugin_pb2.py
|
24
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/compiler/plugin.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import google.protobuf.descriptor_pb2
# File descriptor for google/protobuf/compiler/plugin.proto. The
# serialized_pb blob is the wire-format FileDescriptorProto emitted by
# protoc -- generated data, do not edit by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/protobuf/compiler/plugin.proto',
  package='google.protobuf.compiler',
  serialized_pb='\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"}\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xaa\x01\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a>\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\tB,\n\x1c\x63om.google.protobuf.compilerB\x0cPluginProtos')
# Message descriptor for CodeGeneratorRequest (protoc -> plugin request).
# Generated by protoc; field numbers/types mirror plugin.proto exactly.
_CODEGENERATORREQUEST = _descriptor.Descriptor(
  name='CodeGeneratorRequest',
  full_name='google.protobuf.compiler.CodeGeneratorRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='file_to_generate', full_name='google.protobuf.compiler.CodeGeneratorRequest.file_to_generate', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='parameter', full_name='google.protobuf.compiler.CodeGeneratorRequest.parameter', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='proto_file', full_name='google.protobuf.compiler.CodeGeneratorRequest.proto_file', index=2,
      number=15, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=101,
  serialized_end=226,
)
# Message descriptor for the nested CodeGeneratorResponse.File message
# (one generated output file). Generated by protoc.
_CODEGENERATORRESPONSE_FILE = _descriptor.Descriptor(
  name='File',
  full_name='google.protobuf.compiler.CodeGeneratorResponse.File',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='insertion_point', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='content', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.content', index=2,
      number=15, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=337,
  serialized_end=399,
)
# Message descriptor for CodeGeneratorResponse (plugin -> protoc reply).
# Generated by protoc.
_CODEGENERATORRESPONSE = _descriptor.Descriptor(
  name='CodeGeneratorResponse',
  full_name='google.protobuf.compiler.CodeGeneratorResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='error', full_name='google.protobuf.compiler.CodeGeneratorResponse.error', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file', full_name='google.protobuf.compiler.CodeGeneratorResponse.file', index=1,
      number=15, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_CODEGENERATORRESPONSE_FILE, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=229,
  serialized_end=399,
)

# Resolve cross-references between the descriptors and register them on
# the file descriptor (generated wiring -- do not edit).
_CODEGENERATORREQUEST.fields_by_name['proto_file'].message_type = google.protobuf.descriptor_pb2._FILEDESCRIPTORPROTO
_CODEGENERATORRESPONSE_FILE.containing_type = _CODEGENERATORRESPONSE;
_CODEGENERATORRESPONSE.fields_by_name['file'].message_type = _CODEGENERATORRESPONSE_FILE
DESCRIPTOR.message_types_by_name['CodeGeneratorRequest'] = _CODEGENERATORREQUEST
DESCRIPTOR.message_types_by_name['CodeGeneratorResponse'] = _CODEGENERATORRESPONSE
class CodeGeneratorRequest(_message.Message):
  # Concrete message class built from _CODEGENERATORREQUEST by the
  # GeneratedProtocolMessageType metaclass (Python 2 generated style).
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _CODEGENERATORREQUEST

  # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest)
class CodeGeneratorResponse(_message.Message):
  # Concrete message class built from _CODEGENERATORRESPONSE by the
  # GeneratedProtocolMessageType metaclass (Python 2 generated style).
  __metaclass__ = _reflection.GeneratedProtocolMessageType

  class File(_message.Message):
    # Nested message representing a single generated output file.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CODEGENERATORRESPONSE_FILE

    # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File)
  DESCRIPTOR = _CODEGENERATORRESPONSE

  # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse)

# Attach the file-level options (java_package / java_outer_classname)
# parsed from the serialized proto.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\034com.google.protobuf.compilerB\014PluginProtos')
|
sdoran35/hate-to-hugs
|
refs/heads/master
|
venv/lib/python3.6/site-packages/pip/req/__init__.py
|
806
|
from __future__ import absolute_import
from .req_install import InstallRequirement
from .req_set import RequirementSet, Requirements
from .req_file import parse_requirements
# Public API of the pip.req package, re-exported from its submodules.
__all__ = [
    "RequirementSet", "Requirements", "InstallRequirement",
    "parse_requirements",
]
|
dancingdan/tensorflow
|
refs/heads/master
|
tensorflow/python/platform/control_imports.py
|
153
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switch between Google or open source dependencies."""
# Switch between Google and OSS dependencies
USE_OSS = True
# Per-dependency switches determining whether each dependency is ready
# to be replaced by its OSS equivalence.
# TODO(danmane,mrry,opensource): Flip these switches, then remove them
OSS_APP = True
OSS_FLAGS = True
OSS_GFILE = True
OSS_GOOGLETEST = True
OSS_LOGGING = True
OSS_PARAMETERIZED = True
|
openstack/keystone
|
refs/heads/master
|
keystone/common/cache/core.py
|
2
|
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone Caching Layer Implementation."""
import os
from dogpile.cache import region
from dogpile.cache import util
from oslo_cache import core as cache
from keystone.common.cache import _context_cache
import keystone.conf
# Global keystone configuration object used throughout this module.
CONF = keystone.conf.CONF
class RegionInvalidationManager(object):
    """Track the per-region id used for distributed cache invalidation.

    The id is stored under a reserved key in the invalidation region;
    rotating it implicitly invalidates every key mangled with the old id.
    """

    # Prefix marking keys that hold a region id rather than cached data.
    REGION_KEY_PREFIX = '<<<region>>>:'

    def __init__(self, invalidation_region, region_name):
        self._invalidation_region = invalidation_region
        self._region_key = self.REGION_KEY_PREFIX + region_name

    def _generate_new_id(self):
        # 10 random bytes make accidental collisions negligible.
        return os.urandom(10)

    @property
    def region_id(self):
        # Created lazily; expiration_time=-1 means the id never expires.
        return self._invalidation_region.get_or_create(
            self._region_key, self._generate_new_id, expiration_time=-1)

    def invalidate_region(self):
        fresh_id = self._generate_new_id()
        self._invalidation_region.set(self._region_key, fresh_id)
        return fresh_id

    def is_region_key(self, key):
        return key == self._region_key
class DistributedInvalidationStrategy(region.RegionInvalidationStrategy):
    """Invalidation strategy driven by the shared region id.

    All timestamp-based checks return False: entries are never considered
    locally invalidated. Invalidation instead rotates the region id via
    the RegionInvalidationManager, which changes every mangled key.
    """

    def __init__(self, region_manager):
        self._region_manager = region_manager

    def invalidate(self, hard=None):
        # Rotating the region id orphans all previously written keys.
        self._region_manager.invalidate_region()

    def is_invalidated(self, timestamp):
        return False

    def was_hard_invalidated(self):
        return False

    def is_hard_invalidated(self, timestamp):
        return False

    def was_soft_invalidated(self):
        return False

    def is_soft_invalidated(self, timestamp):
        return False
def key_mangler_factory(invalidation_manager, orig_key_mangler):
    """Build a key mangler that appends the current region id to keys.

    Region-bookkeeping keys pass through untouched; every other key gets
    ':<region_id>' appended before any original mangler runs, so rotating
    the region id orphans all previously written cache entries.
    """
    def key_mangler(key):
        # NOTE(dstanek): Since *all* keys go through the key mangler we
        # need to make sure the region keys don't get the region_id added.
        # If it were there would be no way to get to it, making the cache
        # effectively useless.
        if not invalidation_manager.is_region_key(key):
            key = '%s:%s' % (key, invalidation_manager.region_id)
        return orig_key_mangler(key) if orig_key_mangler else key
    return key_mangler
def create_region(name):
    """Create a dogpile region.

    Wraps oslo_cache.core.create_region. This is used to ensure that the
    Region is properly patched and allows us to more easily specify a region
    name.

    :param str name: The region name
    :returns: The new region.
    :rtype: :class:`dogpile.cache.region.CacheRegion`
    """
    # Named so it does not shadow the module-level `dogpile.cache.region`
    # import (the original used the local name `region`).
    new_region = cache.create_region()
    new_region.name = name  # oslo.cache doesn't allow this yet
    return new_region
# Default region shared by most keystone caching users.
CACHE_REGION = create_region(name='shared default')
# Region that stores only the per-region invalidation ids.
CACHE_INVALIDATION_REGION = create_region(name='invalidation region')

# Re-export the context-cache model handler registration helper.
register_model_handler = _context_cache._register_model_handler
def configure_cache(region=None):
    """Configure a dogpile region and wire in distributed invalidation.

    :param region: region to configure; defaults to the shared
        ``CACHE_REGION``. Safe to call repeatedly -- the wrapping and
        key-mangler setup run only on the first configuration.
    """
    if region is None:
        region = CACHE_REGION
    # NOTE(morganfainberg): running cache.configure_cache_region()
    # sets region.is_configured, this must be captured before
    # cache.configure_cache_region is called.
    configured = region.is_configured
    cache.configure_cache_region(CONF, region)
    # Only wrap the region if it was not configured. This should be pushed
    # to oslo_cache lib somehow.
    if not configured:
        region.wrap(_context_cache._ResponseCacheProxy)
        region_manager = RegionInvalidationManager(
            CACHE_INVALIDATION_REGION, region.name)
        region.key_mangler = key_mangler_factory(
            region_manager, region.key_mangler)
        region.region_invalidator = DistributedInvalidationStrategy(
            region_manager)
def _sha1_mangle_key(key):
    """Wrapper for dogpile's sha1_mangle_key.

    dogpile's sha1_mangle_key function expects an encoded string, so we
    should take steps to properly handle multiple inputs before passing
    the key through.

    NOTE(dstanek): this was copied directly from olso_cache
    """
    try:
        encoded = key.encode('utf-8', errors='xmlcharrefreplace')
    except (UnicodeError, AttributeError):
        # NOTE(stevemar): if encoding fails just continue anyway.
        encoded = key
    return util.sha1_mangle_key(encoded)
def configure_invalidation_region():
    """Configure the region that holds the per-region invalidation ids.

    Configured manually (not via oslo.cache's configure helper) so that
    entries never expire; idempotent -- returns early if already set up.
    """
    if CACHE_INVALIDATION_REGION.is_configured:
        return
    # NOTE(dstanek): Configuring this region manually so that we control the
    # expiration and can ensure that the keys don't expire.
    config_dict = cache._build_cache_config(CONF)
    config_dict['expiration_time'] = None  # we don't want an expiration
    CACHE_INVALIDATION_REGION.configure_from_config(
        config_dict, '%s.' % CONF.cache.config_prefix)
    # NOTE(breton): Wrap the cache invalidation region to avoid excessive
    # calls to memcached, which would result in poor performance.
    CACHE_INVALIDATION_REGION.wrap(_context_cache._ResponseCacheProxy)
    # NOTE(morganfainberg): if the backend requests the use of a
    # key_mangler, we should respect that key_mangler function. If a
    # key_mangler is not defined by the backend, use the sha1_mangle_key
    # mangler provided by dogpile.cache. This ensures we always use a fixed
    # size cache-key.
    if CACHE_INVALIDATION_REGION.key_mangler is None:
        CACHE_INVALIDATION_REGION.key_mangler = _sha1_mangle_key
def get_memoization_decorator(group, expiration_group=None, region=None):
    """Return an oslo.cache memoization decorator bound to a region.

    Defaults to the shared ``CACHE_REGION`` when no region is supplied.
    """
    target_region = CACHE_REGION if region is None else region
    return cache.get_memoization_decorator(
        CONF, target_region, group, expiration_group=expiration_group)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.