code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
"""
Benchmarking and performance tests.
"""
import pytest
from pluggy import (_multicall, _legacymulticall, HookImpl, HookspecMarker,
HookimplMarker)
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def MC(methods, kwargs, callertype, firstresult=False):
    """Wrap each method in a HookImpl and run them all through *callertype*.

    *methods* are hook functions carrying pluggy's `example_impl` options;
    *kwargs* is the call payload; *callertype* is one of the multicall
    implementations under test.
    """
    impls = [
        HookImpl(None, "<temp>", method, method.example_impl)
        for method in methods
    ]
    return callertype(impls, kwargs, {"firstresult": firstresult})
@hookimpl
def hook(arg1, arg2, arg3):
    """Plain hook implementation: echo the three arguments as a tuple."""
    return (arg1, arg2, arg3)
@hookimpl(hookwrapper=True)
def wrapper(arg1, arg2, arg3):
    # Hook wrapper that adds no behavior before or after the inner calls;
    # it only exists to measure the wrapper-dispatch overhead.
    yield
@pytest.fixture(
    params=[10, 100],
    ids="hooks={}".format,
)
def hooks(request):
    """Parametrized list repeating the plain `hook` impl 10 or 100 times."""
    return [hook] * request.param
@pytest.fixture(
    params=[10, 100],
    ids="wrappers={}".format,
)
def wrappers(request):
    """Parametrized list repeating the `wrapper` impl 10 or 100 times."""
    return [wrapper] * request.param
@pytest.fixture(
    params=[_multicall, _legacymulticall],
    ids=lambda item: item.__name__
)
def callertype(request):
    # Parametrizes each benchmark over both pluggy multicall implementations
    # so their performance can be compared side by side.
    return request.param
def inner_exec(methods, callertype):
    """Run one multicall over *methods* with a fixed three-argument payload."""
    payload = {'arg1': 1, 'arg2': 2, 'arg3': 3}
    return MC(methods, payload, callertype)
def test_hook_and_wrappers_speed(benchmark, hooks, wrappers, callertype):
    """Benchmark a multicall over the combined hook and wrapper lists."""
    methods = hooks + wrappers
    benchmark(inner_exec, methods, callertype)
| UK992/servo | tests/wpt/web-platform-tests/tools/third_party/pluggy/testing/benchmark.py | Python | mpl-2.0 | 1,328 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import osv
from openerp.report import report_sxw
def titlize(journal_name):
    """Strip the trailing word 'journal' (and anything after it) from a name.

    E.g. 'cash journal' -> 'cash'.  The original popped words until it found
    'journal' and raised IndexError when that word was absent; we now return
    the name unchanged in that case instead of crashing the report.
    """
    words = journal_name.split()
    if 'journal' not in words:
        # No marker word at all: nothing to strip.
        return journal_name
    while words.pop() != 'journal':
        continue
    return ' '.join(words)
class order(report_sxw.rml_parse):
    """RML parser for the POS receipt report.

    Exposes helper callables (net amount, discount, journal amounts,
    company address, titlize) to the report template via localcontext.
    """

    def __init__(self, cr, uid, name, context):
        super(order, self).__init__(cr, uid, name, context=context)
        user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        partner = user.company_id.partner_id
        self.localcontext.update({
            'time': time,
            'disc': self.discount,
            'net': self.netamount,
            'get_journal_amt': self._get_journal_amt,
            'address': partner or False,
            'titlize': titlize
        })

    def netamount(self, order_line_id):
        """Return qty * unit price for a single pos.order.line id."""
        sql = 'select (qty*price_unit) as net_price from pos_order_line where id = %s'
        self.cr.execute(sql, (order_line_id,))
        res = self.cr.fetchone()
        return res[0]

    def discount(self, order_id):
        """Return the summed discount amount over all lines of *order_id*."""
        sql = 'select discount, price_unit, qty from pos_order_line where order_id = %s '
        self.cr.execute(sql, (order_id,))
        dsum = 0
        for discount, price_unit, qty in self.cr.fetchall():
            # Keep the original `!= 0` test (not plain truthiness) so a NULL
            # discount behaves exactly as before.
            if discount != 0:
                dsum = dsum + (qty * (discount * price_unit / 100))
        return dsum

    def _get_journal_amt(self, order_id):
        """Return [{'name': journal name, 'amt': amount}, ...] for the bank
        statement lines linked to the POS order.

        Fix: the original interpolated order_id into the SQL string with
        ``%d`` (injection-prone and type-fragile); it is now passed as a
        bound query parameter.
        """
        sql = """select aj.name, absl.amount as amt
                   from account_bank_statement as abs
                   LEFT JOIN account_bank_statement_line as absl
                          ON abs.id = absl.statement_id
                   LEFT JOIN account_journal as aj ON aj.id = abs.journal_id
                  WHERE absl.pos_statement_id = %s"""
        self.cr.execute(sql, (order_id,))
        return self.cr.dictfetchall()
class report_order_receipt(osv.AbstractModel):
    """Report model binding the receipt QWeb template to the rml parser."""
    # Registered report name, abstract-report plumbing, the template to
    # render, and the parser class (`order`, defined above) that supplies
    # the rendering context.
    _name = 'report.point_of_sale.report_receipt'
    _inherit = 'report.abstract_report'
    _template = 'point_of_sale.report_receipt'
    _wrapped_report_class = order
| vileopratama/vitech | src/addons/point_of_sale/report/pos_receipt.py | Python | mit | 2,154 |
import bench
from ucollections import namedtuple
# Two-field record type; the benchmark reads its `num` field by attribute
# access to measure namedtuple field-lookup cost.
T = namedtuple("Tup", ["num", "bar"])
def test(num):
    """Busy-loop up to t.num, reading the bound via namedtuple attribute
    access (the *num* argument from the harness is intentionally unused)."""
    t = T(20000000, 0)
    counter = 0
    while counter < t.num:
        counter += 1
# Hand the benchmark body to the shared `bench` harness for timing.
bench.run(test)
| pozetroninc/micropython | tests/internal_bench/var-8-namedtuple-1st.py | Python | mit | 190 |
"""Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
# Public API.  Note that the C-accelerated `_warnings` module may replace
# some of the definitions below at import time (see the bottom of the file).
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
           "resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
    """Hook to write a warning to a file; replace if you like."""
    destination = sys.stderr if file is None else file
    try:
        destination.write(formatwarning(message, category, filename, lineno, line))
    except IOError:
        # The destination (probably stderr) is invalid; the warning is lost.
        pass
def formatwarning(message, category, filename, lineno, line=None):
    """Function to format a warning the standard way."""
    formatted = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
    if line is None:
        # No source text supplied: try to look it up from the file.
        line = linecache.getline(filename, lineno)
    if line:
        formatted += "  %s\n" % line.strip()
    return formatted
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
                   append=False):
    """Insert an entry into the list of warnings filters (at the front).
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'message' -- a regex that the warning message must match
    'category' -- a class that the warning must be a subclass of
    'module' -- a regex that the module name must match
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    import re
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(message, str), "message must be a string"
    assert isinstance(category, type), "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, str), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # Message matching is case-insensitive (re.I); module matching is not.
    item = (action, re.compile(message, re.I), category,
            re.compile(module), lineno)
    if append:
        filters.append(item)
    else:
        filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=False):
    """Insert a simple entry into the list of warnings filters (at the front).
    A simple filter matches all modules and messages.
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'category' -- a class that the warning must be a subclass of
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # None in the message/module slots means "match anything" (see the
    # filter-tuple notes near the bottom of this module).
    item = (action, None, category, None, lineno)
    if append:
        filters.append(item)
    else:
        filters.insert(0, item)
def resetwarnings():
    """Clear the list of warning filters, so that no filters are active."""
    # Clear in place so any existing references to the filters list stay valid.
    del filters[:]
class _OptionError(Exception):
    """Exception used by option processing helpers."""
    # Raised by _setoption/_getaction/_getcategory for malformed -W options;
    # _processoptions catches it and prints a diagnostic to stderr.
    pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
    """Register each -W option string, reporting malformed ones to stderr."""
    for option in args:
        try:
            _setoption(option)
        except _OptionError as exc:
            print("Invalid -W option ignored:", exc, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
    """Parse one -W option of the form action:message:category:module:lineno
    (all fields optional) and install the corresponding filter.

    Raises _OptionError when the option string is malformed.
    """
    import re
    fields = arg.split(':')
    if len(fields) > 5:
        raise _OptionError("too many fields (max 5): %r" % (arg,))
    while len(fields) < 5:
        fields.append('')
    action, message, category, module, lineno = (part.strip()
                                                 for part in fields)
    action = _getaction(action)
    message = re.escape(message)
    category = _getcategory(category)
    module = re.escape(module)
    if module:
        # Anchor the module regex so it must match the full module name.
        module = module + '$'
    if lineno:
        try:
            lineno = int(lineno)
            if lineno < 0:
                raise ValueError
        except (ValueError, OverflowError):
            raise _OptionError("invalid lineno %r" % (lineno,))
    else:
        lineno = 0
    filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception."""
    # Check if message is already a Warning object
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    assert issubclass(category, Warning)
    # Get context information
    try:
        caller = sys._getframe(stacklevel)
    except ValueError:
        # Not enough frames on the stack: attribute the warning to the
        # sys module at line 1.
        globals = sys.__dict__
        lineno = 1
    else:
        globals = caller.f_globals
        lineno = caller.f_lineno
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        if fnl.endswith((".pyc", ".pyo")):
            # Point at the .py source rather than the compiled file.
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    # Per-module registry used by warn_explicit to suppress duplicates.
    registry = globals.setdefault("__warningregistry__", {})
    warn_explicit(message, category, filename, lineno, module, registry,
                  globals)
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    # Low-level warning issuer: match (message, category, module, lineno)
    # against the filters list and perform the selected action.  *registry*
    # is a per-module dict used to suppress duplicate reports;
    # *module_globals* is only used to prime linecache for zipimports.
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        # None regexes match anything; lineno 0 matches any line.
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        # No filter matched: fall back to the module-wide default action.
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return

    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    linecache.getlines(filename, module_globals)

    if action == "error":
        raise message
    # Other actions
    if action == "once":
        # Report only the first occurrence anywhere, keyed on (text, category).
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        # Report the first occurrence per module (line number ignored).
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    if not hasattr(showwarning, "__call__"):
        raise TypeError("warnings.showwarning() must be set to a "
                        "function or method")
    # Print message and context
    showwarning(message, category, filename, lineno)
class WarningMessage(object):
    """Holds the result of a single showwarning() call."""

    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Store each detail explicitly (same attributes as _WARNING_DETAILS).
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
class catch_warnings(object):
    """A context manager that copies and restores the warnings filter upon
    exiting the context.

    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of warnings.showwarning() and be appended to a list
    returned by the context manager. Otherwise None is returned by the context
    manager. The objects appended to the list are arguments whose attributes
    mirror the arguments to showwarning().

    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.
    """

    def __init__(self, *, record=False, module=None):
        """Specify whether to record warnings and if an alternative module
        should be used other than sys.modules['warnings'].

        For compatibility with Python 3.0, please consider all arguments to be
        keyword-only.
        """
        self._record = record
        self._module = module if module is not None else sys.modules['warnings']
        self._entered = False

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        return "%s(%s)" % (type(self).__name__, ", ".join(args))

    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Snapshot the filter list and showwarning hook; work on a copy of
        # the filters so the originals can be restored verbatim on exit.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if not self._record:
            return None
        log = []
        def record_hook(*args, **kwargs):
            log.append(WarningMessage(*args, **kwargs))
        self._module.showwarning = record_hook
        return log

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned, or 0 to mean any line
# If either of the compiled regexes is None, match anything.
_warnings_defaults = False
try:
    # Prefer the C implementation, which supplies the shared filter state
    # and faster warn()/warn_explicit() replacements.
    from _warnings import (filters, _defaultaction, _onceregistry,
                            warn, warn_explicit)
    defaultaction = _defaultaction
    onceregistry = _onceregistry
    _warnings_defaults = True
except ImportError:
    # Pure-Python fallback state used by the definitions above.
    filters = []
    defaultaction = "default"
    onceregistry = {}

# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
    # Hide warnings that mainly interest developers by default.
    silence = [ImportWarning, PendingDeprecationWarning]
    silence.append(DeprecationWarning)
    for cls in silence:
        simplefilter("ignore", category=cls)
    bytes_warning = sys.flags.bytes_warning
    if bytes_warning > 1:
        bytes_action = "error"
    elif bytes_warning:
        bytes_action = "default"
    else:
        bytes_action = "ignore"
    simplefilter(bytes_action, category=BytesWarning, append=1)
    # resource usage warnings are enabled by default in pydebug mode
    if hasattr(sys, 'gettotalrefcount'):
        resource_action = "always"
    else:
        resource_action = "ignore"
    simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
| dya2/python-for-android | python3-alpha/python3-src/Lib/warnings.py | Python | apache-2.0 | 13,836 |
import sys
from unicode_parse_common import *
# http://www.unicode.org/Public/5.1.0/ucd/Scripts.txt
# Maps Unicode Scripts.txt script names to HB_Script_* enum names; scripts
# not listed here fall back to HB_Script_Common via ScriptDict below.
script_to_harfbuzz = {
  # This is the list of HB_Script_* at the time of writing
  'Common': 'HB_Script_Common',
  'Greek': 'HB_Script_Greek',
  'Cyrillic': 'HB_Script_Cyrillic',
  'Armenian': 'HB_Script_Armenian',
  'Hebrew': 'HB_Script_Hebrew',
  'Arabic': 'HB_Script_Arabic',
  'Syriac': 'HB_Script_Syriac',
  'Thaana': 'HB_Script_Thaana',
  'Devanagari': 'HB_Script_Devanagari',
  'Bengali': 'HB_Script_Bengali',
  'Gurmukhi': 'HB_Script_Gurmukhi',
  'Gujarati': 'HB_Script_Gujarati',
  'Oriya': 'HB_Script_Oriya',
  'Tamil': 'HB_Script_Tamil',
  'Telugu': 'HB_Script_Telugu',
  'Kannada': 'HB_Script_Kannada',
  'Malayalam': 'HB_Script_Malayalam',
  'Sinhala': 'HB_Script_Sinhala',
  'Thai': 'HB_Script_Thai',
  'Lao': 'HB_Script_Lao',
  'Tibetan': 'HB_Script_Tibetan',
  'Myanmar': 'HB_Script_Myanmar',
  'Georgian': 'HB_Script_Georgian',
  'Hangul': 'HB_Script_Hangul',
  'Ogham': 'HB_Script_Ogham',
  'Runic': 'HB_Script_Runic',
  'Khmer': 'HB_Script_Khmer',
  'Inherited': 'HB_Script_Inherited',
}
class ScriptDict(object):
    """Read-only mapping wrapper that defaults unknown keys (and keys stored
    as None) to 'HB_Script_Common'."""

    def __init__(self, base):
        self.base = base

    def __getitem__(self, key):
        value = self.base.get(key)
        if value is None:
            return 'HB_Script_Common'
        return value
def main(infile, outfile):
    # Parse a Unicode Scripts.txt stream into (start, end, HB_Script) ranges
    # and emit a C header with a lookup table (Python 2 `print >>` syntax;
    # the emitted text must stay byte-exact).
    ranges = unicode_file_parse(infile,
                                ScriptDict(script_to_harfbuzz),
                                'HB_Script_Common')
    ranges = sort_and_merge(ranges)

    # Header: provenance comment, include guard, struct definition.
    print >>outfile, '// Generated from Unicode script tables\n'
    print >>outfile, '#ifndef SCRIPT_PROPERTIES_H_'
    print >>outfile, '#define SCRIPT_PROPERTIES_H_\n'
    print >>outfile, '#include <stdint.h>'
    print >>outfile, '#include "harfbuzz-shaper.h"\n'
    print >>outfile, 'struct script_property {'
    print >>outfile, '  uint32_t range_start;'
    print >>outfile, '  uint32_t range_end;'
    print >>outfile, '  HB_Script script;'
    print >>outfile, '};\n'
    # One table row per merged codepoint range.
    print >>outfile, 'static const struct script_property script_properties[] = {'
    for (start, end, value) in ranges:
        print >>outfile, '  {0x%x, 0x%x, %s},' % (start, end, value)
    print >>outfile, '};\n'
    print >>outfile, 'static const unsigned script_properties_count = %d;\n' % len(ranges)
    print >>outfile, '#endif  // SCRIPT_PROPERTIES_H_'
if __name__ == '__main__':
    # CLI: expects the input Scripts.txt path and the output header path
    # (Python 2: `file` builtin and print statement).
    if len(sys.argv) != 3:
        print 'Usage: %s <input .txt> <output .h>' % sys.argv[0]
    else:
        main(file(sys.argv[1], 'r'), file(sys.argv[2], 'w+'))
| zcbenz/cefode-chromium | third_party/harfbuzz/contrib/tables/scripts-parse.py | Python | bsd-3-clause | 2,516 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import select_class_tests
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
    # Start the fixture web server and a real Firefox session once for the
    # whole module; teardown_module below is responsible for stopping both.
    webserver = SimpleWebServer()
    webserver.start()
    FirefoxSelectElementHandlingTests.webserver = webserver
    FirefoxSelectElementHandlingTests.driver = webdriver.Firefox()
class FirefoxSelectElementHandlingTests(select_class_tests.WebDriverSelectSupportTests):
    # Runs the shared <select>-element test suite against Firefox;
    # setup_module attaches the driver and webserver as class attributes.
    pass
def teardown_module(module):
    # Shut down the browser session and web server started by setup_module.
    FirefoxSelectElementHandlingTests.driver.quit()
    FirefoxSelectElementHandlingTests.webserver.stop()
| jerome-jacob/selenium | py/test/selenium/webdriver/firefox/ff_select_support_class_tests.py | Python | apache-2.0 | 1,419 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides the definition of an RPC serialization handler"""
import abc
class Serializer(object):
    """Generic (de-)serialization definition base class."""
    # NOTE(review): __metaclass__ is only honored by Python 2; under
    # Python 3 this class would not actually become an ABC -- confirm the
    # intended interpreter before porting.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def serialize_entity(self, context, entity):
        """Serialize something to primitive form.

        :param context: Security context
        :param entity: Entity to be serialized
        :returns: Serialized form of entity
        """
        pass

    @abc.abstractmethod
    def deserialize_entity(self, context, entity):
        """Deserialize something from primitive form.

        :param context: Security context
        :param entity: Primitive to be deserialized
        :returns: Deserialized form of entity
        """
        pass
class NoOpSerializer(Serializer):
    """A serializer that does nothing."""

    def serialize_entity(self, context, entity):
        # Identity transform: the entity is assumed to already be primitive.
        return entity

    def deserialize_entity(self, context, entity):
        # Identity transform, mirroring serialize_entity.
        return entity
| denismakogon/trove-guestagent | trove_guestagent/openstack/common/rpc/serializer.py | Python | apache-2.0 | 1,600 |
# Generated by h2py from /usr/include/netinet/in.h
# NOTE(review): machine-generated constant dump (h2py, Python 2).  It uses
# Python 2 literals (octal like 022, long suffixes like 199506L) and it
# re-assigns many names because h2py emits every branch of conditional
# #defines -- the last assignment wins at import time.  Do not hand-edit
# values; regenerate from the headers instead.
# Included from sys/feature_tests.h

# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506L
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
from TYPES import *
# Included from sys/stream.h

# Included from sys/vnode.h
from TYPES import *
# Included from sys/t_lock.h

# Included from sys/machlock.h
from TYPES import *
LOCK_HELD_VALUE = 0xff
def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))

def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)

CLOCK_LEVEL = 10
LOCK_LEVEL = 10
DISP_LEVEL = (LOCK_LEVEL + 1)
PTR24_LSB = 5
PTR24_MSB = (PTR24_LSB + 24)
PTR24_ALIGN = 32
PTR24_BASE = 0xe0000000

# Included from sys/param.h
from TYPES import *
_POSIX_VDISABLE = 0
MAX_INPUT = 512
MAX_CANON = 256
UID_NOBODY = 60001
GID_NOBODY = UID_NOBODY
UID_NOACCESS = 60002
MAX_TASKID = 999999
MAX_MAXPID = 999999
DEFAULT_MAXPID = 999999
DEFAULT_JUMPPID = 100000
DEFAULT_MAXPID = 30000
DEFAULT_JUMPPID = 0
MAXUID = 2147483647
MAXPROJID = MAXUID
MAXLINK = 32767
NMOUNT = 40
CANBSIZ = 256
NOFILE = 20
NGROUPS_UMIN = 0
NGROUPS_UMAX = 32
NGROUPS_MAX_DEFAULT = 16
NZERO = 20
NULL = 0L
NULL = 0
CMASK = 022
CDLIMIT = (1L<<11)
NBPS = 0x20000
NBPSCTR = 512
UBSIZE = 512
SCTRSHFT = 9
SYSNAME = 9
PREMOTE = 39
MAXPATHLEN = 1024
MAXSYMLINKS = 20
MAXNAMELEN = 256
NADDR = 13
PIPE_BUF = 5120
PIPE_MAX = 5120
NBBY = 8
MAXBSIZE = 8192
DEV_BSIZE = 512
DEV_BSHIFT = 9
MAXFRAG = 8
MAXOFF32_T = 0x7fffffff
MAXOFF_T = 0x7fffffffffffffffl
MAXOFFSET_T = 0x7fffffffffffffffl
MAXOFF_T = 0x7fffffffl
MAXOFFSET_T = 0x7fffffff
# NOTE(review): h2py left the following macros untranslated; the trailing
# backslash continues each `return` onto the next source line, so these
# defs are syntactically suspect -- verify this module actually imports
# before relying on them (same pattern recurs further down).
def btodb(bytes): return \

def dbtob(db): return \

def lbtodb(bytes): return \

def ldbtob(db): return \

NCARGS32 = 0x100000
NCARGS64 = 0x200000
NCARGS = NCARGS64
NCARGS = NCARGS32
FSHIFT = 8
FSCALE = (1<<FSHIFT)
def DELAY(n): return drv_usecwait(n)

def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)

def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)

def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))

def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))

def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))

def ptob(x): return ((x) << PAGESHIFT)

def btop(x): return (((x) >> PAGESHIFT))

def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))

def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))

def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))

_AIO_LISTIO_MAX = (4096)
_AIO_MAX = (-1)
_MQ_OPEN_MAX = (32)
_MQ_PRIO_MAX = (32)
_SEM_NSEMS_MAX = INT_MAX
_SEM_VALUE_MAX = INT_MAX

# Included from sys/unistd.h
_CS_PATH = 65
_CS_LFS_CFLAGS = 68
_CS_LFS_LDFLAGS = 69
_CS_LFS_LIBS = 70
_CS_LFS_LINTFLAGS = 71
_CS_LFS64_CFLAGS = 72
_CS_LFS64_LDFLAGS = 73
_CS_LFS64_LIBS = 74
_CS_LFS64_LINTFLAGS = 75
_CS_XBS5_ILP32_OFF32_CFLAGS = 700
_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
_CS_XBS5_ILP32_OFF32_LIBS = 702
_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
_CS_XBS5_ILP32_OFFBIG_LIBS = 707
_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
_CS_XBS5_LP64_OFF64_CFLAGS = 709
_CS_XBS5_LP64_OFF64_LDFLAGS = 710
_CS_XBS5_LP64_OFF64_LIBS = 711
_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_XOPEN_VERSION = 12
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_STREAM_MAX = 16
_SC_TZNAME_MAX = 17
_SC_AIO_LISTIO_MAX = 18
_SC_AIO_MAX = 19
_SC_AIO_PRIO_DELTA_MAX = 20
_SC_ASYNCHRONOUS_IO = 21
_SC_DELAYTIMER_MAX = 22
_SC_FSYNC = 23
_SC_MAPPED_FILES = 24
_SC_MEMLOCK = 25
_SC_MEMLOCK_RANGE = 26
_SC_MEMORY_PROTECTION = 27
_SC_MESSAGE_PASSING = 28
_SC_MQ_OPEN_MAX = 29
_SC_MQ_PRIO_MAX = 30
_SC_PRIORITIZED_IO = 31
_SC_PRIORITY_SCHEDULING = 32
_SC_REALTIME_SIGNALS = 33
_SC_RTSIG_MAX = 34
_SC_SEMAPHORES = 35
_SC_SEM_NSEMS_MAX = 36
_SC_SEM_VALUE_MAX = 37
_SC_SHARED_MEMORY_OBJECTS = 38
_SC_SIGQUEUE_MAX = 39
_SC_SIGRT_MIN = 40
_SC_SIGRT_MAX = 41
_SC_SYNCHRONIZED_IO = 42
_SC_TIMERS = 43
_SC_TIMER_MAX = 44
_SC_2_C_BIND = 45
_SC_2_C_DEV = 46
_SC_2_C_VERSION = 47
_SC_2_FORT_DEV = 48
_SC_2_FORT_RUN = 49
_SC_2_LOCALEDEF = 50
_SC_2_SW_DEV = 51
_SC_2_UPE = 52
_SC_2_VERSION = 53
_SC_BC_BASE_MAX = 54
_SC_BC_DIM_MAX = 55
_SC_BC_SCALE_MAX = 56
_SC_BC_STRING_MAX = 57
_SC_COLL_WEIGHTS_MAX = 58
_SC_EXPR_NEST_MAX = 59
_SC_LINE_MAX = 60
_SC_RE_DUP_MAX = 61
_SC_XOPEN_CRYPT = 62
_SC_XOPEN_ENH_I18N = 63
_SC_XOPEN_SHM = 64
_SC_2_CHAR_TERM = 66
_SC_XOPEN_XCU_VERSION = 67
_SC_ATEXIT_MAX = 76
_SC_IOV_MAX = 77
_SC_XOPEN_UNIX = 78
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_T_IOV_MAX = 79
_SC_PHYS_PAGES = 500
_SC_AVPHYS_PAGES = 501
_SC_COHER_BLKSZ = 503
_SC_SPLIT_CACHE = 504
_SC_ICACHE_SZ = 505
_SC_DCACHE_SZ = 506
_SC_ICACHE_LINESZ = 507
_SC_DCACHE_LINESZ = 508
_SC_ICACHE_BLKSZ = 509
_SC_DCACHE_BLKSZ = 510
_SC_DCACHE_TBLKSZ = 511
_SC_ICACHE_ASSOC = 512
_SC_DCACHE_ASSOC = 513
_SC_MAXPID = 514
_SC_STACK_PROT = 515
_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
_SC_GETGR_R_SIZE_MAX = 569
_SC_GETPW_R_SIZE_MAX = 570
_SC_LOGIN_NAME_MAX = 571
_SC_THREAD_KEYS_MAX = 572
_SC_THREAD_STACK_MIN = 573
_SC_THREAD_THREADS_MAX = 574
_SC_TTY_NAME_MAX = 575
_SC_THREADS = 576
_SC_THREAD_ATTR_STACKADDR = 577
_SC_THREAD_ATTR_STACKSIZE = 578
_SC_THREAD_PRIORITY_SCHEDULING = 579
_SC_THREAD_PRIO_INHERIT = 580
_SC_THREAD_PRIO_PROTECT = 581
_SC_THREAD_PROCESS_SHARED = 582
_SC_THREAD_SAFE_FUNCTIONS = 583
_SC_XOPEN_LEGACY = 717
_SC_XOPEN_REALTIME = 718
_SC_XOPEN_REALTIME_THREADS = 719
_SC_XBS5_ILP32_OFF32 = 720
_SC_XBS5_ILP32_OFFBIG = 721
_SC_XBS5_LP64_OFF64 = 722
_SC_XBS5_LPBIG_OFFBIG = 723
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_ASYNC_IO = 10
_PC_PRIO_IO = 11
_PC_SYNC_IO = 12
_PC_FILESIZEBITS = 67
_PC_LAST = 67
_POSIX_VERSION = 199506L
_POSIX2_VERSION = 199209L
_POSIX2_C_VERSION = 199209L
_XOPEN_XCU_VERSION = 4
_XOPEN_REALTIME = 1
_XOPEN_ENH_I18N = 1
_XOPEN_SHM = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_C_DEV = 1
_POSIX2_SW_DEV = 1
_POSIX2_UPE = 1

# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))

# Included from sys/rwlock.h
from TYPES import *
def RW_READ_HELD(x): return (rw_read_held((x)))

def RW_WRITE_HELD(x): return (rw_write_held((x)))

def RW_LOCK_HELD(x): return (rw_lock_held((x)))

def RW_ISWRITER(x): return (rw_iswriter(x))

# Included from sys/semaphore.h

# Included from sys/thread.h
from TYPES import *

# Included from sys/klwp.h
from TYPES import *

# Included from sys/condvar.h
from TYPES import *

# Included from sys/time.h

# Included from sys/types32.h

# Included from sys/int_types.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return \

from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return \

SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000

# Included from sys/time_impl.h
def TIMESPEC_OVERFLOW(ts): return \

def ITIMERSPEC_OVERFLOW(it): return \

__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_HIGHRES = 4
CLOCK_MAX = 5
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
def TICK_TO_SEC(tick): return ((tick) / hz)

def SEC_TO_TICK(sec): return ((sec) * hz)

def TICK_TO_MSEC(tick): return \

def MSEC_TO_TICK(msec): return \

def MSEC_TO_TICK_ROUNDUP(msec): return \

def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)

def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)

def USEC_TO_TICK_ROUNDUP(usec): return \

def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)

def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)

def NSEC_TO_TICK_ROUNDUP(nsec): return \

def TIMEVAL_TO_TICK(tvp): return \

def TIMESTRUC_TO_TICK(tsp): return \


# Included from time.h
from TYPES import *

# Included from iso/time_iso.h
NULL = 0L
NULL = 0
CLOCKS_PER_SEC = 1000000

# Included from sys/select.h
FD_SETSIZE = 65536
FD_SETSIZE = 1024
_NBBY = 8
NBBY = _NBBY
def FD_ZERO(p): return bzero((p), sizeof (*(p)))


# Included from sys/signal.h

# Included from sys/iso/signal_iso.h
SIGHUP = 1
SIGINT = 2
SIGQUIT = 3
SIGILL = 4
SIGTRAP = 5
SIGIOT = 6
SIGABRT = 6
SIGEMT = 7
SIGFPE = 8
SIGKILL = 9
SIGBUS = 10
SIGSEGV = 11
SIGSYS = 12
SIGPIPE = 13
SIGALRM = 14
SIGTERM = 15
SIGUSR1 = 16
SIGUSR2 = 17
SIGCLD = 18
SIGCHLD = 18
SIGPWR = 19
SIGWINCH = 20
SIGURG = 21
SIGPOLL = 22
SIGIO = SIGPOLL
SIGSTOP = 23
SIGTSTP = 24
SIGCONT = 25
SIGTTIN = 26
SIGTTOU = 27
SIGVTALRM = 28
SIGPROF = 29
SIGXCPU = 30
SIGXFSZ = 31
SIGWAITING = 32
SIGLWP = 33
SIGFREEZE = 34
SIGTHAW = 35
SIGCANCEL = 36
SIGLOST = 37
_SIGRTMIN = 38
_SIGRTMAX = 45
SIG_BLOCK = 1
SIG_UNBLOCK = 2
SIG_SETMASK = 3
SIGNO_MASK = 0xFF
SIGDEFER = 0x100
SIGHOLD = 0x200
SIGRELSE = 0x400
SIGIGNORE = 0x800
SIGPAUSE = 0x1000

# Included from sys/siginfo.h
from TYPES import *
SIGEV_NONE = 1
SIGEV_SIGNAL = 2
SIGEV_THREAD = 3
SI_NOINFO = 32767
SI_USER = 0
SI_LWP = (-1)
SI_QUEUE = (-2)
SI_TIMER = (-3)
SI_ASYNCIO = (-4)
SI_MESGQ = (-5)

# Included from sys/machsig.h
ILL_ILLOPC = 1
ILL_ILLOPN = 2
ILL_ILLADR = 3
ILL_ILLTRP = 4
ILL_PRVOPC = 5
ILL_PRVREG = 6
ILL_COPROC = 7
ILL_BADSTK = 8
NSIGILL = 8
EMT_TAGOVF = 1
EMT_CPCOVF = 2
NSIGEMT = 2
FPE_INTDIV = 1
FPE_INTOVF = 2
FPE_FLTDIV = 3
FPE_FLTOVF = 4
FPE_FLTUND = 5
FPE_FLTRES = 6
FPE_FLTINV = 7
FPE_FLTSUB = 8
NSIGFPE = 8
SEGV_MAPERR = 1
SEGV_ACCERR = 2
NSIGSEGV = 2
BUS_ADRALN = 1
BUS_ADRERR = 2
BUS_OBJERR = 3
NSIGBUS = 3
TRAP_BRKPT = 1
TRAP_TRACE = 2
TRAP_RWATCH = 3
TRAP_WWATCH = 4
TRAP_XWATCH = 5
NSIGTRAP = 5
CLD_EXITED = 1
CLD_KILLED = 2
CLD_DUMPED = 3
CLD_TRAPPED = 4
CLD_STOPPED = 5
CLD_CONTINUED = 6
NSIGCLD = 6
POLL_IN = 1
POLL_OUT = 2
POLL_MSG = 3
POLL_ERR = 4
POLL_PRI = 5
POLL_HUP = 6
NSIGPOLL = 6
PROF_SIG = 1
NSIGPROF = 1
SI_MAXSZ = 256
SI_MAXSZ = 128

# Included from sys/time_std_impl.h
from TYPES import *
SI32_MAXSZ = 128
def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)

SA_NOCLDSTOP = 0x00020000
SA_ONSTACK = 0x00000001
SA_RESETHAND = 0x00000002
SA_RESTART = 0x00000004
SA_SIGINFO = 0x00000008
SA_NODEFER = 0x00000010
SA_NOCLDWAIT = 0x00010000
SA_WAITSIG = 0x00010000
NSIG = 46
MAXSIG = 45
S_SIGNAL = 1
S_SIGSET = 2
S_SIGACTION = 3
S_NONE = 4
MINSIGSTKSZ = 2048
SIGSTKSZ = 8192
SS_ONSTACK = 0x00000001
SS_DISABLE = 0x00000002
SN_PROC = 1
SN_CANCEL = 2
SN_SEND = 3

# Included from sys/ucontext.h
from TYPES import *

# Included from sys/regset.h
REG_CCR = (0)
REG_PSR = (0)
REG_PSR = (0)
REG_PC = (1)
REG_nPC = (2)
REG_Y = (3)
REG_G1 = (4)
REG_G2 = (5)
REG_G3 = (6)
REG_G4 = (7)
REG_G5 = (8)
REG_G6 = (9)
REG_G7 = (10)
REG_O0 = (11)
REG_O1 = (12)
REG_O2 = (13)
REG_O3 = (14)
REG_O4 = (15)
REG_O5 = (16)
REG_O6 = (17)
REG_O7 = (18)
REG_ASI = (19)
REG_FPRS = (20)
REG_PS = REG_PSR
REG_SP = REG_O6
REG_R0 = REG_O0
REG_R1 = REG_O1
_NGREG = 21
_NGREG = 19
NGREG = _NGREG
_NGREG32 = 19
_NGREG64 = 21
SPARC_MAXREGWINDOW = 31
MAXFPQ = 16
XRS_ID = 0x78727300

# Included from v7/sys/privregs.h

# Included from v7/sys/psr.h
PSR_CWP = 0x0000001F
PSR_ET = 0x00000020
PSR_PS = 0x00000040
PSR_S = 0x00000080
PSR_PIL = 0x00000F00
PSR_EF = 0x00001000
PSR_EC = 0x00002000
PSR_RSV = 0x000FC000
PSR_ICC = 0x00F00000
PSR_C = 0x00100000
PSR_V = 0x00200000
PSR_Z = 0x00400000
PSR_N = 0x00800000
PSR_VER = 0x0F000000
PSR_IMPL = 0xF0000000
PSL_ALLCC = PSR_ICC
PSL_USER = (PSR_S)
PSL_USERMASK = (PSR_ICC)
PSL_UBITS = (PSR_ICC|PSR_EF)
def USERMODE(ps): return (((ps) & PSR_PS) == 0)


# Included from sys/fsr.h
FSR_CEXC = 0x0000001f
FSR_AEXC = 0x000003e0
FSR_FCC = 0x00000c00
FSR_PR = 0x00001000
FSR_QNE = 0x00002000
FSR_FTT = 0x0001c000
FSR_VER = 0x000e0000
FSR_TEM = 0x0f800000
FSR_RP = 0x30000000
FSR_RD = 0xc0000000
FSR_VER_SHIFT = 17
FSR_FCC1 = 0x00000003
FSR_FCC2 = 0x0000000C
FSR_FCC3 = 0x00000030
FSR_CEXC_NX = 0x00000001
FSR_CEXC_DZ = 0x00000002
FSR_CEXC_UF = 0x00000004
FSR_CEXC_OF = 0x00000008
FSR_CEXC_NV = 0x00000010
FSR_AEXC_NX = (0x1 << 5)
FSR_AEXC_DZ = (0x2 << 5)
FSR_AEXC_UF = (0x4 << 5)
FSR_AEXC_OF = (0x8 << 5)
FSR_AEXC_NV = (0x10 << 5)
FTT_NONE = 0
FTT_IEEE = 1
FTT_UNFIN = 2
FTT_UNIMP = 3
FTT_SEQ = 4
FTT_ALIGN = 5
FTT_DFAULT = 6
FSR_FTT_SHIFT = 14
FSR_FTT_IEEE = (FTT_IEEE << FSR_FTT_SHIFT)
FSR_FTT_UNFIN = (FTT_UNFIN << FSR_FTT_SHIFT)
FSR_FTT_UNIMP = (FTT_UNIMP << FSR_FTT_SHIFT)
FSR_FTT_SEQ = (FTT_SEQ << FSR_FTT_SHIFT)
FSR_FTT_ALIGN = (FTT_ALIGN << FSR_FTT_SHIFT)
FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
FSR_TEM_NX = (0x1 << 23)
FSR_TEM_DZ = (0x2 << 23)
FSR_TEM_UF = (0x4 << 23)
FSR_TEM_OF = (0x8 << 23)
FSR_TEM_NV = (0x10 << 23)
RP_DBLEXT = 0
RP_SINGLE = 1
RP_DOUBLE = 2
RP_RESERVED = 3
RD_NEAR = 0
RD_ZER0 = 1
RD_POSINF = 2
RD_NEGINF = 3
FPRS_DL = 0x1
FPRS_DU = 0x2
FPRS_FEF = 0x4
PIL_MAX = 0xf
def SAVE_GLOBALS(RP): return \
def RESTORE_GLOBALS(RP): return \
def SAVE_OUTS(RP): return \
def RESTORE_OUTS(RP): return \
def SAVE_WINDOW(SBP): return \
def RESTORE_WINDOW(SBP): return \
def STORE_FPREGS(FP): return \
def LOAD_FPREGS(FP): return \
_SPARC_MAXREGWINDOW = 31
_XRS_ID = 0x78727300
GETCONTEXT = 0
SETCONTEXT = 1
UC_SIGMASK = 001
UC_STACK = 002
UC_CPU = 004
UC_MAU = 010
UC_FPU = UC_MAU
UC_INTR = 020
UC_ASR = 040
UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
_SIGQUEUE_MAX = 32
_SIGNOTIFY_MAX = 32
# Included from sys/pcb.h
INSTR_VALID = 0x02
NORMAL_STEP = 0x04
WATCH_STEP = 0x08
CPC_OVERFLOW = 0x10
ASYNC_HWERR = 0x20
STEP_NONE = 0
STEP_REQUESTED = 1
STEP_ACTIVE = 2
STEP_WASACTIVE = 3
# Included from sys/msacct.h
LMS_USER = 0
LMS_SYSTEM = 1
LMS_TRAP = 2
LMS_TFAULT = 3
LMS_DFAULT = 4
LMS_KFAULT = 5
LMS_USER_LOCK = 6
LMS_SLEEP = 7
LMS_WAIT_CPU = 8
LMS_STOPPED = 9
NMSTATES = 10
# Included from sys/lwp.h
# Included from sys/synch.h
from TYPES import *
USYNC_THREAD = 0x00
USYNC_PROCESS = 0x01
LOCK_NORMAL = 0x00
LOCK_ERRORCHECK = 0x02
LOCK_RECURSIVE = 0x04
USYNC_PROCESS_ROBUST = 0x08
LOCK_PRIO_NONE = 0x00
LOCK_PRIO_INHERIT = 0x10
LOCK_PRIO_PROTECT = 0x20
LOCK_STALL_NP = 0x00
LOCK_ROBUST_NP = 0x40
LOCK_OWNERDEAD = 0x1
LOCK_NOTRECOVERABLE = 0x2
LOCK_INITED = 0x4
LOCK_UNMAPPED = 0x8
LWP_DETACHED = 0x00000040
LWP_SUSPENDED = 0x00000080
__LWP_ASLWP = 0x00000100
MAXSYSARGS = 8
NORMALRETURN = 0
JUSTRETURN = 1
LWP_USER = 0x01
LWP_SYS = 0x02
TS_FREE = 0x00
TS_SLEEP = 0x01
TS_RUN = 0x02
TS_ONPROC = 0x04
TS_ZOMB = 0x08
TS_STOPPED = 0x10
T_INTR_THREAD = 0x0001
T_WAKEABLE = 0x0002
T_TOMASK = 0x0004
T_TALLOCSTK = 0x0008
T_WOULDBLOCK = 0x0020
T_DONTBLOCK = 0x0040
T_DONTPEND = 0x0080
T_SYS_PROF = 0x0100
T_WAITCVSEM = 0x0200
T_WATCHPT = 0x0400
T_PANIC = 0x0800
TP_HOLDLWP = 0x0002
TP_TWAIT = 0x0004
TP_LWPEXIT = 0x0008
TP_PRSTOP = 0x0010
TP_CHKPT = 0x0020
TP_EXITLWP = 0x0040
TP_PRVSTOP = 0x0080
TP_MSACCT = 0x0100
TP_STOPPING = 0x0200
TP_WATCHPT = 0x0400
TP_PAUSE = 0x0800
TP_CHANGEBIND = 0x1000
TS_LOAD = 0x0001
TS_DONT_SWAP = 0x0002
TS_SWAPENQ = 0x0004
TS_ON_SWAPQ = 0x0008
TS_CSTART = 0x0100
TS_UNPAUSE = 0x0200
TS_XSTART = 0x0400
TS_PSTART = 0x0800
TS_RESUME = 0x1000
TS_CREATE = 0x2000
TS_ALLSTART = \
(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
def CPR_VSTOPPED(t): return \
def THREAD_TRANSITION(tp): return thread_transition(tp);
def THREAD_STOP(tp): return \
def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
def SEMA_HELD(x): return (sema_held((x)))
NO_LOCKS_HELD = 1
NO_COMPETING_THREADS = 1
# Included from sys/cred.h
# Included from sys/uio.h
from TYPES import *
# Included from sys/resource.h
from TYPES import *
PRIO_PROCESS = 0
PRIO_PGRP = 1
PRIO_USER = 2
RLIMIT_CPU = 0
RLIMIT_FSIZE = 1
RLIMIT_DATA = 2
RLIMIT_STACK = 3
RLIMIT_CORE = 4
RLIMIT_NOFILE = 5
RLIMIT_VMEM = 6
RLIMIT_AS = RLIMIT_VMEM
RLIM_NLIMITS = 7
RLIM_INFINITY = (-3l)
RLIM_SAVED_MAX = (-2l)
RLIM_SAVED_CUR = (-1l)
RLIM_INFINITY = 0x7fffffff
RLIM_SAVED_MAX = 0x7ffffffe
RLIM_SAVED_CUR = 0x7ffffffd
RLIM32_INFINITY = 0x7fffffff
RLIM32_SAVED_MAX = 0x7ffffffe
RLIM32_SAVED_CUR = 0x7ffffffd
# Included from sys/model.h
# Included from sys/debug.h
def ASSERT64(x): return ASSERT(x)
def ASSERT32(x): return ASSERT(x)
DATAMODEL_MASK = 0x0FF00000
DATAMODEL_ILP32 = 0x00100000
DATAMODEL_LP64 = 0x00200000
DATAMODEL_NONE = 0
DATAMODEL_NATIVE = DATAMODEL_LP64
DATAMODEL_NATIVE = DATAMODEL_ILP32
def STRUCT_SIZE(handle): return \
def STRUCT_BUF(handle): return ((handle).ptr.m64)
def SIZEOF_PTR(umodel): return \
def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
def STRUCT_BUF(handle): return ((handle).ptr)
def SIZEOF_PTR(umodel): return sizeof (caddr_t)
def lwp_getdatamodel(t): return DATAMODEL_ILP32
RUSAGE_SELF = 0
RUSAGE_CHILDREN = -1
# Included from vm/seg_enum.h
# Included from sys/buf.h
# Included from sys/kstat.h
from TYPES import *
KSTAT_STRLEN = 31
def KSTAT_ENTER(k): return \
def KSTAT_EXIT(k): return \
KSTAT_TYPE_RAW = 0
KSTAT_TYPE_NAMED = 1
KSTAT_TYPE_INTR = 2
KSTAT_TYPE_IO = 3
KSTAT_TYPE_TIMER = 4
KSTAT_NUM_TYPES = 5
KSTAT_FLAG_VIRTUAL = 0x01
KSTAT_FLAG_VAR_SIZE = 0x02
KSTAT_FLAG_WRITABLE = 0x04
KSTAT_FLAG_PERSISTENT = 0x08
KSTAT_FLAG_DORMANT = 0x10
KSTAT_FLAG_INVALID = 0x20
KSTAT_READ = 0
KSTAT_WRITE = 1
KSTAT_DATA_CHAR = 0
KSTAT_DATA_INT32 = 1
KSTAT_DATA_UINT32 = 2
KSTAT_DATA_INT64 = 3
KSTAT_DATA_UINT64 = 4
KSTAT_DATA_LONG = KSTAT_DATA_INT32
KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
KSTAT_DATA_LONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
KSTAT_DATA_LONG = 7
KSTAT_DATA_ULONG = 8
KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
KSTAT_DATA_FLOAT = 5
KSTAT_DATA_DOUBLE = 6
KSTAT_INTR_HARD = 0
KSTAT_INTR_SOFT = 1
KSTAT_INTR_WATCHDOG = 2
KSTAT_INTR_SPURIOUS = 3
KSTAT_INTR_MULTSVC = 4
KSTAT_NUM_INTRS = 5
B_BUSY = 0x0001
B_DONE = 0x0002
B_ERROR = 0x0004
B_PAGEIO = 0x0010
B_PHYS = 0x0020
B_READ = 0x0040
B_WRITE = 0x0100
B_KERNBUF = 0x0008
B_WANTED = 0x0080
B_AGE = 0x000200
B_ASYNC = 0x000400
B_DELWRI = 0x000800
B_STALE = 0x001000
B_DONTNEED = 0x002000
B_REMAPPED = 0x004000
B_FREE = 0x008000
B_INVAL = 0x010000
B_FORCE = 0x020000
B_HEAD = 0x040000
B_NOCACHE = 0x080000
B_TRUNC = 0x100000
B_SHADOW = 0x200000
B_RETRYWRI = 0x400000
def notavail(bp): return \
def BWRITE(bp): return \
def BWRITE2(bp): return \
VROOT = 0x01
VNOCACHE = 0x02
VNOMAP = 0x04
VDUP = 0x08
VNOSWAP = 0x10
VNOMOUNT = 0x20
VISSWAP = 0x40
VSWAPLIKE = 0x80
VVFSLOCK = 0x100
VVFSWAIT = 0x200
VVMLOCK = 0x400
VDIROPEN = 0x800
VVMEXEC = 0x1000
VPXFS = 0x2000
AT_TYPE = 0x0001
AT_MODE = 0x0002
AT_UID = 0x0004
AT_GID = 0x0008
AT_FSID = 0x0010
AT_NODEID = 0x0020
AT_NLINK = 0x0040
AT_SIZE = 0x0080
AT_ATIME = 0x0100
AT_MTIME = 0x0200
AT_CTIME = 0x0400
AT_RDEV = 0x0800
AT_BLKSIZE = 0x1000
AT_NBLOCKS = 0x2000
AT_VCODE = 0x4000
AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
VSUID = 04000
VSGID = 02000
VSVTX = 01000
VREAD = 00400
VWRITE = 00200
VEXEC = 00100
MODEMASK = 07777
PERMMASK = 00777
def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
VSA_ACL = 0x0001
VSA_ACLCNT = 0x0002
VSA_DFACL = 0x0004
VSA_DFACLCNT = 0x0008
LOOKUP_DIR = 0x01
DUMP_ALLOC = 0
DUMP_FREE = 1
DUMP_SCAN = 2
ATTR_UTIME = 0x01
ATTR_EXEC = 0x02
ATTR_COMM = 0x04
ATTR_HINT = 0x08
ATTR_REAL = 0x10
# Included from sys/poll.h
POLLIN = 0x0001
POLLPRI = 0x0002
POLLOUT = 0x0004
POLLRDNORM = 0x0040
POLLWRNORM = POLLOUT
POLLRDBAND = 0x0080
POLLWRBAND = 0x0100
POLLNORM = POLLRDNORM
POLLERR = 0x0008
POLLHUP = 0x0010
POLLNVAL = 0x0020
POLLREMOVE = 0x0800
POLLRDDATA = 0x0200
POLLNOERR = 0x0400
POLLCLOSED = 0x8000
# Included from sys/strmdep.h
def str_aligned(X): return (((ulong_t)(X) & (sizeof (long) - 1)) == 0)
# Included from sys/strft.h
tdelta_t_sz = 12
FTEV_MASK = 0x1FFF
FTEV_ISWR = 0x8000
FTEV_CS = 0x4000
FTEV_PS = 0x2000
FTEV_QMASK = 0x1F00
FTEV_ALLOCMASK = 0x1FF8
FTEV_ALLOCB = 0x0000
FTEV_ESBALLOC = 0x0001
FTEV_DESBALLOC = 0x0002
FTEV_ESBALLOCA = 0x0003
FTEV_DESBALLOCA = 0x0004
FTEV_ALLOCBIG = 0x0005
FTEV_ALLOCBW = 0x0006
FTEV_FREEB = 0x0008
FTEV_DUPB = 0x0009
FTEV_COPYB = 0x000A
FTEV_CALLER = 0x000F
FTEV_PUT = 0x0100
FTEV_FSYNCQ = 0x0103
FTEV_DSYNCQ = 0x0104
FTEV_PUTQ = 0x0105
FTEV_GETQ = 0x0106
FTEV_RMVQ = 0x0107
FTEV_INSQ = 0x0108
FTEV_PUTBQ = 0x0109
FTEV_FLUSHQ = 0x010A
FTEV_REPLYQ = 0x010B
FTEV_PUTNEXT = 0x010D
FTEV_RWNEXT = 0x010E
FTEV_QWINNER = 0x010F
FTEV_GEWRITE = 0x0101
def FTFLW_HASH(h): return (((unsigned)(h))%ftflw_hash_sz)
FTBLK_EVNTS = 0x9
QENAB = 0x00000001
QWANTR = 0x00000002
QWANTW = 0x00000004
QFULL = 0x00000008
QREADR = 0x00000010
QUSE = 0x00000020
QNOENB = 0x00000040
QBACK = 0x00000100
QHLIST = 0x00000200
QPAIR = 0x00000800
QPERQ = 0x00001000
QPERMOD = 0x00002000
QMTSAFE = 0x00004000
QMTOUTPERIM = 0x00008000
QMT_TYPEMASK = (QPAIR|QPERQ|QPERMOD|QMTSAFE|QMTOUTPERIM)
QINSERVICE = 0x00010000
QWCLOSE = 0x00020000
QEND = 0x00040000
QWANTWSYNC = 0x00080000
QSYNCSTR = 0x00100000
QISDRV = 0x00200000
QHOT = 0x00400000
QNEXTHOT = 0x00800000
_QINSERTING = 0x04000000
_QREMOVING = 0x08000000
Q_SQQUEUED = 0x01
Q_SQDRAINING = 0x02
QB_FULL = 0x01
QB_WANTW = 0x02
QB_BACK = 0x04
NBAND = 256
STRUIOT_NONE = -1
STRUIOT_DONTCARE = 0
STRUIOT_STANDARD = 1
STRUIOT_IP = 2
DBLK_REFMIN = 0x01
STRUIO_SPEC = 0x01
STRUIO_DONE = 0x02
STRUIO_IP = 0x04
STRUIO_ZC = 0x08
STRUIO_ICK = 0x10
MSGMARK = 0x01
MSGNOLOOP = 0x02
MSGDELIM = 0x04
MSGNOGET = 0x08
MSGMARKNEXT = 0x10
MSGNOTMARKNEXT = 0x20
M_DATA = 0x00
M_PROTO = 0x01
M_BREAK = 0x08
M_PASSFP = 0x09
M_EVENT = 0x0a
M_SIG = 0x0b
M_DELAY = 0x0c
M_CTL = 0x0d
M_IOCTL = 0x0e
M_SETOPTS = 0x10
M_RSE = 0x11
M_IOCACK = 0x81
M_IOCNAK = 0x82
M_PCPROTO = 0x83
M_PCSIG = 0x84
M_READ = 0x85
M_FLUSH = 0x86
M_STOP = 0x87
M_START = 0x88
M_HANGUP = 0x89
M_ERROR = 0x8a
M_COPYIN = 0x8b
M_COPYOUT = 0x8c
M_IOCDATA = 0x8d
M_PCRSE = 0x8e
M_STOPI = 0x8f
M_STARTI = 0x90
M_PCEVENT = 0x91
M_UNHANGUP = 0x92
QNORM = 0x00
QPCTL = 0x80
IOC_MODELS = DATAMODEL_MASK
IOC_ILP32 = DATAMODEL_ILP32
IOC_LP64 = DATAMODEL_LP64
IOC_NATIVE = DATAMODEL_NATIVE
IOC_NONE = DATAMODEL_NONE
STRCANON = 0x01
RECOPY = 0x02
SO_ALL = 0x003f
SO_READOPT = 0x0001
SO_WROFF = 0x0002
SO_MINPSZ = 0x0004
SO_MAXPSZ = 0x0008
SO_HIWAT = 0x0010
SO_LOWAT = 0x0020
SO_MREADON = 0x0040
SO_MREADOFF = 0x0080
SO_NDELON = 0x0100
SO_NDELOFF = 0x0200
SO_ISTTY = 0x0400
SO_ISNTTY = 0x0800
SO_TOSTOP = 0x1000
SO_TONSTOP = 0x2000
SO_BAND = 0x4000
SO_DELIM = 0x8000
SO_NODELIM = 0x010000
SO_STRHOLD = 0x020000
SO_ERROPT = 0x040000
SO_COPYOPT = 0x080000
SO_MAXBLK = 0x100000
DEF_IOV_MAX = 16
INFOD_FIRSTBYTES = 0x02
INFOD_BYTES = 0x04
INFOD_COUNT = 0x08
INFOD_COPYOUT = 0x10
MODOPEN = 0x1
CLONEOPEN = 0x2
CONSOPEN = 0x4
OPENFAIL = -1
BPRI_LO = 1
BPRI_MED = 2
BPRI_HI = 3
BPRI_FT = 4
INFPSZ = -1
FLUSHALL = 1
FLUSHDATA = 0
STRHIGH = 5120
STRLOW = 1024
MAXIOCBSZ = 1024
PERIM_INNER = 1
PERIM_OUTER = 2
def datamsg(type): return \
def straln(a): return (caddr_t)((intptr_t)(a) & ~(sizeof (int)-1))
# Included from sys/byteorder.h
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_ENCAP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_IPV6 = 41
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_RSVP = 46
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_PIM = 103
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_ECHO = 7
IPPORT_DISCARD = 9
IPPORT_SYSTAT = 11
IPPORT_DAYTIME = 13
IPPORT_NETSTAT = 15
IPPORT_FTP = 21
IPPORT_TELNET = 23
IPPORT_SMTP = 25
IPPORT_TIMESERVER = 37
IPPORT_NAMESERVER = 42
IPPORT_WHOIS = 43
IPPORT_MTP = 57
IPPORT_BOOTPS = 67
IPPORT_BOOTPC = 68
IPPORT_TFTP = 69
IPPORT_RJE = 77
IPPORT_FINGER = 79
IPPORT_TTYLINK = 87
IPPORT_SUPDUP = 95
IPPORT_EXECSERVER = 512
IPPORT_LOGINSERVER = 513
IPPORT_CMDSERVER = 514
IPPORT_EFSSERVER = 520
IPPORT_BIFFUDP = 512
IPPORT_WHOSERVER = 513
IPPORT_ROUTESERVER = 520
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IMPLINK_IP = 155
IMPLINK_LOWEXPER = 156
IMPLINK_HIGHEXPER = 158
IN_CLASSA_NSHIFT = 24
IN_CLASSA_MAX = 128
IN_CLASSB_NSHIFT = 16
IN_CLASSB_MAX = 65536
IN_CLASSC_NSHIFT = 8
IN_CLASSD_NSHIFT = 28
def IN_MULTICAST(i): return IN_CLASSD(i)
IN_LOOPBACKNET = 127
def IN_SET_LOOPBACK_ADDR(a): return \
def IN6_IS_ADDR_UNSPECIFIED(addr): return \
def IN6_IS_ADDR_LOOPBACK(addr): return \
def IN6_IS_ADDR_LOOPBACK(addr): return \
def IN6_IS_ADDR_MULTICAST(addr): return \
def IN6_IS_ADDR_MULTICAST(addr): return \
def IN6_IS_ADDR_LINKLOCAL(addr): return \
def IN6_IS_ADDR_LINKLOCAL(addr): return \
def IN6_IS_ADDR_SITELOCAL(addr): return \
def IN6_IS_ADDR_SITELOCAL(addr): return \
def IN6_IS_ADDR_V4MAPPED(addr): return \
def IN6_IS_ADDR_V4MAPPED(addr): return \
def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
def IN6_IS_ADDR_V4COMPAT(addr): return \
def IN6_IS_ADDR_V4COMPAT(addr): return \
def IN6_IS_ADDR_MC_RESERVED(addr): return \
def IN6_IS_ADDR_MC_RESERVED(addr): return \
def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
def IN6_IS_ADDR_MC_GLOBAL(addr): return \
def IN6_IS_ADDR_MC_GLOBAL(addr): return \
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 0x10
IP_MULTICAST_TTL = 0x11
IP_MULTICAST_LOOP = 0x12
IP_ADD_MEMBERSHIP = 0x13
IP_DROP_MEMBERSHIP = 0x14
IP_SEC_OPT = 0x22
IPSEC_PREF_NEVER = 0x01
IPSEC_PREF_REQUIRED = 0x02
IPSEC_PREF_UNIQUE = 0x04
IP_ADD_PROXY_ADDR = 0x40
IP_BOUND_IF = 0x41
IP_UNSPEC_SRC = 0x42
IP_REUSEADDR = 0x104
IP_DONTROUTE = 0x105
IP_BROADCAST = 0x106
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_UNICAST_HOPS = 0x5
IPV6_MULTICAST_IF = 0x6
IPV6_MULTICAST_HOPS = 0x7
IPV6_MULTICAST_LOOP = 0x8
IPV6_JOIN_GROUP = 0x9
IPV6_LEAVE_GROUP = 0xa
IPV6_ADD_MEMBERSHIP = 0x9
IPV6_DROP_MEMBERSHIP = 0xa
IPV6_PKTINFO = 0xb
IPV6_HOPLIMIT = 0xc
IPV6_NEXTHOP = 0xd
IPV6_HOPOPTS = 0xe
IPV6_DSTOPTS = 0xf
IPV6_RTHDR = 0x10
IPV6_RTHDRDSTOPTS = 0x11
IPV6_RECVPKTINFO = 0x12
IPV6_RECVHOPLIMIT = 0x13
IPV6_RECVHOPOPTS = 0x14
IPV6_RECVDSTOPTS = 0x15
IPV6_RECVRTHDR = 0x16
IPV6_RECVRTHDRDSTOPTS = 0x17
IPV6_CHECKSUM = 0x18
IPV6_BOUND_IF = 0x41
IPV6_UNSPEC_SRC = 0x42
INET_ADDRSTRLEN = 16
INET6_ADDRSTRLEN = 46
IPV6_PAD1_OPT = 0
| xbmc/xbmc-antiquated | xbmc/lib/libPython/Python/Lib/plat-sunos5/IN.py | Python | gpl-2.0 | 28,151 |
from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
    """ModelForm for editing a FlatPage with basic URL validation."""
    url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/]+$',
        help_text = _("Example: '/about/contact/'. Make sure to have leading"
                      " and trailing slashes."),
        # NOTE(review): the regex permits URLs without leading/trailing
        # slashes even though the help text asks for them — confirm intent.
        error_message = _("This value must contain only letters, numbers,"
                          " underscores, dashes or slashes."))

    class Meta:
        # Form fields are derived from the FlatPage model.
        model = FlatPage
class FlatPageAdmin(admin.ModelAdmin):
    """Admin options for FlatPage; uses FlatpageForm for URL validation."""
    form = FlatpageForm
    fieldsets = (
        (None, {'fields': ('url', 'title', 'content', 'sites')}),
        # Less common options live in a collapsed "Advanced" section.
        (_('Advanced options'), {'classes': ('collapse',), 'fields': ('enable_comments', 'registration_required', 'template_name')}),
    )
    list_display = ('url', 'title')
    list_filter = ('sites', 'enable_comments', 'registration_required')
    search_fields = ('url', 'title')

# Register with the default admin site so flatpages show up in the admin.
admin.site.register(FlatPage, FlatPageAdmin)
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/contrib/flatpages/admin.py | Python | bsd-3-clause | 1,072 |
#-*- coding: utf-8 -*-
from reportlab.lib.colors import Color
from unittest import TestCase
from xhtml2pdf.util import getCoords, getColor, getSize, getFrameDimensions, \
getPos, getBox
from xhtml2pdf.tags import int_to_roman
class UtilsCoordTestCase(TestCase):
    """Tests for xhtml2pdf.util.getCoords coordinate conversion."""

    def test_getCoords_simple(self):
        expected = (1, -1, 10, 10)
        self.assertEqual(getCoords(1, 1, 10, 10, (10, 10)), expected)
        # Repeat the identical call: getCoords memoizes its results.
        self.assertEqual(getCoords(1, 1, 10, 10, (10, 10)), expected)

    def test_getCoords_x_lt_0(self):
        # A negative x is measured back from the page width (-1 -> 9).
        self.assertEqual(getCoords(-1, 1, 10, 10, (10, 10)), (9, -1, 10, 10))

    def test_getCoords_y_lt_0(self):
        # A negative y is measured back from the page height (-1 -> -9).
        self.assertEqual(getCoords(1, -1, 10, 10, (10, 10)), (1, -9, 10, 10))

    def test_getCoords_w_and_h_none(self):
        # Without width/height only the translated origin is returned.
        self.assertEqual(getCoords(1, 1, None, None, (10, 10)), (1, 9))

    def test_getCoords_w_lt_0(self):
        # A negative width is measured back from the page width.
        self.assertEqual(getCoords(1, 1, -1, 10, (10, 10)), (1, -1, 8, 10))

    def test_getCoords_h_lt_0(self):
        # A negative height is measured back from the page height.
        self.assertEqual(getCoords(1, 1, 10, -1, (10, 10)), (1, 1, 10, 8))
class UtilsColorTestCase(TestCase):
    """Tests for xhtml2pdf.util.getColor parsing."""

    RED = Color(1, 0, 0, 1)

    def test_get_color_simple(self):
        self.assertEqual(getColor('red'), self.RED)
        # Second lookup exercises the memoization cache.
        self.assertEqual(getColor('red'), self.RED)

    def test_get_color_from_color(self):
        # Passing an existing Color object is a no-op.
        self.assertEqual(getColor(Color(1, 0, 0, 1)), self.RED)

    def test_get_transparent_color(self):
        for name in ('transparent', 'none'):
            self.assertEqual(getColor(name, default='TOKEN'), 'TOKEN')

    def test_get_color_for_none(self):
        self.assertEqual(getColor(None, default='TOKEN'), 'TOKEN')

    def test_get_color_for_RGB(self):
        self.assertEqual(getColor('#FF0000'), self.RED)

    def test_get_color_for_RGB_with_len_4(self):
        self.assertEqual(getColor('#F00'), self.RED)

    def test_get_color_for_CSS_RGB_function(self):
        # The parser is regexp based, so try both common spellings.
        self.assertEqual(getColor('rgb(255,0,0)'), self.RED)
        self.assertEqual(getColor('<css function: rgb(255,0,0)>'), self.RED)
class UtilsGetSizeTestCase(TestCase):
    """Tests for xhtml2pdf.util.getSize unit conversion (results in points)."""

    def test_get_size_simple(self):
        self.assertEqual(getSize('12pt'), 12.00)
        # Identical call again: getSize memoizes.
        self.assertEqual(getSize('12pt'), 12.00)

    def test_get_size_for_none(self):
        self.assertEqual(getSize(None, relative='TOKEN'), 'TOKEN')

    def test_get_size_for_float(self):
        self.assertEqual(getSize(12.00), 12.00)

    def test_get_size_for_tuple(self):
        # TODO: This is a really strange case. Probably should not work this way.
        self.assertEqual(getSize(("12", ".12")), 12.12)

    def test_get_size_for_cm(self):
        self.assertEqual(getSize("1cm"), 28.346456692913385)

    def test_get_size_for_mm(self):
        self.assertEqual(getSize("1mm"), 2.8346456692913385)

    def test_get_size_for_i(self):
        self.assertEqual(getSize("1i"), 72.00)

    def test_get_size_for_in(self):
        self.assertEqual(getSize("1in"), 72.00)

    def test_get_size_for_inch(self):
        self.assertEqual(getSize("1in"), 72.00)

    def test_get_size_for_pc(self):
        self.assertEqual(getSize("1pc"), 12.00)

    def test_get_size_for_none_str(self):
        # "none", "0" and (oddly) "auto" all collapse to zero.
        for literal in ("none", "0", "auto"):
            self.assertEqual(getSize(literal), 0.0)
class PisaDimensionTestCase(TestCase):
    """Tests for xhtml2pdf.util.getFrameDimensions.

    getFrameDimensions(dims, page_width, page_height) resolves CSS-style
    frame properties into an absolute (left, top, width, height) box in
    points.

    Cleanups vs. the previous revision: fixed the '12,pt' fixture typo,
    normalized deprecated assertEquals to assertEqual, corrected the
    misspelled test names (trame_dimentions -> frame_dimensions) and
    dropped commented-out code.
    """

    def test_FrameDimensions_left_top_width_height(self):
        dims = {
            'left': '10pt',
            'top': '20pt',
            'width': '30pt',
            'height': '40pt',
        }
        expected = (10.0, 20.0, 30.0, 40.0)
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(expected, result)

    def test_FrameDimensions_left_top_bottom_right(self):
        # Width/height are derived from the opposite edges.
        dims = {
            'left': '10pt',
            'top': '20pt',
            'bottom': '30pt',
            'right': '40pt',
        }
        expected = (10.0, 20.0, 50.0, 150.0)
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(expected, result)

    def test_FrameDimensions_bottom_right_width_height(self):
        # Anchored to the bottom-right corner instead of the top-left.
        dims = {
            'bottom': '10pt',
            'right': '20pt',
            'width': '70pt',
            'height': '80pt',
        }
        expected = (10.0, 110.0, 70.0, 80.0)
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(expected, result)

    def test_FrameDimensions_left_top_width_height_with_margin(self):
        dims = {
            'left': '10pt',
            'top': '20pt',
            'width': '70pt',
            'height': '80pt',
            'margin-top': '10pt',
            'margin-left': '15pt',
            'margin-bottom': '20pt',
            'margin-right': '25pt',
        }
        expected = (25.0, 30.0, 30.0, 50.0)
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(expected, result)

    def test_FrameDimensions_bottom_right_width_height_with_margin(self):
        dims = {
            'bottom': '10pt',
            'right': '20pt',
            'width': '70pt',
            'height': '80pt',
            'margin-top': '10pt',
            'margin-left': '15pt',
            'margin-bottom': '20pt',
            'margin-right': '25pt',
        }
        expected = (25.0, 120.0, 30.0, 50.0)
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(expected, result)

    def test_frame_dimensions_for_box_len_eq_4(self):
        # '-pdf-frame-box' supplies the box directly as four lengths.
        # (Was "'12,pt'" — an obvious fixture typo; '12pt' yields the same
        # expected 12.0, as the pc/pt tests above show.)
        dims = {
            '-pdf-frame-box': ['12pt', '12pt', '12pt', '12pt']
        }
        expected = [12.0, 12.0, 12.0, 12.0]
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(result, expected)

    def test_frame_dimensions_for_height_without_top_or_bottom(self):
        # Without a vertical anchor the frame spans the full page height.
        dims = {
            'left': '10pt',
            'width': '30pt',
            'height': '40pt',
        }
        expected = (10.0, 0.0, 30.0, 200.0)
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(expected, result)

    def test_frame_dimensions_for_width_without_left_or_right(self):
        # Without a horizontal anchor the frame spans the full page width.
        dims = {
            'top': '20pt',
            'width': '30pt',
            'height': '40pt',
        }
        expected = (0.0, 20.0, 100.0, 40.0)
        result = getFrameDimensions(dims, 100, 200)
        self.assertEqual(expected, result)
class GetPosTestCase(TestCase):
    """Tests for xhtml2pdf.util.getBox position parsing."""

    def test_get_pos_simple(self):
        # "x y w h" in points, converted against a (10, 10) page size.
        self.assertEqual(getBox("1pt 1pt 10pt 10pt", (10, 10)),
                         (1.0, -1.0, 10, 10))

    def test_get_pos_raising(self):
        # A box specification needs four lengths; three must raise.
        # (Replaces the old manual try/except/flag dance with the
        # idiomatic unittest assertion.)
        self.assertRaises(Exception, getBox, "1pt 1pt 10pt", (10, 10))
class TestTagUtils(TestCase):
    """Tests for xhtml2pdf.tags.int_to_roman."""

    def test_roman_numeral_conversion(self):
        cases = ((1, "I"), (50, "L"), (42, "XLII"), (26, "XXVI"))
        for number, numeral in cases:
            self.assertEqual(numeral, int_to_roman(number))
| zenx/xhtml2pdf | tests/test_utils.py | Python | apache-2.0 | 8,136 |
import os
import re
import sys
from distutils import log
import xml.dom.pulldom
import shlex
import locale
import codecs
import unicodedata
import warnings
from setuptools.compat import unicode
from setuptools.py31compat import TemporaryDirectory
from xml.sax.saxutils import unescape
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from subprocess import Popen as _Popen, PIPE as _PIPE
#NOTE: Use of the command line options require SVN 1.3 or newer (December 2005)
# and SVN 1.3 hasn't been supported by the developers since mid 2008.
#subprocess is called several times with shell=(sys.platform=='win32')
#see the follow for more information:
# http://bugs.python.org/issue8557
# http://stackoverflow.com/questions/5658622/
# python-subprocess-popen-environment-path
def _run_command(args, stdout=_PIPE, stderr=_PIPE, encoding=None, stream=0):
    """Run *args* as a subprocess and return ``(returncode, output)``.

    ``stream`` selects which captured pipe is returned: 0 for stdout,
    1 for stderr.  The raw bytes are decoded with ``decode_as_string``
    (console encoding when *encoding* is None).  If the process cannot
    be launched at all, ``(1, '')`` is returned instead of raising.
    """
    #regarding the shell argument, see: http://bugs.python.org/issue8557
    try:
        proc = _Popen(args, stdout=stdout, stderr=stderr,
                      shell=(sys.platform == 'win32'))

        data = proc.communicate()[stream]
    except OSError:
        # svn binary missing or not executable
        return 1, ''

    #decode the output explicitly (see decode_as_string)
    data = decode_as_string(data, encoding)

    #communicate() has already called wait(), so returncode is set
    return proc.returncode, data
def _get_entry_schedule(entry):
    """Return the text content of the first <schedule> child of *entry*."""
    schedule_node = entry.getElementsByTagName('schedule')[0]
    text_parts = []
    for child in schedule_node.childNodes:
        if child.nodeType == child.TEXT_NODE:
            text_parts.append(child.nodeValue)
    return "".join(text_parts)
def _get_target_property(target):
    """Return the text content of the first <property> child of *target*."""
    prop_node = target.getElementsByTagName('property')[0]
    pieces = [child.nodeValue
              for child in prop_node.childNodes
              if child.nodeType == child.TEXT_NODE]
    return "".join(pieces)
def _get_xml_data(decoded_str):
    """Return *decoded_str* in the form the XML parser wants.

    Python 3 parsers accept the text as-is; older interpreters want an
    encoded byte string.
    """
    if sys.version_info >= (3, 0):
        return decoded_str
    #old versions want an encoded string
    return decoded_str.encode('utf-8')
def joinpath(prefix, *suffix):
    """Join *suffix* components under *prefix*, treating '' and '.' as
    "no prefix" so that relative results stay clean."""
    if prefix and prefix != '.':
        return os.path.join(prefix, *suffix)
    return os.path.join(*suffix)
def determine_console_encoding():
    """Best-effort guess of the console encoding.

    Falls back to the locale default when the preferred encoding is
    missing or "US-ASCII", and to "utf-8" on OS X (or "US-ASCII"
    elsewhere) when nothing usable can be determined.
    """
    try:
        #try for the preferred encoding
        encoding = locale.getpreferredencoding()

        #see if the locale.getdefaultlocale returns null
        #some versions of python\platforms return US-ASCII
        #when it cannot determine an encoding
        if not encoding or encoding == "US-ASCII":
            encoding = locale.getdefaultlocale()[1]

        if encoding:
            codecs.lookup(encoding)  # make sure a lookup error is not made

    except (locale.Error, LookupError):
        encoding = None

    is_osx = sys.platform == "darwin"
    if not encoding:
        # index by bool: False -> "US-ASCII", True (OS X) -> "utf-8"
        return ["US-ASCII", "utf-8"][is_osx]
    elif encoding.startswith("mac-") and is_osx:
        #certain versions of python would return mac-roman as default
        #OSX as a left over of earlier mac versions.
        return "utf-8"
    else:
        return encoding
# Cached once at import time; used as the default encoding by
# decode_as_string() below.
_console_encoding = determine_console_encoding()
def decode_as_string(text, encoding=None):
    """
    Decode console or file output explicitly using getpreferredencoding.

    The *text* parameter should be an encoded string; if it is already
    unicode, no decode occurs.  If no *encoding* is given, the cached
    console encoding is used; pass an explicit encoding for SVN --xml
    output.  The result is normalized to composed NFC form.

    --xml should be UTF-8 (SVN Issue 2938); the discussion on the
    Subversion DEV List from 2007 seems to indicate the same.
    """
    #text should be a byte string
    if encoding is None:
        encoding = _console_encoding

    if not isinstance(text, unicode):
        text = text.decode(encoding)

    text = unicodedata.normalize('NFC', text)

    return text
def parse_dir_entries(decoded_str):
    """Parse (path, kind) tuples out of a recursive ``svn info --xml`` dump."""
    document = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
    entries = []
    for event, node in document:
        if event != 'START_ELEMENT' or node.nodeName != 'entry':
            continue
        document.expandNode(node)
        # Entries scheduled for deletion are no longer part of the tree.
        if _get_entry_schedule(node).startswith('delete'):
            continue
        entries.append((node.getAttribute('path'),
                        node.getAttribute('kind')))
    return entries[1:]  # the first entry is the root directory itself
def parse_externals_xml(decoded_str, prefix=''):
    '''Parse a ``propget svn:externals --xml`` dump.

    Returns each external's folder joined below its target path, with
    *prefix* stripped off the front of the target path.
    '''
    prefix = os.path.normpath(prefix)
    prefix = os.path.normcase(prefix)

    doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
    externals = list()
    for event, node in doc:
        if event == 'START_ELEMENT' and node.nodeName == 'target':
            doc.expandNode(node)
            path = os.path.normpath(node.getAttribute('path'))

            # Make the target path relative to the requested prefix.
            if os.path.normcase(path).startswith(prefix):
                path = path[len(prefix)+1:]

            data = _get_target_property(node)
            #data should be decoded already
            for external in parse_external_prop(data):
                externals.append(joinpath(path, external))

    return externals  # NOTE(review): old comment claimed the root is
    # excluded here, but unlike parse_dir_entries nothing is sliced off.
def parse_external_prop(lines):
    """
    Parse the value of a retrieved svn:externals entry.

    Possible token layouts (with quoting and backslash escaping in later
    svn versions):

        URL[@#] EXT_FOLDERNAME
        [-r#]   URL EXT_FOLDERNAME
        EXT_FOLDERNAME [-r#] URL
    """
    externals = []
    for line in lines.splitlines():
        line = line.lstrip()  # there might be a "\ "
        if not line:
            continue

        if sys.version_info < (3, 0):
            #shlex handles NULLs just fine and shlex in 2.7 tries to encode
            #as ascii automatically
            line = line.encode('utf-8')
        line = shlex.split(line)
        if sys.version_info < (3, 0):
            line = [x.decode('utf-8') for x in line]

        #EXT_FOLDERNAME is either the first or last token, depending on
        #which end the URL falls: a token with a URL scheme is the URL.
        if urlparse.urlsplit(line[-1])[0]:
            external = line[0]
        else:
            external = line[-1]

        external = decode_as_string(external, encoding="utf-8")
        externals.append(os.path.normpath(external))

    return externals
def parse_prop_file(filename, key):
    """Extract *key*'s value from an old-style SVN property file.

    The file is a sequence of ``K <len>`` / ``V <len>`` records: a
    length line followed by that many bytes of key or value data.
    Returns the value following the matching key, or the last chunk
    read ('' when nothing was read) if *key* is never found.
    """
    found = False
    data = ''
    # 'with' replaces the old try/finally and guarantees the handle is
    # closed even if a record is malformed and int(length) raises.
    with open(filename, 'rt') as f:
        for line in iter(f.readline, ''):  # can't use direct iter!
            parts = line.split()
            if len(parts) == 2:
                kind, length = parts
                data = f.read(int(length))
                if kind == 'K' and data == key:
                    found = True
                elif kind == 'V' and found:
                    break
    return data
class SvnInfo(object):
    '''
    Generic svn_info object.  It has little knowledge of how to extract
    information itself; use ``SvnInfo.load`` to instantiate the subclass
    matching the installed svn version.  The base class doubles as a
    NO-OP implementation for directories that are not working copies.

    Paths are not filesystem encoded.
    '''

    @staticmethod
    def get_svn_version():
        """Return the svn client version string, or '' when unavailable."""
        # Temp config directory should be enough to check for repository
        # This is needed because .svn always creates .subversion and
        # some operating systems do not handle dot directory correctly.
        # Real queries in real svn repos with be concerned with it creation
        with TemporaryDirectory() as tempdir:
            code, data = _run_command(['svn',
                                       '--config-dir', tempdir,
                                       '--version',
                                       '--quiet'])

        if code == 0 and data:
            return data.strip()
        else:
            return ''

    #svnversion return values (previous implementations return max revision)
    # 4123:4168     mixed revision working copy
    # 4168M         modified working copy
    # 4123S         switched working copy
    # 4123:4168MS   mixed revision, modified, switched working copy
    revision_re = re.compile(r'(?:([\-0-9]+):)?(\d+)([a-z]*)\s*$', re.I)

    @classmethod
    def load(cls, dirname=''):
        """Return the SvnInfo subclass instance suited to *dirname*.

        Non-working-copy directories get the NO-OP base class; missing
        or pre-1.3 svn clients fall back to .svn file parsing.
        """
        normdir = os.path.normpath(dirname)

        # Temp config directory should be enough to check for repository
        # This is needed because .svn always creates .subversion and
        # some operating systems do not handle dot directory correctly.
        # Real queries in real svn repos with be concerned with it creation
        with TemporaryDirectory() as tempdir:
            code, data = _run_command(['svn',
                                       '--config-dir', tempdir,
                                       'info', normdir])

        # Must check for some contents, as some use empty directories
        # in testcases, however only enteries is needed also the info
        # command above MUST have worked
        svn_dir = os.path.join(normdir, '.svn')
        is_svn_wd = (not code or
                     os.path.isfile(os.path.join(svn_dir, 'entries')))

        svn_version = tuple(cls.get_svn_version().split('.'))

        try:
            base_svn_version = tuple(int(x) for x in svn_version[:2])
        except ValueError:
            # version string was empty or non-numeric
            base_svn_version = tuple()

        if not is_svn_wd:
            #return an instance of this NO-OP class
            return SvnInfo(dirname)

        if code or not base_svn_version or base_svn_version < (1, 3):
            warnings.warn(("No SVN 1.3+ command found: falling back "
                           "on pre 1.7 .svn parsing"), DeprecationWarning)
            return SvnFileInfo(dirname)

        if base_svn_version < (1, 5):
            return Svn13Info(dirname)

        return Svn15Info(dirname)

    def __init__(self, path=''):
        # Working-copy path; entries/externals are computed lazily.
        self.path = path
        self._entries = None
        self._externals = None

    def get_revision(self):
        'Retrieve the directory revision information using svnversion'
        code, data = _run_command(['svnversion', '-c', self.path])
        if code:
            log.warn("svnversion failed")
            return 0

        parsed = self.revision_re.match(data)
        if parsed:
            # group(2) is the (max) revision number of the range
            return int(parsed.group(2))
        else:
            return 0

    @property
    def entries(self):
        # Lazily computed list of (path, kind) tuples.
        if self._entries is None:
            self._entries = self.get_entries()
        return self._entries

    @property
    def externals(self):
        # Lazily computed list of svn:externals paths.
        if self._externals is None:
            self._externals = self.get_externals()
        return self._externals

    def iter_externals(self):
        '''
        Iterate over the svn:external references in the repository path.
        '''
        for item in self.externals:
            yield item

    def iter_files(self):
        '''
        Iterate over the non-deleted file entries in the repository path
        '''
        for item, kind in self.entries:
            if kind.lower() == 'file':
                yield item

    def iter_dirs(self, include_root=True):
        '''
        Iterate over the non-deleted directory entries in the repository
        path, optionally preceded by the root path itself.
        '''
        if include_root:
            yield self.path
        for item, kind in self.entries:
            if kind.lower() == 'dir':
                yield item

    def get_entries(self):
        # Overridden by subclasses that know how to query svn.
        return []

    def get_externals(self):
        # Overridden by subclasses that know how to query svn.
        return []
class Svn13Info(SvnInfo):
    """Backend for svn 1.3/1.4 clients: ``svn info -R --xml`` is
    available, but ``propget`` has no XML output yet."""
    def get_entries(self):
        code, data = _run_command(['svn', 'info', '-R', '--xml', self.path],
                                  encoding="utf-8")
        if code:
            log.debug("svn info failed")
            return []
        return parse_dir_entries(data)
    def get_externals(self):
        #Previous to 1.5 --xml was not supported for svn propget and the -R
        #output format breaks the shlex compatible semantics.
        cmd = ['svn', 'propget', 'svn:externals']
        result = []
        for folder in self.iter_dirs():
            code, lines = _run_command(cmd + [folder], encoding="utf-8")
            if code != 0:
                log.warn("svn propget failed")
                return []
            #lines should a str
            for external in parse_external_prop(lines):
                if folder:
                    external = os.path.join(folder, external)
                result.append(os.path.normpath(external))
        return result
class Svn15Info(Svn13Info):
    """Backend for svn 1.5+ clients: ``propget`` supports ``--xml``, so
    all externals can be fetched in a single recursive call."""
    def get_externals(self):
        cmd = ['svn', 'propget', 'svn:externals', self.path, '-R', '--xml']
        code, lines = _run_command(cmd, encoding="utf-8")
        if code:
            log.debug("svn propget failed")
            return []
        return parse_externals_xml(lines, prefix=os.path.abspath(self.path))
class SvnFileInfo(SvnInfo):
    """Fallback backend that reads pre-1.7 ``.svn`` metadata directly
    from disk, used when no suitable svn executable is found."""
    def __init__(self, path=''):
        super(SvnFileInfo, self).__init__(path)
        self._directories = None
        self._revision = None
    def _walk_svn(self, base):
        # Recursively yield (path, is_file, dir_revision) triples by
        # parsing each directory's .svn/entries file.
        entry_file = joinpath(base, '.svn', 'entries')
        if os.path.isfile(entry_file):
            entries = SVNEntriesFile.load(base)
            yield (base, False, entries.parse_revision())
            for path in entries.get_undeleted_records():
                path = decode_as_string(path)
                path = joinpath(base, path)
                if os.path.isfile(path):
                    yield (path, True, None)
                elif os.path.isdir(path):
                    for item in self._walk_svn(path):
                        yield item
    def _build_entries(self):
        # Populate both the entries cache and the max-revision cache in a
        # single walk of the working copy.
        entries = list()
        rev = 0
        for path, isfile, dir_rev in self._walk_svn(self.path):
            if isfile:
                entries.append((path, 'file'))
            else:
                entries.append((path, 'dir'))
                rev = max(rev, dir_rev)
        self._entries = entries
        self._revision = rev
    def get_entries(self):
        if self._entries is None:
            self._build_entries()
        return self._entries
    def get_revision(self):
        if self._revision is None:
            self._build_entries()
        return self._revision
    def get_externals(self):
        # svn:externals may live in either of these property files
        # depending on client version; the later match wins.
        prop_files = [['.svn', 'dir-prop-base'],
                      ['.svn', 'dir-props']]
        externals = []
        for dirname in self.iter_dirs():
            prop_file = None
            for rel_parts in prop_files:
                filename = joinpath(dirname, *rel_parts)
                if os.path.isfile(filename):
                    prop_file = filename
            if prop_file is not None:
                ext_prop = parse_prop_file(prop_file, 'svn:externals')
                #ext_prop should be utf-8 coming from svn:externals
                ext_prop = decode_as_string(ext_prop, encoding="utf-8")
                externals.extend(parse_external_prop(ext_prop))
        return externals
def svn_finder(dirname=''):
    """Yield every versioned file under *dirname*, including files that
    live inside svn:externals checkouts.

    Entries and externals are walked through the same SvnInfo interface
    because svn 1.7 working copies no longer keep per-directory props.
    """
    root = SvnInfo.load(dirname)
    for filename in root.iter_files():
        yield filename
    for external_path in root.iter_externals():
        external_info = SvnInfo.load(external_path)
        for filename in external_info.iter_files():
            yield filename
class SVNEntriesFile(object):
    """Base parser for a pre-1.7 ``.svn/entries`` file.

    ``read``/``load`` dispatch to the text or XML subclass depending on
    the file contents; subclasses supply ``parse_revision_numbers`` and
    the record accessors.
    """
    def __init__(self, data):
        # raw text of the entries file
        self.data = data
    @classmethod
    def load(class_, base):
        """Read and parse ``<base>/.svn/entries``."""
        filename = os.path.join(base, '.svn', 'entries')
        # context manager guarantees the handle is closed even when
        # read() raises (replaces the manual try/finally + close)
        with open(filename) as f:
            return SVNEntriesFile.read(f)
    @classmethod
    def read(class_, fileobj):
        """Parse an open entries file, choosing the subclass by format."""
        data = fileobj.read()
        # XML format (svn <= 1.3) starts with an XML declaration
        is_xml = data.startswith('<?xml')
        class_ = [SVNEntriesFileText, SVNEntriesFileXML][is_xml]
        return class_(data)
    def parse_revision(self):
        """Return the highest committed revision (0 when none found)."""
        all_revs = self.parse_revision_numbers() + [0]
        return max(all_revs)
class SVNEntriesFileText(SVNEntriesFile):
    """Parser for the plain-text (svn 1.4-1.6) ``.svn/entries`` format."""
    # format-version numbers written by each client generation
    known_svn_versions = {
        '1.4.x': 8,
        '1.5.x': 9,
        '1.6.x': 10,
    }
    def __get_cached_sections(self):
        # replacement accessor installed after the first successful parse
        return self.sections
    def get_sections(self):
        # Sections are separated by form-feed + newline; each becomes a
        # list of its lines.  Returns None (falsy) when the file does not
        # start with an integer format version -- is_valid() relies on it.
        SECTION_DIVIDER = '\f\n'
        sections = self.data.split(SECTION_DIVIDER)
        sections = [x for x in map(str.splitlines, sections)]
        try:
            # remove the SVN version number from the first line
            svn_version = int(sections[0].pop(0))
            if not svn_version in self.known_svn_versions.values():
                log.warn("Unknown subversion verson %d", svn_version)
        except ValueError:
            return
        # memoize: swap in the cached accessor so later calls skip parsing
        self.sections = sections
        self.get_sections = self.__get_cached_sections
        return self.sections
    def is_valid(self):
        return bool(self.get_sections())
    def get_url(self):
        # repository URL is the 5th line of the first section
        return self.get_sections()[0][4]
    def parse_revision_numbers(self):
        # committed revision sits on line 10 of each entry section
        revision_line_number = 9
        rev_numbers = [
            int(section[revision_line_number])
            for section in self.get_sections()
            if (len(section) > revision_line_number
                and section[revision_line_number])
        ]
        return rev_numbers
    def get_undeleted_records(self):
        # an entry is live when it has a name and field 6 isn't 'delete'
        undeleted = lambda s: s and s[0] and (len(s) < 6 or s[5] != 'delete')
        result = [
            section[0]
            for section in self.get_sections()
            if undeleted(section)
        ]
        return result
class SVNEntriesFileXML(SVNEntriesFile):
    """Parser for the XML flavour of old ``.svn/entries`` files."""
    def is_valid(self):
        # the XML format carries no version marker worth validating
        return True
    def get_url(self):
        "Get repository URL"
        return re.compile('url="([^"]+)"').search(self.data).group(1)
    def parse_revision_numbers(self):
        """Return every committed revision number recorded in the file."""
        committed = re.compile(r'committed-rev="(\d+)"')
        return [int(match.group(1))
                for match in committed.finditer(self.data)]
    def get_undeleted_records(self):
        """Return the unescaped names of entries not marked deleted."""
        name_re = re.compile(r'name="([^"]+)"(?![^>]+deleted="true")', re.I)
        return [unescape(match.group(1))
                for match in name_re.finditer(self.data)]
if __name__ == '__main__':
    # CLI usage: python svn_utils.py <working-copy-dir>
    # prints every versioned file (externals included), one per line
    for name in svn_finder(sys.argv[1]):
        print(name)
| cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/setuptools/svn_utils.py | Python | mit | 18,879 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
| ivaano/zato | code/zato-web-admin/test/zato/admin/web/views/__init__.py | Python | gpl-3.0 | 154 |
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Checks that msvs_system_include_dirs works.
"""
import TestGyp
import sys
if sys.platform == 'win32':
    # Windows-only: msvs_system_include_dirs is an MSVC feature, so the
    # test generates and builds the fixture project only there.
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
    CHDIR = 'system-include'
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', test.ALL, chdir=CHDIR)
    test.pass_test()
| ibc/MediaSoup | worker/deps/gyp/test/win/gyptest-system-include.py | Python | isc | 476 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <matt@sivel.net>
# (c) 2016, Justin Mayer <https://justinmayer.com/>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# =============================================================================
#
# This script is to be used with vault_password_file or --vault-password-file
# to retrieve the vault password via your OS's native keyring application.
#
# This file *MUST* be saved with executable permissions. Otherwise, Ansible
# will try to parse as a password file and display: "ERROR! Decryption failed"
#
# The `keyring` Python module is required: https://pypi.python.org/pypi/keyring
#
# By default, this script will store the specified password in the keyring of
# the user that invokes the script. To specify a user keyring, add a [vault]
# section to your ansible.cfg file with a 'username' option. Example:
#
# [vault]
# username = 'ansible-vault'
#
# Another optional setting is for the key name, which allows you to use this
# script to handle multiple project vaults with different passwords:
#
# [vault]
# keyname = 'ansible-vault-yourproject'
#
# You can configure the `vault_password_file` option in ansible.cfg:
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring.py
# ...
#
# To set your password, `cd` to your project directory and run:
#
# python /path/to/vault-keyring.py set
#
# If you choose not to configure the path to `vault_password_file` in
# ansible.cfg, your `ansible-playbook` command might look like:
#
# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
# Plugin metadata consumed by Ansible's documentation/support tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
import sys
import getpass
import keyring
import ansible.constants as C
def main():
    """Store or retrieve the Ansible vault password via the OS keyring.

    With a single ``set`` argument: prompt twice for the password and
    store it under the configured key name/username.  Otherwise: print
    the stored password to stdout (the contract expected by
    ``--vault-password-file``) and exit 0.
    """
    (parser, config_path) = C.load_config_file()
    # keyring username: [vault] username in ansible.cfg, else current user
    if parser.has_option('vault', 'username'):
        username = parser.get('vault', 'username')
    else:
        username = getpass.getuser()
    # key name: [vault] keyname in ansible.cfg, else 'ansible'
    if parser.has_option('vault', 'keyname'):
        keyname = parser.get('vault', 'keyname')
    else:
        keyname = 'ansible'
    if len(sys.argv) == 2 and sys.argv[1] == 'set':
        intro = 'Storing password in "{}" user keyring using key name: {}\n'
        sys.stdout.write(intro.format(username, keyname))
        password = getpass.getpass()
        confirm = getpass.getpass('Confirm password: ')
        if password == confirm:
            keyring.set_password(keyname, username, password)
        else:
            sys.stderr.write('Passwords do not match\n')
            sys.exit(1)
    else:
        sys.stdout.write('{}\n'.format(keyring.get_password(keyname,
                                                            username)))
        sys.exit(0)
# Executed directly by ansible when used as a --vault-password-file.
if __name__ == '__main__':
    main()
| GustavoHennig/ansible | contrib/vault/vault-keyring.py | Python | gpl-3.0 | 3,430 |
import logging as log
import sys
import getopt
import os
import subprocess
import shutil
def RunCMake(workspace, target, platform):
    # Generate native build files via the platform's gen-buildsys script.
    # Returns the subprocess exit code (0 on success); 'target' (e.g.
    # DEBUG) is only passed through on linux.
    # run CMake
    print "\n==================================================\n"
    returncode = 0
    if platform == "windows":
        print "Running: vcvarsall.bat x86_amd64 && " + workspace + "\ProjectK\NDP\clr\src\pal\\tools\gen-buildsys-win.bat " + workspace + "\ProjectK\NDP\clr"
        print "\n==================================================\n"
        sys.stdout.flush()
        returncode = subprocess.call(["vcvarsall.bat", "x86_amd64", "&&", workspace + "\ProjectK\NDP\clr\src\pal\\tools\gen-buildsys-win.bat", workspace + "\ProjectK\NDP\clr"])
    elif platform == "linux":
        print "Running: " + workspace + "/ProjectK/NDP/clr/src/pal/tools/gen-buildsys-clang.sh " + workspace + "/ProjectK/NDP/clr DEBUG"
        print "\n==================================================\n"
        sys.stdout.flush()
        returncode = subprocess.call(workspace + "/ProjectK/NDP/clr/src/pal/tools/gen-buildsys-clang.sh " + workspace + "/ProjectK/NDP/clr " + target, shell=True)
    if returncode != 0:
        print "ERROR: cmake failed with exit code " + str(returncode)
    return returncode
def RunBuild(target, platform, arch):
    # Dispatch to the platform-specific build driver and return its exit
    # code (None for an unrecognized platform).
    if platform == "linux":
        return RunMake()
    if platform == "windows":
        return RunMsBuild(target, arch)
def RunMsBuild(target, arch):
    # Build CoreCLR.sln with MSBuild inside a VS x86_amd64 environment;
    # returns the subprocess exit code.
    # run MsBuild
    print "\n==================================================\n"
    print "Running: vcvarsall.bat x86_amd64 && msbuild CoreCLR.sln /p:Configuration=" + target + " /p:Platform=" + arch
    print "\n==================================================\n"
    sys.stdout.flush()
    returncode = subprocess.call(["vcvarsall.bat","x86_amd64","&&","msbuild","CoreCLR.sln","/p:Configuration=" + target,"/p:Platform=" + arch])
    if returncode != 0:
        print "ERROR: vcvarsall.bat failed with exit code " + str(returncode)
    return returncode
def RunMake():
    # Run 'make' in the current directory (linux build); returns the
    # subprocess exit code.
    print "\n==================================================\n"
    print "Running: make"
    print "\n==================================================\n"
    sys.stdout.flush()
    returncode = subprocess.call(["make"])
    if returncode != 0:
        print "ERROR: make failed with exit code " + str(returncode)
    return returncode
def Compile(workspace, target, platform, arch):
    # Full pipeline: generate build files, then build. Returns the exit
    # code of the first failing step (or of the build step on success).
    returncode = RunCMake(workspace, target, platform)
    if returncode != 0:
        return returncode
    # returncode is 0 here, so += is effectively an assignment
    returncode += RunBuild(target, platform, arch)
    return returncode
| Godin/coreclr | src/pal/automation/compile.py | Python | mit | 2,660 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import workflow
class sale_order_line_make_invoice(osv.osv_memory):
    # Transient wizard: create customer invoices from the sale order
    # lines selected in the client (context['active_ids']).
    _name = "sale.order.line.make.invoice"
    _description = "Sale OrderLine Make_invoice"
    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        # Build the account.invoice values dict for one sale order.
        # 'lines' are account.invoice.line ids, linked via the (6, 0, ids)
        # replace-all command.
        a = order.partner_id.property_account_receivable.id
        if order.partner_id and order.partner_id.property_payment_term.id:
            pay_term = order.partner_id.property_payment_term.id
        else:
            pay_term = False
        return {
            'name': order.client_order_ref or '',
            'origin': order.name,
            'type': 'out_invoice',
            'reference': "P%dSO%d" % (order.partner_id.id, order.id),
            'account_id': a,
            'partner_id': order.partner_invoice_id.id,
            'invoice_line': [(6, 0, lines)],
            'currency_id' : order.pricelist_id.currency_id.id,
            'comment': order.note,
            'payment_term': pay_term,
            'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
            'user_id': order.user_id and order.user_id.id or False,
            'company_id': order.company_id and order.company_id.id or False,
            'date_invoice': fields.date.today(),
            'section_id': order.section_id.id,
        }
    def make_invoices(self, cr, uid, ids, context=None):
        """
        To make invoices.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: the ID or list of IDs
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None: context = {}
        res = False
        invoices = {}
        #TODO: merge with sale.py/make_invoice
        def make_invoice(order, lines):
            """
            To make invoices.
            @param order:
            @param lines:
            @return:
            """
            inv = self._prepare_invoice(cr, uid, order, lines)
            inv_id = self.pool.get('account.invoice').create(cr, uid, inv)
            return inv_id
        sales_order_line_obj = self.pool.get('sale.order.line')
        sales_order_obj = self.pool.get('sale.order')
        # group the invoiceable selected lines by their sale order
        for line in sales_order_line_obj.browse(cr, uid, context.get('active_ids', []), context=context):
            if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
                if not line.order_id in invoices:
                    invoices[line.order_id] = []
                line_id = sales_order_line_obj.invoice_line_create(cr, uid, [line.id])
                for lid in line_id:
                    invoices[line.order_id].append(lid)
        # one invoice per order; link it through sale_order_invoice_rel
        for order, il in invoices.items():
            res = make_invoice(order, il)
            cr.execute('INSERT INTO sale_order_invoice_rel \
                    (order_id,invoice_id) values (%s,%s)', (order.id, res))
            sales_order_obj.invalidate_cache(cr, uid, ['invoice_ids'], [order.id], context=context)
            flag = True
            sales_order_obj.message_post(cr, uid, [order.id], body=_("Invoice created"), context=context)
            data_sale = sales_order_obj.browse(cr, uid, order.id, context=context)
            # flag stays True only when every line of the order is now
            # invoiced (or cancelled)
            for line in data_sale.order_line:
                if not line.invoiced and line.state != 'cancel':
                    flag = False
                    break
            if flag:
                # NOTE(review): 'line' here is the inner loop variable,
                # which shadows the outer one; line.order_id is the same
                # order being processed, so behavior is as intended.
                line.order_id.write({'state': 'progress'})
                workflow.trg_validate(uid, 'sale.order', order.id, 'all_lines', cr)
        if not invoices:
            raise osv.except_osv(_('Warning!'), _('Invoice cannot be created for this Sales Order Line due to one of the following reasons:\n1.The state of this sales order line is either "draft" or "cancel"!\n2.The Sales Order Line is Invoiced!'))
        if context.get('open_invoices', False):
            return self.open_invoices(cr, uid, ids, res, context=context)
        return {'type': 'ir.actions.act_window_close'}
    def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
        """ open a view on one of the given invoice_ids """
        ir_model_data = self.pool.get('ir.model.data')
        form_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_form')
        form_id = form_res and form_res[1] or False
        tree_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_tree')
        tree_id = tree_res and tree_res[1] or False
        return {
            'name': _('Invoice'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'account.invoice',
            'res_id': invoice_ids,
            'view_id': False,
            'views': [(form_id, 'form'), (tree_id, 'tree')],
            'context': {'type': 'out_invoice'},
            'type': 'ir.actions.act_window',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| solintegra/addons | sale/wizard/sale_line_invoice.py | Python | agpl-3.0 | 6,126 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple wrapper around enum types to expose utility functions.
Instances are created as properties with the same name as the enum they wrap
on proto classes. For usage, see:
reflection_test.py
"""
__author__ = 'rabsatt@google.com (Kevin Rabsatt)'
class EnumTypeWrapper(object):
  """Exposes name/number lookup helpers over a wrapped EnumDescriptor."""

  DESCRIPTOR = None

  def __init__(self, enum_type):
    """Wraps the given EnumDescriptor, mirroring it as DESCRIPTOR."""
    self._enum_type = enum_type
    self.DESCRIPTOR = enum_type

  def Name(self, number):
    """Maps an enum value number to its declared name."""
    if number not in self._enum_type.values_by_number:
      raise ValueError('Enum %s has no name defined for value %d' % (
          self._enum_type.name, number))
    return self._enum_type.values_by_number[number].name

  def Value(self, name):
    """Maps an enum value name to its number."""
    if name not in self._enum_type.values_by_name:
      raise ValueError('Enum %s has no value defined for name %s' % (
          self._enum_type.name, name))
    return self._enum_type.values_by_name[name].number

  def keys(self):
    """Names of the enum values, in .proto declaration order."""
    return [descriptor.name for descriptor in self._enum_type.values]

  def values(self):
    """Numbers of the enum values, in .proto declaration order."""
    return [descriptor.number for descriptor in self._enum_type.values]

  def items(self):
    """(name, number) pairs of the enum values, in declaration order."""
    return [(descriptor.name, descriptor.number)
            for descriptor in self._enum_type.values]
| cherrishes/weilai | xingxing/protobuf/python/lib/Python3.4/google/protobuf/internal/enum_type_wrapper.py | Python | apache-2.0 | 3,554 |
from __future__ import absolute_import
from django.conf.urls import url
from . import views as test_views
# Route table for the drf-tracking test suite: each endpoint exercises a
# mock view with a different logging configuration.
urlpatterns = [
    url(r'^no-logging$', test_views.MockNoLoggingView.as_view()),
    url(r'^logging$', test_views.MockLoggingView.as_view()),
    url(r'^slow-logging$', test_views.MockSlowLoggingView.as_view()),
    url(r'^explicit-logging$', test_views.MockExplicitLoggingView.as_view()),
    url(r'^unsafe-methods-logging$', test_views.MockUnsafeMethodsLoggingView.as_view()),
    url(r'^no-response-save-logging$', test_views.MockNotSaveResponseLoggingView.as_view()),
    url(r'^session-auth-logging$', test_views.MockSessionAuthLoggingView.as_view()),
    url(r'^token-auth-logging$', test_views.MockTokenAuthLoggingView.as_view()),
    url(r'^json-logging$', test_views.MockJSONLoggingView.as_view()),
    url(r'^validation-error-logging$', test_views.MockValidationErrorLoggingView.as_view()),
    url(r'^404-error-logging$', test_views.Mock404ErrorLoggingView.as_view()),
    url(r'^500-error-logging$', test_views.Mock500ErrorLoggingView.as_view()),
    url(r'^415-error-logging$', test_views.Mock415ErrorLoggingView.as_view()),
    url(r'^only-error-logging$', test_views.MockOnlyErrorLoggingView.as_view()),
    url(r'^no-view-log$', test_views.MockNameAPIView.as_view()),
    url(r'^view-log$', test_views.MockNameViewSet.as_view({'get': 'list'})),
    url(r'^400-body-parse-error-logging$', test_views.Mock400BodyParseErrorLoggingView.as_view()),
]
| frankie567/drf-tracking | tests/urls.py | Python | isc | 1,472 |
#!/usr/bin/env python3
from struct import pack, unpack
from datetime import date
from pathlib import Path
import os.path
import argparse
import sys
import re
# Name of the OpenMW configuration file to search for.
configFilename = 'openmw.cfg'
# Default per-OS locations of openmw.cfg (keys match sys.platform).
configPaths = { 'linux':   '~/.config/openmw',
                'freebsd': '~/.config/openmw',
                'darwin':  '~/Library/Preferences/openmw' }
# Default per-OS locations of the user's mod data directory.
modPaths = { 'linux':   '~/.local/share/openmw/data',
             'freebsd': '~/.local/share/openmw/data',
             'darwin':  '~/Library/Application Support/openmw/data' }
def packLong(i):
    """Serialize *i* as a signed little-endian 32-bit integer (4 bytes)."""
    # little-endian, "standard" 4-bytes (old 32-bit systems)
    return pack('<l', i)
def packString(s):
    """Encode *s* as raw ASCII bytes (no terminator, no padding)."""
    return s.encode('ascii')
def packPaddedString(s, l):
    """Encode *s* as ASCII into exactly *l* bytes.

    Short strings are right-padded with NULs.  Strings that do not fit
    are truncated to l-1 bytes so the final byte is a NUL terminator
    (a string of exactly l bytes is kept whole, unterminated).
    """
    encoded = s.encode('ascii')
    if len(encoded) <= l:
        return encoded + bytes(l - len(encoded))
    # too long: keep room for the trailing NUL terminator
    return encoded[:l - 1] + b'\x00'
def parseString(ba):
    """Decode an ASCII string from *ba*, stopping at the first NUL byte.

    When no NUL terminator is present the whole buffer is decoded.  (The
    previous code sliced with find()'s -1 sentinel, silently dropping the
    last byte of unterminated strings.)  Undecodable bytes are ignored.
    """
    i = ba.find(0)
    if i == -1:
        i = len(ba)
    return ba[:i].decode(encoding='ascii', errors='ignore')
def parseNum(ba):
    """Interpret *ba* as an unsigned little-endian integer (0 if empty)."""
    return int.from_bytes(ba, 'little')
def parseFloat(ba):
    """Interpret the 4 bytes of *ba* as a native-order 32-bit float."""
    (value,) = unpack('f', ba)
    return value
def parseLEV(rec):
    """Convert a raw LEVI/LEVC record into a plain dict.

    Returns keys: type, name, calcfrom (flags), chancenone, file (source
    plugin basename) and items (list of (level, id) tuples).
    """
    levrec = {}
    sr = rec['subrecords']
    levrec['type'] = rec['type']
    levrec['name'] = parseString(sr[0]['data'])
    levrec['calcfrom'] = parseNum(sr[1]['data'])
    levrec['chancenone'] = parseNum(sr[2]['data'])
    levrec['file'] = os.path.basename(rec['fullpath'])
    # Apparently, you can have LEV records that end before
    # the INDX subrecord. Found those in Tamriel_Data.esm
    if len(sr) > 3:
        listcount = parseNum(sr[3]['data'])
        listitems = []
        # entries come as alternating id/level subrecord pairs after INDX
        for i in range(0,listcount*2,2):
            itemid = parseString(sr[4+i]['data'])
            itemlvl = parseNum(sr[5+i]['data'])
            listitems.append((itemlvl, itemid))
        levrec['items'] = listitems
    else:
        levrec['items'] = []
    return levrec
def parseTES3(rec):
    """Parse a TES3 plugin-header record.

    Returns keys: version, filetype, author, desc, numrecords and
    masters (list of (filename, size) tuples).
    """
    tesrec = {}
    sr = rec['subrecords']
    # HEDR subrecord: fixed offsets for version/type/author/description
    tesrec['version'] = parseFloat(sr[0]['data'][0:4])
    tesrec['filetype'] = parseNum(sr[0]['data'][4:8])
    tesrec['author'] = parseString(sr[0]['data'][8:40])
    tesrec['desc'] = parseString(sr[0]['data'][40:296])
    tesrec['numrecords'] = parseNum(sr[0]['data'][296:300])
    masters = []
    # master files come as alternating MAST (name) / DATA (size) pairs
    for i in range(1, len(sr), 2):
        mastfile = parseString(sr[i]['data'])
        mastsize = parseNum(sr[i+1]['data'])
        masters.append((mastfile, mastsize))
    tesrec['masters'] = masters
    return tesrec
def pullSubs(rec, subtype):
    """Return the subrecords of *rec* whose type equals *subtype*."""
    matching = []
    for sub in rec['subrecords']:
        if sub['type'] == subtype:
            matching.append(sub)
    return matching
def readHeader(ba):
    """Parse a record header: 4-char ASCII type tag + little-endian length."""
    return {
        'type': ba[0:4].decode(),
        'length': int.from_bytes(ba[4:8], 'little'),
    }
def readSubRecord(ba):
    """Split one subrecord off the front of *ba*.

    Returns (subrecord_dict, remaining_bytes); the dict carries the
    4-char type tag, the payload length, and the payload itself.
    """
    tag = ba[0:4].decode()
    length = int.from_bytes(ba[4:8], 'little')
    end = 8 + length
    subrecord = {'type': tag, 'length': length, 'data': ba[8:end]}
    return (subrecord, ba[end:])
def readRecords(filename):
    """Yield every record in the given .esm/.esp plugin file.

    Each yielded dict has 'type', 'length', 'subrecords' and 'fullpath'
    (the source filename, stashed for later reporting).
    """
    # 'with' guarantees the handle is closed even when the caller
    # abandons the generator early (the original never closed it).
    with open(filename, 'rb') as fh:
        while True:
            headerba = fh.read(16)
            if len(headerba) < 16:
                # end of file (or truncated trailing header)
                return
            header = readHeader(headerba)
            record = {
                'type': header['type'],
                'length': header['length'],
                'subrecords': [],
                # stash the filename here (a bit hacky, but useful)
                'fullpath': filename,
            }
            remains = fh.read(header['length'])
            while len(remains) > 0:
                (subrecord, restofbytes) = readSubRecord(remains)
                record['subrecords'].append(subrecord)
                remains = restofbytes
            yield record
def oldGetRecords(filename, rectype):
    """Yield only the records of *rectype* from the given plugin file."""
    for record in readRecords(filename):
        if record['type'] == rectype:
            yield record
def getRecords(filename, rectypes):
    """Collect records of several types in one pass over the file.

    Returns a list of lists parallel to *rectypes*: result[i] holds the
    records whose type equals rectypes[i] (a type listed twice fills
    both buckets, matching the original behavior).
    """
    numtypes = len(rectypes)
    retval = [ [] for x in range(numtypes) ]
    # precompute type -> bucket indices once, instead of rescanning the
    # rectypes sequence for every record in the file
    bucket_index = {}
    for i, rectype in enumerate(rectypes):
        bucket_index.setdefault(rectype, []).append(i)
    for r in readRecords(filename):
        for i in bucket_index.get(r['type'], ()):
            retval[i].append(r)
    return retval
def packStringSubRecord(lbl, strval):
    """Pack a NUL-terminated string subrecord: label + length + payload."""
    payload = packString(strval) + bytes(1)
    return packString(lbl) + packLong(len(payload)) + payload
def packIntSubRecord(lbl, num, numsize=4):
    """Pack an integer subrecord: 4-char label + length + little-endian
    signed integer of *numsize* (1, 2, 4 or 8) bytes."""
    # This is interesting. The 'pack' function from struct works fine like this:
    #
    # >>> pack('<l', 200)
    # b'\xc8\x00\x00\x00'
    #
    # but breaks if you make that format string a non-literal:
    #
    # >>> fs = '<l'
    # >>> pack(fs, 200)
    # Traceback (most recent call last):
    #   File "<stdin>", line 1, in <module>
    # struct.error: repeat count given without format specifier
    #
    # This is as of Python 3.5.2
    #
    # NOTE(review): current CPython accepts variable format strings; the
    # literal-per-size branches below are kept as-is regardless.
    num_bs = b''
    if numsize == 4:
        # "standard" 4-byte longs, little-endian
        num_bs = pack('<l', num)
    elif numsize == 2:
        num_bs = pack('<h', num)
    elif numsize == 1:
        # don't think endian-ness matters for bytes, but consistency
        num_bs = pack('<b', num)
    elif numsize == 8:
        num_bs = pack('<q', num)
    return packString(lbl) + packLong(numsize) + num_bs
def packLEV(rec):
    """Serialize a parsed/merged leveled-list dict back into a binary
    LEVC (creature) or LEVI (item) record."""
    start_bs = b''
    id_bs = ''
    # creature lists use CNAM for entry ids, item lists use INAM
    if rec['type'] == 'LEVC':
        start_bs += b'LEVC'
        id_bs = 'CNAM'
    else:
        start_bs += b'LEVI'
        id_bs = 'INAM'
    headerflags_bs = bytes(8)
    name_bs = packStringSubRecord('NAME', rec['name'])
    calcfrom_bs = packIntSubRecord('DATA', rec['calcfrom'])
    chance_bs = packIntSubRecord('NNAM', rec['chancenone'], 1)
    subrec_bs = packIntSubRecord('INDX', len(rec['items']))
    # each entry is an id subrecord followed by a 2-byte level subrecord
    for (lvl, lid) in rec['items']:
        subrec_bs += packStringSubRecord(id_bs, lid)
        subrec_bs += packIntSubRecord('INTV', lvl, 2)
    reclen = len(name_bs) + len(calcfrom_bs) + len(chance_bs) + len(subrec_bs)
    reclen_bs = packLong(reclen)
    return start_bs + reclen_bs + headerflags_bs + \
        name_bs + calcfrom_bs + chance_bs + subrec_bs
def packTES3(desc, numrecs, masters):
    """Build the binary TES3 header record for the generated plugin.

    desc: description string; numrecs: number of records that follow;
    masters: list of (filename, size) dependency tuples.
    """
    start_bs = b'TES3'
    headerflags_bs = bytes(8)
    # HEDR payload is always 300 bytes
    hedr_bs = b'HEDR' + packLong(300)
    version_bs = pack('<f', 1.0)
    # .esp == 0, .esm == 1, .ess == 32
    # suprisingly, .omwaddon == 0, also -- figured it would have its own
    ftype_bs = bytes(4)
    author_bs = packPaddedString('omwllf, copyright 2017, jmelesky', 32)
    desc_bs = packPaddedString(desc, 256)
    numrecs_bs = packLong(numrecs)
    masters_bs = b''
    for (m, s) in masters:
        masters_bs += packStringSubRecord('MAST', m)
        masters_bs += packIntSubRecord('DATA', s, 8)
    reclen = len(hedr_bs) + len(version_bs) + len(ftype_bs) + len(author_bs) +\
        len(desc_bs) + len(numrecs_bs) + len(masters_bs)
    reclen_bs = packLong(reclen)
    return start_bs + reclen_bs + headerflags_bs + \
        hedr_bs + version_bs + ftype_bs + author_bs + \
        desc_bs + numrecs_bs + masters_bs
def ppSubRecord(sr):
    """Pretty-print one subrecord, decoding known string/number types."""
    if sr['type'] in ['NAME', 'INAM', 'CNAM']:
        print("  %s, length %d, value '%s'" % (sr['type'], sr['length'], parseString(sr['data'])))
    elif sr['type'] in ['DATA', 'NNAM', 'INDX', 'INTV']:
        print("  %s, length %d, value '%s'" % (sr['type'], sr['length'], parseNum(sr['data'])))
    else:
        print("  %s, length %d" % (sr['type'], sr['length']))
def ppRecord(rec):
    """Pretty-print a record header followed by all of its subrecords."""
    print("%s, length %d" % (rec['type'], rec['length']))
    for sr in rec['subrecords']:
        ppSubRecord(sr)
def ppLEV(rec):
    """Pretty-print a parsed leveled list (creature or item flavour)."""
    if rec['type'] == 'LEVC':
        print("Creature list '%s' from '%s':" % (rec['name'], rec['file']))
    else:
        print("Item list '%s' from '%s':" % (rec['name'], rec['file']))
    print("flags: %d, chance of none: %d" % (rec['calcfrom'], rec['chancenone']))
    for (lvl, lid) in rec['items']:
        print("  %2d - %s" % (lvl, lid))
def ppTES3(rec):
    """Pretty-print a parsed TES3 plugin header and its master list."""
    print("TES3 record, type %d, version %f" % (rec['filetype'], rec['version']))
    print("author: %s" % rec['author'])
    print("description: %s" % rec['desc'])
    for (mfile, msize) in rec['masters']:
        print("  master %s, size %d" % (mfile, msize))
    print()
def mergeableLists(alllists):
    """Group leveled lists by name, keeping only names defined in more
    than one source (those are the lists that need merging)."""
    by_name = {}
    for lev in alllists:
        by_name.setdefault(lev['name'], []).append(lev)
    return {name: levs for name, levs in by_name.items() if len(levs) > 1}
def mergeLists(lls):
    """Merge several same-named leveled lists into one.

    List-level attributes come from the last list (load order wins).
    For each distinct (level, id) entry the merged list carries as many
    copies as the single source list that repeated it most -- this keeps
    the core game's duplicated entries from drowning out plugin
    additions as more plugins are merged in.
    """
    winner = lls[-1]
    merged = {
        'type': winner['type'],
        'name': winner['name'],
        'calcfrom': winner['calcfrom'],
        'chancenone': winner['chancenone'],
    }
    merged['files'] = [source['file'] for source in lls]
    merged['file'] = ', '.join(merged['files'])
    combined = []
    for source in lls:
        combined.extend(source['items'])
    items = []
    for entry in sorted(set(combined)):
        copies = max(source['items'].count(entry) for source in lls)
        items.extend([entry] * copies)
    merged['items'] = items
    return merged
def mergeAllLists(alllists):
    """Merge every group of same-named lists; singletons are dropped."""
    return [mergeLists(group)
            for group in mergeableLists(alllists).values()]
def readCfg(cfg):
# first, open the file and pull all 'data' and 'content' lines, in order
data_dirs = []
mods = []
with open(cfg, 'r') as f:
for l in f.readlines():
# match of form "blah=blahblah"
m = re.search(r'^(.*)=(.*)$', l)
if m:
varname = m.group(1).strip()
# get rid of not only whitespace, but also surrounding quotes
varvalue = m.group(2).strip().strip('\'"')
if varname == 'data':
data_dirs.append(varvalue)
elif varname == 'content':
mods.append(varvalue)
# we've got the basenames of the mods, but not the full paths
# and we have to search through the data_dirs to find them
fp_mods = []
for m in mods:
for p in data_dirs:
full_path = os.path.join(p, m)
if os.path.exists(full_path):
fp_mods.append(full_path)
break
print("Config file parsed...")
return fp_mods
def dumplists(cfg):
    """Debug helper: pretty-print headers and all leveled lists of every mod."""
    fp_mods = readCfg(cfg)
    # Headers first, for every mod...
    for path in fp_mods:
        for raw in oldGetRecords(path, 'TES3'):
            ppTES3(parseTES3(raw))
    # ...then item lists from every mod, then creature lists.
    llists = []
    for rectype in ('LEVI', 'LEVC'):
        for path in fp_mods:
            llists.extend(parseLEV(raw) for raw in oldGetRecords(path, rectype))
    for llist in llists:
        ppLEV(llist)
def main(cfg, outmoddir, outmod):
    """Merge leveled lists from every mod listed in *cfg* into one module.

    Reads the OpenMW config, collects TES3/LEVI/LEVC records from each
    enabled mod, merges same-named lists, and writes a new .omwaddon file
    at *outmod* (creating *outmoddir* first if needed).
    """
    fp_mods = readCfg(cfg)
    # first, let's grab the "raw" records from the files
    (rtes3, rlevi, rlevc) = ([], [], [])
    for f in fp_mods:
        print("Parsing '%s' for relevant records" % f)
        (rtes3t, rlevit, rlevct) = getRecords(f, ('TES3', 'LEVI', 'LEVC'))
        rtes3 += rtes3t
        rlevi += rlevit
        rlevc += rlevct
    # next, parse the tes3 records so we can get a list
    # of master files required by all our mods
    tes3list = [ parseTES3(x) for x in rtes3 ]
    masters = {}
    for t in tes3list:
        for m in t['masters']:
            # later mods win if two mods disagree about a master's size
            masters[m[0]] = m[1]
    master_list = [ (k,v) for (k,v) in masters.items() ]
    # now, let's parse the levi and levc records into
    # mergeable lists, then merge them
    # creature lists
    clist = [ parseLEV(x) for x in rlevc ]
    levc = mergeAllLists(clist)
    # item lists
    ilist = [ parseLEV(x) for x in rlevi ]
    levi = mergeAllLists(ilist)
    # now build the binary representation of
    # the merged lists.
    # along the way, build up the module
    # description for the new merged mod, out
    # of the names of mods that had lists
    llist_bc = b''
    pluginlist = []
    for x in levi + levc:
        # ppLEV(x)
        llist_bc += packLEV(x)
        pluginlist += x['files']
    plugins = set(pluginlist)
    moddesc = "Merged leveled lists from: %s" % ', '.join(plugins)
    # finally, build the binary form of the
    # TES3 record, and write the whole thing
    # out to disk
    if not os.path.exists(outmoddir):
        p = Path(outmoddir)
        p.mkdir(parents=True)
    with open(outmod, 'wb') as f:
        f.write(packTES3(moddesc, len(levi + levc), master_list))
        f.write(llist_bc)
    # And give some hopefully-useful instructions
    modShortName = os.path.basename(outmod)
    print("\n\n****************************************")
    print(" Great! I think that worked. When you next start the OpenMW Launcher, look for a module named %s. Make sure of the following things:" % modShortName)
    print(" 1. %s is at the bottom of the list. Drag it to the bottom if it's not. It needs to load last." % modShortName)
    print(" 2. %s is checked (enabled)" % modShortName)
    print(" 3. Any other OMWLLF mods are *un*checked. Loading them might not cause problems, but probably will")
    print("\n")
    print(" Then, go ahead and start the game! Your leveled lists should include adjustments from all relevant enabled mods")
    print("\n")
if __name__ == '__main__':
    # Command-line entry point: locate the OpenMW config and output paths,
    # then either dump the lists (debug) or build the merged module.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--conffile', type = str, default = None,
                        action = 'store', required = False,
                        help = 'Conf file to use. Optional. By default, attempts to use the default conf file location.')
    parser.add_argument('-d', '--moddir', type = str, default = None,
                        action = 'store', required = False,
                        help = 'Directory to store the new module in. By default, attempts to use the default work directory for OpenMW-CS')
    parser.add_argument('-m', '--modname', type = str, default = None,
                        action = 'store', required = False,
                        help = 'Name of the new module to create. By default, this is "OMWLLF Mod - <today\'s date>.omwaddon.')
    parser.add_argument('--dumplists', default = False,
                        action = 'store_true', required = False,
                        help = 'Instead of generating merged lists, dump all leveled lists in the conf mods. Used for debugging')
    p = parser.parse_args()
    # determine the conf file to use
    confFile = ''
    if p.conffile:
        confFile = p.conffile
    else:
        pl = sys.platform
        if pl in configPaths:
            baseDir = os.path.expanduser(configPaths[pl])
            confFile = os.path.join(baseDir, configFilename)
        elif pl == 'win32':
            # this is ugly. first, imports that only work properly on windows
            from ctypes import *
            import ctypes.wintypes
            buf = create_unicode_buffer(ctypes.wintypes.MAX_PATH)
            # opaque arguments. they are, roughly, for our purposes:
            #   - an indicator of folder owner (0 == current user)
            #   - an id for the type of folder (5 == 'My Documents')
            #   - an indicator for user to call from (0 same as above)
            #   - a bunch of flags for different things
            #     (if you want, for example, to get the default path
            #      instead of the actual path, or whatnot)
            #     0 == current stuff
            #   - the variable to hold the return value
            windll.shell32.SHGetFolderPathW(0, 5, 0, 0, buf)
            # pull out the return value and construct the rest
            baseDir = os.path.join(buf.value, 'My Games', 'OpenMW')
            confFile = os.path.join(baseDir, configFilename)
        else:
            # BUGFIX: report the platform name, not the whole argparse namespace
            print("Sorry, I don't recognize the platform '%s'. You can try specifying the conf file using the '-c' flag." % pl)
            sys.exit(1)
    baseModDir = ''
    if p.moddir:
        baseModDir = p.moddir
    else:
        pl = sys.platform
        if pl in configPaths:
            # NOTE(review): this checks configPaths but indexes modPaths;
            # presumably the two dicts share keys -- confirm upstream.
            baseModDir = os.path.expanduser(modPaths[pl])
        elif pl == 'win32':
            # this is ugly in exactly the same ways as above.
            # see there for more information
            from ctypes import *
            import ctypes.wintypes
            buf = create_unicode_buffer(ctypes.wintypes.MAX_PATH)
            windll.shell32.SHGetFolderPathW(0, 5, 0, 0, buf)
            baseDir = os.path.join(buf.value, 'My Games', 'OpenMW')
            baseModDir = os.path.join(baseDir, 'data')
        else:
            # BUGFIX: print the platform name, and point at the '-d' flag,
            # which is the one that controls the mod directory.
            print("Sorry, I don't recognize the platform '%s'. You can try specifying the mod directory using the '-d' flag." % pl)
            sys.exit(1)
    if not os.path.exists(confFile):
        print("Sorry, the conf file '%s' doesn't seem to exist." % confFile)
        sys.exit(1)
    modName = ''
    if p.modname:
        modName = p.modname
    else:
        modName = 'OMWLLF Mod - %s.omwaddon' % date.today().strftime('%Y-%m-%d')
    modFullPath = os.path.join(baseModDir, modName)
    if p.dumplists:
        dumplists(confFile)
    else:
        main(confFile, baseModDir, modFullPath)
# regarding the windows path detection:
#
# "SHGetFolderPath" is deprecated in favor of "SHGetKnownFolderPath", but
# >>> windll.shell32.SHGetKnownFolderPath('{FDD39AD0-238F-46AF-ADB4-6C85480369C7}', 0, 0, buf2)
# -2147024894
| jmelesky/omwllf | omwllf.py | Python | isc | 17,912 |
#!/bin/python3
import sys
def solve(a0, a1, a2, b0, b1, b2):
    """Score Alice vs. Bob: one point per category won outright.

    Returns [alice_points, bob_points]; tied categories award nothing.
    """
    score = [0, 0]
    for mine, theirs in zip((a0, a1, a2), (b0, b1, b2)):
        if mine > theirs:
            score[0] += 1
        elif mine < theirs:
            score[1] += 1
    return score
# Read two space-separated rating triplets from stdin: Alice's, then Bob's.
a0, a1, a2 = input().strip().split(' ')
a0, a1, a2 = [int(a0), int(a1), int(a2)]
b0, b1, b2 = input().strip().split(' ')
b0, b1, b2 = [int(b0), int(b1), int(b2)]
result = solve(a0, a1, a2, b0, b1, b2)
# Print "<alice_score> <bob_score>" on one line.
print (" ".join(map(str, result)))
| Krakn/learning | src/python/hackerrank/algorithms/compare_the_triplets/compare_the_triplets.py | Python | isc | 597 |
import os, re, traceback
from dirtree_node import get_file_info
from util import is_text_file
class SearchAborted(Exception):
    """Raised internally to unwind the search when stop() was requested."""
    pass
def null_filter(info):
    """Default filter: accept every file or directory info object."""
    return True
class Search(object):
    """Walks a directory tree and reports lines matching a predicate.

    `match` is a callable taking one line of text and returning truthy on
    a hit. Results are streamed to `output`, which must provide
    begin_file/add_file/add_line/end_file/end_find/abort_find callbacks.
    `file_filter` and `dir_filter` receive a file-info object and decide
    which entries are visited. Call stop() (e.g. from another thread) to
    abort a running search.
    """
    def __init__(self, path, match, output, file_filter=null_filter, dir_filter=null_filter):
        self.path = path
        self.match = match
        self.output = output
        self.file_filter = file_filter
        self.dir_filter = dir_filter
        self.encoding = "utf-8"
        # Set by stop(); checked at every step so the walk can be aborted.
        self.quit = False
    def _search_file(self, filepath):
        # Scan a single file; report matching lines lazily.
        if self.quit:
            raise SearchAborted()
        self.output.begin_file(self, filepath)
        # Skip files that don't look like text.
        if not is_text_file(filepath):
            return
        with open(filepath, "r") as f:
            matched_file = False
            for line_num, line in enumerate(f, 1):
                line = line.rstrip("\r\n")
                try:
                    line = line.decode(self.encoding)
                except UnicodeDecodeError:
                    # latin-1 can decode any byte sequence, so this is a
                    # safe fallback for non-UTF-8 files.
                    line = line.decode("latin-1")
                if self.match(line):
                    if not matched_file:
                        # Announce the file only on its first hit.
                        self.output.add_file(self, filepath)
                        matched_file = True
                    self.output.add_line(self, line_num, line)
                if self.quit:
                    raise SearchAborted()
            if matched_file:
                self.output.end_file(self)
    def _search_dir(self, dirpath):
        # Recurse into a directory, visiting entries in sorted order.
        if self.quit:
            raise SearchAborted()
        try:
            dirlist = os.listdir(dirpath)
        except OSError:
            # Unreadable directory: skip silently.
            pass
        else:
            dirlist.sort()
            for name in dirlist:
                self._search(dirpath, name)
    def _search(self, dirpath, name):
        # Dispatch a single entry to the file or directory handler,
        # honoring the configured filters.
        if self.quit:
            raise SearchAborted()
        try:
            info = get_file_info(dirpath, name)
            if info.is_file and self.file_filter(info):
                self._search_file(info.path)
            elif info.is_dir and self.dir_filter(info):
                self._search_dir(info.path)
        except OSError:
            pass
    def search(self):
        """Run the search from the root path until done or stopped."""
        self.quit = False
        try:
            self._search(*os.path.split(self.path))
        except SearchAborted:
            self.output.abort_find(self)
        except Exception as e:
            self.output.end_find(self)
            if not isinstance(e, (OSError, IOError)):
                # BUGFIX: parenthesized call form is valid on both
                # Python 2 and 3 (the bare print statement was py2-only).
                print(traceback.format_exc())
        else:
            self.output.end_find(self)
    def stop(self):
        """Request that a running search abort as soon as possible."""
        self.quit = True
class SearchFileOutput(object):
def __init__(self, file):
self.file = file
self.max_line_length = 100
def add_file(self, finder, filepath):
self.file.write(filepath + "\n")
def add_line(self, finder, line_num, line):
if len(line) > self.max_line_length:
line = line[:self.max_line_length] + "..."
self.file.write(" %d: %s\n" % (line_num, line))
def begin_file(self, finder, filepath):
pass
def end_file(self, finder):
self.file.write("\n")
def end_find(self, finder):
pass
def make_matcher(pattern, case_sensitive=True, is_regexp=False):
    """Compile *pattern* into a callable that searches one line of text.

    Literal patterns are escaped; matching is Unicode-aware and optionally
    case-insensitive. Returns the bound ``search`` method of the compiled
    regex (truthy match object on a hit, None otherwise).
    """
    if not is_regexp:
        pattern = "^.*" + re.escape(pattern)
    flags = re.UNICODE | (0 if case_sensitive else re.IGNORECASE)
    return re.compile(pattern, flags).search
if __name__ == "__main__":
    # Manual smoke test: search the current tree for "class", print to stdout.
    import sys
    Search(".", make_matcher("class"), SearchFileOutput(sys.stdout)).search()
| shaurz/devo | search.py | Python | mit | 3,549 |
#! /usr/bin/env python
class ParserError(Exception):
    """Raised when the token stream doesn't form a valid sentence."""
    pass
class Sentence(object):
    """A parsed sentence built from (token_type, word) tuples."""
    def __init__(self, subject, verb, object):
        # remember we take ('noun', 'princess') tuples and convert them
        self.subject = subject[1]
        self.verb = verb[1]
        self.object = object[1]
    def get_sentence(self):
        """Join subject, verb and object into one space-separated string."""
        self.sentence = ' '.join((self.subject, self.verb, self.object))
        return self.sentence
def peek(word_list):
    """Return the token type of the first word, or None if the list is empty."""
    if not word_list:
        return None
    return word_list[0][0]
def match(word_list, expecting):
    """Pop the first word; return it if its type is *expecting*, else None.

    The word is consumed from the list even when it does not match.
    An empty list also yields None.
    """
    if not word_list:
        return None
    word = word_list.pop(0)
    return word if word[0] == expecting else None
def skip(word_list, word_type):
    """Discard consecutive leading words of *word_type* from word_list."""
    while peek(word_list) == word_type:
        match(word_list, word_type)
def parse_verb(word_list):
    """Skip stop words, then require and return a verb token."""
    skip(word_list, 'stop')
    if peek(word_list) != 'verb':
        raise ParserError("Expected a verb next.")
    return match(word_list, 'verb')
def parse_object(word_list):
    """Skip stop words, then require and return a noun or direction token."""
    skip(word_list, 'stop')
    kind = peek(word_list)
    if kind in ('noun', 'direction'):
        return match(word_list, kind)
    raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list, subj):
    """Parse the verb and object that follow a known subject token."""
    # Arguments evaluate left to right, so the verb is parsed before
    # the object, exactly as before.
    return Sentence(subj, parse_verb(word_list), parse_object(word_list))
def parse_sentence(word_list):
    """Parse a full sentence; a bare verb gets the player as its subject."""
    skip(word_list, 'stop')
    start = peek(word_list)
    if start == 'noun':
        return parse_subject(word_list, match(word_list, 'noun'))
    if start == 'verb':
        # assume the subject is the player then
        return parse_subject(word_list, ('noun', 'player'))
    raise ParserError("Must start with subject, object or verb not: %s" % start)
| pedrogideon7/spy_quest | parser.py | Python | mit | 1,938 |
"""Contains code for an FadeCandy hardware for RGB LEDs."""
from typing import List
import logging
import json
import struct
from mpf.core.utility_functions import Util
from mpf.platforms.openpixel import OpenPixelClient
from mpf.platforms.openpixel import OpenpixelHardwarePlatform
MYPY = False
if MYPY: # pragma: no cover
from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import
class FadecandyHardwarePlatform(OpenpixelHardwarePlatform):
    """Base class for the FadeCandy hardware platform."""
    # No instance attributes beyond the parent's; keeps instances slotted.
    __slots__ = []  # type: List[str]
    def __init__(self, machine: "MachineController") -> None:
        """Initialise Fadecandy.
        Args:
        ----
        machine: The main ``MachineController`` object.
        """
        super().__init__(machine)
        self.log = logging.getLogger("FadeCandy")
        self.log.debug("Configuring FadeCandy hardware interface.")
    def __repr__(self):
        """Return string representation."""
        return '<Platform.FadeCandy>'
    async def _setup_opc_client(self):
        # Override of the OpenPixel hook: use the FadeCandy-specific OPC
        # client (adds color correction / firmware options) and connect it.
        self.opc_client = FadeCandyOPClient(self.machine, self.machine.config['open_pixel_control'])
        await self.opc_client.connect()
class FadeCandyOPClient(OpenPixelClient):
    """Base class of an OPC client which connects to a FadeCandy server.
    This class implements some FadeCandy-specific features that are not
    available with generic OPC implementations.
    """
    __slots__ = ["gamma", "whitepoint", "linear_slope", "linear_cutoff", "keyframe_interpolation", "dithering",
                 "config"]
    def __init__(self, machine, config):
        """Initialise Fadecandy client.
        Args:
        ----
        machine: The main ``MachineController`` instance.
        config: Dictionary which contains configuration settings for the
            OPC client.
        """
        super().__init__(machine, config)
        self.log = logging.getLogger('FadeCandyClient')
        self.update_every_tick = True
        # Validate the 'fadecandy' section of the machine config against
        # the platform's config spec.
        self.config = self.machine.config_validator.validate_config('fadecandy',
                                                                    self.machine.config['fadecandy'])
        self.gamma = self.config['gamma']
        # Whitepoint arrives as a comma-separated string; convert the three
        # channel scalers (R, G, B) to floats.
        self.whitepoint = Util.string_to_event_list(self.config['whitepoint'])
        self.whitepoint[0] = float(self.whitepoint[0])
        self.whitepoint[1] = float(self.whitepoint[1])
        self.whitepoint[2] = float(self.whitepoint[2])
        self.linear_slope = self.config['linear_slope']
        self.linear_cutoff = self.config['linear_cutoff']
        self.keyframe_interpolation = self.config['keyframe_interpolation']
        self.dithering = self.config['dithering']
        # Without keyframe interpolation the server doesn't need a frame
        # every tick; only send updates when something changes.
        if not self.keyframe_interpolation:
            self.update_every_tick = False
    async def connect(self):
        """Connect to the hardware."""
        await super().connect()
        # Push our settings to the server as soon as the link is up.
        self.set_global_color_correction()
        self.write_firmware_options()
    def __repr__(self):
        """Return str representation."""
        return '<Platform.FadeCandyOPClient>'
    def set_global_color_correction(self):
        """Write the current global color correction settings to the FadeCandy server.
        This includes gamma, white point, linear slope, and linear cutoff.
        """
        msg = json.dumps({
            'gamma': self.gamma,
            'whitepoint': self.whitepoint,
            'linearSlope': self.linear_slope,
            'linearCutoff': self.linear_cutoff
        })
        # OPC system-exclusive frame: channel 0, command 0xFF, payload length
        # (JSON + 4 header bytes), FadeCandy system id 0x0001, opcode 0x0001.
        self.send(struct.pack(
            "!BBHHH", 0x00, 0xFF, len(msg) + 4, 0x0001, 0x0001) + bytes(msg, 'UTF-8'))
    def write_firmware_options(self):
        """Write the current firmware settings (keyframe interpolation and dithering) to the FadeCandy hardware."""
        config_byte = 0x00
        # Bit 0 disables dithering; bit 1 disables interpolation.
        if not self.dithering:
            config_byte |= 0x01
        if not self.keyframe_interpolation:
            config_byte |= 0x02
        # manual LED control
        # config_byte = config_byte | 0x04
        # turn LED on
        # config_byte = config_byte | 0x08
        # Same sysex framing as above, but opcode 0x0002 with one data byte.
        self.send(struct.pack(
            "!BBHHHB", 0x00, 0xFF, 0x0005, 0x0001, 0x0002, config_byte))
| missionpinball/mpf | mpf/platforms/fadecandy.py | Python | mit | 4,250 |
from setuptools import setup, find_packages
import os
import sys
# This monstrous hack is to support /etc generation for the Debian package
# with fpm.
# BUGFIX: guard sys.argv[1] -- running "python setup.py" with no command
# used to raise IndexError before the env-var check was even reached.
if len(sys.argv) > 1 and sys.argv[1] == 'install' and os.environ.get('JACQUARD_DEBIAN_HACK'):
    def debian_etc_hack(root):
        """Populate /etc/jacquard and /var/jacquard under *root* for fpm.

        Creates the config/plugins directories (idempotently) and copies
        debian.cfg in as the default config file.
        """
        import pathlib
        root_path = pathlib.Path(root)
        config_dir = root_path / 'etc' / 'jacquard'
        try:
            config_dir.mkdir(parents=True)
        except FileExistsError:
            pass
        try:
            (config_dir / 'plugins').mkdir()
        except FileExistsError:
            pass
        with (config_dir / 'config.cfg').open('wb') as f_out:
            with open('debian.cfg', 'rb') as f_in:
                config_file = f_in.read()
            f_out.write(config_file)
        try:
            (root_path / 'var' / 'jacquard').mkdir(parents=True)
        except FileExistsError:
            pass
    # NOTE(review): assumes sys.argv[3] is the install root as fpm invokes
    # setup.py -- confirm against the packaging script.
    debian_etc_hack(sys.argv[3])
    del debian_etc_hack
# The PyPI long description is taken verbatim from the README.
with open('README.rst', 'r', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='jacquard-split',
    version='0.7.0',
    url='https://github.com/prophile/jacquard',
    description="Split testing server",
    long_description=long_description,
    author="Alistair Lynn",
    author_email="alistair@alynn.co.uk",
    keywords = (
        'ab-testing',
        'e-commerce',
        'experiments',
        'jacquard',
        'metrics',
        'redis',
        'science',
        'split-testing',
        'testing',
        'zucchini',
    ),
    license='MIT',
    zip_safe=False,
    packages=find_packages(),
    classifiers=(
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Office/Business',
    ),
    install_requires=(
        'redis',
        'werkzeug',
        'python-dateutil',
        'pyyaml',
        'sqlalchemy',
    ),
    setup_requires=(
        'pytest-runner',
    ),
    tests_require=(
        'pytest',
        'redis==2.10.6',
        'fakeredis==0.16.0',
        'hypothesis<4',
    ),
    # Plugin architecture: jacquard discovers storage engines, CLI commands,
    # directory backends and HTTP endpoints through these entry-point groups.
    entry_points={
        'console_scripts': (
            'jacquard = jacquard.cli:main',
        ),
        'jacquard.storage_engines': (
            'dummy = jacquard.storage.dummy:DummyStore',
            'redis = jacquard.storage.redis:RedisStore',
            'redis-cloned = jacquard.storage.cloned_redis:ClonedRedisStore',
            'file = jacquard.storage.file:FileStore',
        ),
        'jacquard.commands': (
            'storage-dump = jacquard.storage.commands:StorageDump',
            'storage-flush = jacquard.storage.commands:StorageFlush',
            'storage-import = jacquard.storage.commands:StorageImport',
            'storage-export = jacquard.storage.commands:StorageExport',
            'set-default = jacquard.users.commands:SetDefault',
            'override = jacquard.users.commands:Override',
            'clear-overrides = jacquard.users.commands:OverrideClear',
            'runserver = jacquard.service.commands:RunServer',
            'launch = jacquard.experiments.commands:Launch',
            'conclude = jacquard.experiments.commands:Conclude',
            'load-experiment = jacquard.experiments.commands:Load',
            'rollout = jacquard.buckets.commands:Rollout',
            'settings-under-experiment = jacquard.experiments.commands:SettingsUnderActiveExperiments',
            'bugpoint = jacquard.commands_dev:Bugpoint',
        ),
        'jacquard.commands.list': (
            'experiments = jacquard.experiments.commands:ListExperiments',
        ),
        'jacquard.commands.show': (
            'user = jacquard.users.commands:Show',
            'defaults = jacquard.users.commands:Show',
            'directory-entry = jacquard.directory.commands:ShowDirectoryEntry',
            'experiment = jacquard.experiments.commands:Show',
        ),
        'jacquard.directory_engines': (
            'dummy = jacquard.directory.dummy:DummyDirectory',
            'django = jacquard.directory.django:DjangoDirectory',
            'union = jacquard.directory.union:UnionDirectory',
        ),
        'jacquard.http_endpoints': (
            'root = jacquard.service.endpoints:Root',
            'user = jacquard.service.endpoints:User',
            'experiments-overview = jacquard.service.endpoints:ExperimentsOverview',
            'experiment = jacquard.service.endpoints:ExperimentDetail',
            'experiment-partition = jacquard.service.endpoints:ExperimentPartition',
            'defaults = jacquard.service.endpoints:Defaults',
        ),
    },
)
| prophile/jacquard | setup.py | Python | mit | 4,910 |
from operator import itemgetter
from typing import Optional, cast
from libsyntyche.widgets import HBoxLayout, Label, Stretch, mk_signal1
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import Qt
from ..common import Settings
class TagInfoList(QtWidgets.QScrollArea):
    """Scrollable panel visualizing tag usage counts or tag macros."""
    error = mk_signal1(str)
    print_ = mk_signal1(str)

    class TagCountBar(QtWidgets.QWidget):
        """Horizontal bar whose filled width shows a tag's relative usage."""
        def __init__(self, parent: QtWidgets.QWidget,
                     percentage: float) -> None:
            super().__init__(parent)
            # Fraction of the full width to fill, in [0, 1].
            self.percentage = percentage
        def paintEvent(self, ev: QtGui.QPaintEvent) -> None:
            # Fill from the left, leaving (1 - percentage) of the width empty.
            right_offset = (1 - self.percentage) * ev.rect().width()
            painter = QtGui.QPainter(self)
            painter.fillRect(ev.rect().adjusted(0, 0, -int(right_offset), 0),
                             painter.background())
            painter.end()
    def __init__(self, parent: QtWidgets.QWidget, settings: Settings) -> None:
        super().__init__(parent)
        self.setSizeAdjustPolicy(self.AdjustToContents)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        # Keep macros in sync with the settings object.
        self.tag_macros: dict[str, str] = settings.tag_macros.value
        settings.tag_macros.changed.connect(self.set_tag_macros)
        self.panel = QtWidgets.QWidget(self)
        self.panel.setObjectName('tag_info_list_panel')
        self.panel.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                 QtWidgets.QSizePolicy.Maximum)
        # Three columns: tag name, count, stretchable usage bar.
        layout = QtWidgets.QGridLayout(self.panel)
        layout.setColumnStretch(2, 1)
        layout.setHorizontalSpacing(10)
        # layout.setSizeConstraint(layout.SetMinAndMaxSize)
        # TODO: something less ugly than this
        self.setFixedHeight(200)
        self.panel.setLayout(layout)
        self.setWidget(self.panel)
        self.setWidgetResizable(True)
        self.hide()
    def clear(self) -> None:
        """Remove and delete every widget currently in the panel grid."""
        layout = self.panel.layout()
        while not layout.isEmpty():
            item = layout.takeAt(0)
            if item and item.widget() is not None:
                item.widget().deleteLater()
    def set_tag_macros(self, tag_macros: dict[str, str]) -> None:
        self.tag_macros = tag_macros
    def _make_tag(self, tag: str) -> QtWidgets.QWidget:
        # Wrap the label in a widget with a trailing stretch so the tag
        # chip hugs its text instead of filling the whole column.
        tag_label_wrapper = QtWidgets.QWidget(self)
        tag_label = Label(tag, name='tag', parent=tag_label_wrapper)
        tag_label.setStyleSheet('background: #667;')
        HBoxLayout(tag_label, Stretch(), parent=tag_label_wrapper)
        return tag_label_wrapper
    def view_tags(self, tags: list[tuple[str, int]], sort_alphabetically: bool,
                  reverse: bool, name_filter: Optional[str]) -> None:
        """Populate and show the panel with (tag, count) rows.

        Rows sort alphabetically (ascending by default) or by usage count
        (descending by default); *reverse* flips the default direction, and
        *name_filter* keeps only tags containing the substring.
        """
        self.clear()
        max_count = max(t[1] for t in tags)
        if sort_alphabetically:
            tags.sort(key=itemgetter(0))
        else:
            # Secondary sort: name descending, then stable sort by count,
            # so equal counts end up alphabetical after the final reverse.
            tags.sort(key=itemgetter(0), reverse=True)
            tags.sort(key=itemgetter(1))
        # If alphabetically, we want to default to ascending,
        # but if we're sorting by usage count, we want it descending.
        if reverse or (not sort_alphabetically and not reverse):
            tags.reverse()
        if name_filter:
            tags = [t for t in tags if name_filter in t[0]]
        layout = cast(QtWidgets.QGridLayout, self.panel.layout())
        for n, (tag, count) in enumerate(tags):
            # Tag name
            layout.addWidget(self._make_tag(tag), n, 0)
            # Tag count
            layout.addWidget(Label(count, name='tag_info_count', parent=self),
                             n, 1, alignment=Qt.AlignBottom)
            # Tag bar
            count_bar = self.TagCountBar(self, count / max_count)
            layout.addWidget(count_bar, n, 2)
        self.show()
    def view_macros(self) -> None:
        """Populate and show the panel with @macro → expression rows."""
        # TODO: better view of this
        self.clear()
        layout = cast(QtWidgets.QGridLayout, self.panel.layout())
        for n, (tag, macro) in enumerate(sorted(self.tag_macros.items())):
            # Tag macro name
            layout.addWidget(self._make_tag('@' + tag), n, 0)
            # Tag macro expression
            layout.addWidget(Label(macro, name='tag_info_macro_expression',
                                   word_wrap=True, parent=self), n, 1)
        self.show()
| nycz/sapfo | sapfo/index/taginfolist.py | Python | mit | 4,351 |
# Bockbuild package definition: clutter 1.10.6, fetched as a GNU xz tarball
# from the upstream clutter-project source archive (%{name}/%{version} are
# substituted by the packaging framework).
GnuXzPackage ('clutter', '1.10.6',
	sources = [ 'http://source.clutter-project.org/sources/clutter/1.10/%{name}-%{version}.tar.xz' ],
)
| bl8/bockbuild | packages/clutter.py | Python | mit | 136 |
# -*- encoding: utf-8 -*-
import hashlib
import json
import os
import re
import magic
from perpetualfailure.db import session
from pyramid.authentication import (
Authenticated,
Everyone,
)
from pyramid.httpexceptions import (
HTTPException,
HTTPBadRequest,
HTTPFound,
HTTPNotFound,
HTTPForbidden,
)
from pyramid.view import view_config
from sqlalchemy.sql.expression import func
import temporals_web.gmod.models as m
@view_config(
    route_name='servers.gmod.loading',
    renderer="gmod/loading.mako",
)
def gmod_loading(request):
    """Render the Garry's Mod loading screen for a connecting player.

    Converts the 64-bit community SteamID from the query string into the
    legacy STEAM_0:X:Y form, fetches the player's Steam profile, and
    serializes every gamemode for the template.
    """
    # Offset between 64-bit community IDs and legacy auth IDs.
    id_magic_number = 76561197960265728
    community_id = int(request.GET["steamid"])
    auth_server = (community_id - id_magic_number) & 1
    # BUGFIX: use floor division so auth_id stays an int on Python 3
    # (true division produced a float; "%i" masked it in the output).
    auth_id = (community_id - id_magic_number - auth_server) // 2
    steam_id = "STEAM_0:%i:%i" % (auth_server, auth_id)
    player = request.steamweb.player_profile(request.GET["steamid"])
    data = {}
    for mode in session.query(m.LS_Gamemode).all():
        data[mode.name] = mode
    game_data = json.dumps(data, cls=m.ObjectEncoder)
    return {"steamid": steam_id, "player": player, "game_data": game_data}
@view_config(route_name='servers.gmod.background')
def gmod_background(request):
    """Redirect to a random loading-screen background image.

    Tries progressively weaker filters until one matches: map with this
    gamemode (or no gamemode), map alone, map with no gamemode, gamemode
    alone, and finally any background at all.
    """
    map = None
    if "map" in request.GET:
        map = request.GET["map"]
    gamemode = None
    if "gamemode" in request.GET:
        gamemode = request.GET["gamemode"]
    query = session.query(m.LS_Background)
    query = query.filter(m.LS_Background.gamemode.in_([gamemode, None]))
    query = query.filter(m.LS_Background.map == map)
    if query.count() < 1:
        # No exact match: drop the gamemode restriction entirely.
        query = session.query(m.LS_Background)
        query = query.filter(m.LS_Background.map == map)
    if query.count() < 1:
        # Still nothing: map-specific backgrounds with no gamemode set.
        query = session.query(m.LS_Background)
        query = query.filter(m.LS_Background.map == map, m.LS_Background.gamemode == None)
    if query.count() < 1:
        # Fall back to gamemode-wide backgrounds regardless of map.
        query = session.query(m.LS_Background)
        query = query.filter(m.LS_Background.gamemode == gamemode)
    if query.count() < 1:
        # Last resort: pick from all backgrounds.
        query = session.query(m.LS_Background)
    bg = query.order_by(func.random()).first()
    return HTTPFound(location=request.resolve(bg.url))
@view_config(
    route_name='servers.gmod.acp.loading',
    renderer="gmod/acp/loading.mako",
    permission=Authenticated,
)
def acp_loading(request):
    """ACP view listing every gamemode for the loading-screen editor."""
    return {"gamemodes": session.query(m.LS_Gamemode)}
@view_config(
    route_name='servers.gmod.acp.background.gallery',
    renderer="gmod/acp/background/gallery.mako",
    permission=Authenticated,
)
def acp_background_gallery(request):
    """ACP view showing a gallery of all loading-screen backgrounds."""
    return {"backgrounds": session.query(m.LS_Background).all()}
@view_config(
    route_name='servers.gmod.acp.background.add',
    renderer="gmod/acp/background/edit.mako",
    permission=Authenticated,
)
def acp_background_add(request):
    """ACP view creating a new loading-screen background (with upload)."""
    record = m.LS_Background()
    if not request.permits("create", record):
        return HTTPForbidden()
    outcome = background_update(request, record, upload=True)
    if isinstance(outcome, HTTPException):
        # Propagate redirects / error responses from the form handler.
        return outcome
    return {"background": record, "upload": True}
@view_config(
    route_name='servers.gmod.acp.background.edit',
    renderer="gmod/acp/background/edit.mako",
    permission=Authenticated,
)
def acp_background_edit(request):
    """ACP view editing an existing loading-screen background."""
    record = session.query(m.LS_Background).filter(m.LS_Background.id==request.matchdict["id"]).first()
    if not request.permits("edit", record):
        return HTTPForbidden()
    outcome = background_update(request, record)
    if isinstance(outcome, HTTPException):
        # Propagate redirects / error responses from the form handler.
        return outcome
    return {"background": record, "upload": False}
@view_config(
    route_name='servers.gmod.acp.gamemode.add',
    renderer="gmod/acp/gamemode.mako",
    permission=Authenticated,
)
def acp_gamemode_add(request):
    """ACP view creating a new gamemode record."""
    record = m.LS_Gamemode()
    if not request.permits("create", record):
        return HTTPForbidden()
    outcome = gamemode_update(request, record)
    if isinstance(outcome, HTTPException):
        # Propagate redirects / error responses from the form handler.
        return outcome
    return {"gamemode": record}
@view_config(
    route_name='servers.gmod.acp.gamemode.edit',
    renderer="gmod/acp/gamemode.mako",
    permission=Authenticated,
)
def acp_gamemode_edit(request):
    """ACP view editing an existing gamemode record."""
    record = session.query(m.LS_Gamemode).filter(m.LS_Gamemode.id==request.matchdict["id"]).first()
    if not request.permits("edit", record):
        return HTTPForbidden()
    outcome = gamemode_update(request, record)
    if isinstance(outcome, HTTPException):
        # Propagate redirects / error responses from the form handler.
        return outcome
    return {"gamemode": record}
def gamemode_update(request, gamemode):
    """Apply POSTed form data to *gamemode* and persist it.

    Returns None for non-POST requests (the form is just displayed),
    HTTPBadRequest when a required field is missing, and a redirect to the
    gamemode's edit page on success.
    """
    if request.method != "POST":
        return None
    for key in ['title', 'name', 'rules', 'extrainfo']:
        if key not in request.POST:
            return HTTPBadRequest()

    def clean(text):
        """Normalize pasted list text into a list of logical entries.

        Lines prefixed with "- " or "1. "-style numbering start a new
        entry; unprefixed non-blank lines are treated as continuations and
        glued onto the previous entry.
        """
        lines = text.split("\n")
        output = []
        for line in lines:
            oldline = line
            if line.startswith("- "):
                line = line[2:]
            # BUGFIX: raw string -- "\." in a plain string is an invalid
            # escape (DeprecationWarning on modern Python).
            line = re.sub(r"^[0-9]+\. ", "", line)
            if oldline == line and line.strip():
                # No prefix was stripped: continuation of the last entry.
                if not output:
                    output.append("")
                output[-1] = (output[-1] + " " + line.strip()).strip()
            elif line.strip():
                output.append(line.strip())
        return output

    gamemode.title = request.params['title']
    gamemode.name = request.params['name']
    gamemode.rules = clean(request.params['rules'])
    gamemode.extrainfo = clean(request.params['extrainfo'])
    session.add(gamemode)
    session.flush()
    return HTTPFound(location=request.route_path('servers.gmod.acp.gamemode.edit', id=gamemode.id))
def background_update(request, gamemode, upload=False):
    """Apply POSTed form data to a background record, optionally storing an upload.

    Note: the parameter is (misleadingly) named *gamemode* but receives an
    LS_Background record. Returns None for non-POST requests,
    HTTPBadRequest for missing/invalid fields or uploads, and a redirect to
    the background's edit page on success.
    """
    if request.method != "POST":
        return None
    for key in ['map', 'gamemode']:
        if key not in request.POST:
            return HTTPBadRequest()
    if upload:
        if 'image' not in request.POST or not request.POST['image'].file:
            return HTTPBadRequest()
        image = request.POST['image'].file
        # Sniff the real content type from the bytes; never trust the
        # client-supplied filename.
        mime = magic.from_buffer(image.read(1024), mime=True)
        if not mime.startswith("image/"):
            return HTTPBadRequest()
        ext = mime.split("/")[-1]
        if ext not in ["png", "jpeg", "jpg", "gif", "bmp", "tiff", "targa"]:
            return HTTPBadRequest()
        image.seek(0)
        # Content-addressed filename: identical uploads map to one file.
        hash = hashlib.sha1(image.read()).hexdigest()
        image.seek(0)
        cdn_path = os.path.join("bg", "%s.%s" % (hash, ext))
        path = os.path.join(request.registry.settings["upload_path"], cdn_path)
        # BUGFIX: binary mode -- the upload is raw image bytes, and text
        # mode breaks on Python 3 (and may mangle bytes on Windows).
        with open(path, "wb") as targetFile:
            targetFile.write(image.read())
        gamemode.url = "cdn:%s" % cdn_path
    gamemode.map = request.params['map']
    gamemode.gamemode = request.params['gamemode'] if request.params['gamemode'] else None
    session.add(gamemode)
    session.flush()
    return HTTPFound(location=request.route_path('servers.gmod.acp.background.edit', id=gamemode.id))
| 404d/Temporals-Web | temporals_web/gmod/views.py | Python | mit | 7,019 |
"""Functions for accessing HDF5 files."""
from __future__ import division
from __future__ import print_function
import re
import h5py as h5
import numpy as np
import six
from six.moves import range
from ..utils import filter_regex, to_list
def _ls(item, recursive=False, groups=False, level=0):
    """Recursively collect record names beneath an HDF5 item.

    Datasets contribute their own name (unless *groups*); groups below the
    root contribute theirs when *groups* is set, and are descended into at
    the root level or when *recursive*.
    """
    keys = []
    if not isinstance(item, h5.Group):
        # Leaf dataset.
        if not groups:
            keys.append(item.name)
        return keys
    if groups and level > 0:
        keys.append(item.name)
    if level == 0 or recursive:
        for key in list(item.keys()):
            keys.extend(_ls(item[key], recursive, groups, level + 1))
    return keys
def ls(filename, group='/', recursive=False, groups=False,
       regex=None, nb_key=None, must_exist=True):
    """List name of records HDF5 file.

    Parameters
    ----------
    filename:
        Path of HDF5 file.
    group:
        HDF5 group to be explored.
    recursive: bool
        If `True`, list records recursively.
    groups: bool
        If `True`, only list group names but not name of datasets.
    regex: str
        Regex to filter listed records.
    nb_key: int
        Maximum number of records to be listed.
    must_exist: bool
        If `False`, return `None` if file or group does not exist.

    Returns
    -------
    list
        `list` with name of records in `filename`.
    """
    if not group.startswith('/'):
        group = '/%s' % group
    h5_file = h5.File(filename, 'r')
    # BUGFIX: try/finally guarantees the handle is closed even on the early
    # `must_exist` return or if _ls raises (it previously leaked).
    try:
        if not must_exist and group not in h5_file:
            return None
        keys = _ls(h5_file[group], recursive, groups)
    finally:
        h5_file.close()
    # Strip the group prefix so names are relative to *group*.
    for i, key in enumerate(keys):
        keys[i] = re.sub('^%s/' % group, '', key)
    if regex:
        keys = filter_regex(keys, regex)
    if nb_key is not None:
        keys = keys[:nb_key]
    return keys
def write_data(data, filename):
    """Recursively write a (possibly nested) dict of values to an HDF5 file.

    *filename* may be a path (the file is created and closed here) or an
    already-open group for the recursive calls; nested dicts become groups.
    """
    is_root = isinstance(filename, str)
    group = h5.File(filename, 'w') if is_root else filename
    for key, value in six.iteritems(data):
        if isinstance(value, dict):
            write_data(value, group.create_group(key))
        else:
            group[key] = value
    if is_root:
        group.close()
def hnames_to_names(hnames):
    """Flatten a `dict` of hierarchical names into HDF5 path strings.

    Converts e.g. ``{'a': ['a1', 'a2'], 'b': None}`` into
    ``['a/a1', 'a/a2', 'b']``. Values may be nested dicts, lists of leaf
    names, or single strings; any other value (e.g. ``None``) means the
    key itself is the path.
    """
    names = []
    # .items() works on both Python 2 and 3; six is unnecessary here.
    for key, value in hnames.items():
        if isinstance(value, dict):
            names.extend('%s/%s' % (key, name)
                         for name in hnames_to_names(value))
        elif isinstance(value, list):
            names.extend('%s/%s' % (key, name) for name in value)
        elif isinstance(value, str):
            names.append('%s/%s' % (key, value))
        else:
            names.append(key)
    return names
def reader(data_files, names, batch_size=128, nb_sample=None, shuffle=False,
           loop=False):
    """Generator yielding batches of datasets read from a list of HDF5 files.

    Parameters
    ----------
    data_files: list
        Paths of HDF5 files to read from, consumed in order (or shuffled).
    names: list or dict
        Names of the datasets to read. A hierarchical `dict` is flattened
        with `hnames_to_names`.
    batch_size: int
        Maximum number of samples per yielded batch.
    nb_sample: int
        Maximum total number of samples to read. If `None`, read everything.
    shuffle: bool
        If `True`, shuffle the file order on each pass and the samples
        within each file (requires reading each file fully into memory).
    loop: bool
        If `True`, restart from the first file after the last one instead of
        stopping, yielding batches indefinitely.

    Yields
    ------
    dict
        `dict` mapping each name in `names` to an array of at most
        `batch_size` samples.
    """
    if isinstance(names, dict):
        names = hnames_to_names(names)
    else:
        names = to_list(names)
    # Copy, since list will be changed if shuffle=True
    data_files = list(to_list(data_files))
    # Check if names exist
    h5_file = h5.File(data_files[0], 'r')
    for name in names:
        if name not in h5_file:
            raise ValueError('%s does not exist!' % name)
    h5_file.close()
    if nb_sample:
        # Select the first k files s.t. the total sample size is at least
        # nb_sample. Only these files will be shuffled.
        _data_files = []
        nb_seen = 0
        for data_file in data_files:
            h5_file = h5.File(data_file, 'r')
            nb_seen += len(h5_file[names[0]])
            h5_file.close()
            _data_files.append(data_file)
            if nb_seen >= nb_sample:
                break
        data_files = _data_files
    else:
        # np.inf acts as "no limit": every min()/comparison below still works.
        nb_sample = np.inf
    file_idx = 0
    nb_seen = 0  # total number of samples yielded so far
    while True:
        if shuffle and file_idx == 0:
            # Reshuffle the file order at the start of each pass.
            np.random.shuffle(data_files)
        h5_file = h5.File(data_files[file_idx], 'r')
        data_file = dict()
        for name in names:
            data_file[name] = h5_file[name]
        # All datasets are assumed to have the same length; use the first.
        nb_sample_file = len(list(data_file.values())[0])
        if shuffle:
            # Shuffle data within the entire file, which requires reading
            # the entire file into memory
            idx = np.arange(nb_sample_file)
            np.random.shuffle(idx)
            for name, value in six.iteritems(data_file):
                # value[:len(idx)] reads the dataset into memory; [idx]
                # then applies the random permutation.
                data_file[name] = value[:len(idx)][idx]
        nb_batch = int(np.ceil(nb_sample_file / batch_size))
        for batch in range(nb_batch):
            batch_start = batch * batch_size
            # Never read past the global nb_sample budget.
            nb_read = min(nb_sample - nb_seen, batch_size)
            batch_end = min(nb_sample_file, batch_start + nb_read)
            _batch_size = batch_end - batch_start
            if _batch_size == 0:
                break
            data_batch = dict()
            for name in names:
                data_batch[name] = data_file[name][batch_start:batch_end]
            yield data_batch
            nb_seen += _batch_size
            if nb_seen >= nb_sample:
                break
        h5_file.close()
        file_idx += 1
        assert nb_seen <= nb_sample
        if nb_sample == nb_seen or file_idx == len(data_files):
            if loop:
                # Start the next pass over all files.
                file_idx = 0
                nb_seen = 0
            else:
                break
def _to_dict(data):
if isinstance(data, np.ndarray):
data = [data]
return dict(zip(range(len(data)), data))
def read_from(reader, nb_sample=None):
    """Collect batches from `reader` into stacked arrays.

    Reads until `reader` is exhausted or at least `nb_sample` samples were
    seen, stacks the per-name batch lists, and truncates to `nb_sample`.
    If `reader` yielded plain arrays instead of dicts, a list is returned.
    """
    from .utils import stack_dict

    collected = dict()
    nb_seen = 0
    was_dict = True
    for batch in reader:
        if not isinstance(batch, dict):
            batch = _to_dict(batch)
            was_dict = False
        for key, value in batch.items():
            collected.setdefault(key, []).append(value)
        nb_seen += len(next(iter(batch.values())))
        if nb_sample and nb_seen >= nb_sample:
            break
    stacked = stack_dict(collected)
    if nb_sample:
        for key in stacked:
            stacked[key] = stacked[key][:nb_sample]
    if not was_dict:
        # Restore the positional (non-dict) shape of the input batches.
        stacked = [stacked[i] for i in range(len(stacked))]
    return stacked
def read(data_files, names, nb_sample=None, batch_size=1024, *args, **kwargs):
    """Read up to `nb_sample` samples of datasets `names` from `data_files`.

    Convenience wrapper that drives a non-looping `reader` and stacks its
    batches with `read_from`.
    """
    batches = reader(data_files, names, batch_size=batch_size,
                     nb_sample=nb_sample, loop=False, *args, **kwargs)
    return read_from(batches, nb_sample)
| cangermueller/deepcpg | deepcpg/data/hdf.py | Python | mit | 6,707 |
"""
Link prediction algorithms.
"""
from math import log
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ['resource_allocation_index',
'jaccard_coefficient',
'adamic_adar_index',
'preferential_attachment',
'cn_soundarajan_hopcroft',
'ra_index_soundarajan_hopcroft',
'within_inter_cluster']
def _apply_prediction(G, func, ebunch=None):
"""Applies the given function to each edge in the specified iterable
of edges.
`G` is an instance of :class:`networkx.Graph`.
`func` is a function on two inputs, each of which is a node in the
graph. The function can return anything, but it should return a
value representing a prediction of the likelihood of a "link"
joining the two nodes.
`ebunch` is an iterable of pairs of nodes. If not specified, all
non-edges in the graph `G` will be used.
"""
if ebunch is None:
ebunch = nx.non_edges(G)
return ((u, v, func(u, v)) for u, v in ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def resource_allocation_index(G, ebunch=None):
    r"""Compute the resource allocation index of all node pairs in ebunch.

    The resource allocation index of nodes `u` and `v` is

    .. math::

        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|}

    where $\Gamma(u)$ denotes the set of neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes for which the index is computed. If None,
        all non-existent edges of the graph are used. Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples (u, v, p) where p is the resource
        allocation index of the node pair (u, v).

    References
    ----------
    .. [1] T. Zhou, L. Lu, Y.-C. Zhang.
       Predicting missing links via local information.
       Eur. Phys. J. B 71 (2009) 623.
       https://arxiv.org/pdf/0901.0553.pdf
    """
    def predict(u, v):
        # Each common neighbor w contributes 1 / degree(w) to the score.
        total = 0
        for w in nx.common_neighbors(G, u, v):
            total += 1 / G.degree(w)
        return total

    return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def jaccard_coefficient(G, ebunch=None):
    r"""Compute the Jaccard coefficient of all node pairs in ebunch.

    The Jaccard coefficient of nodes `u` and `v` is

    .. math::

        \frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|}

    where $\Gamma(u)$ denotes the set of neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes for which the coefficient is computed. If
        None, all non-existent edges of the graph are used.
        Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples (u, v, p) where p is the Jaccard
        coefficient of the node pair (u, v).

    References
    ----------
    .. [1] D. Liben-Nowell, J. Kleinberg.
       The Link Prediction Problem for Social Networks (2004).
       http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
    """
    def predict(u, v):
        neighborhood_size = len(set(G[u]) | set(G[v]))
        if neighborhood_size == 0:
            # Both nodes are isolated; define the coefficient as 0.
            return 0
        shared = sum(1 for _ in nx.common_neighbors(G, u, v))
        return shared / neighborhood_size

    return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def adamic_adar_index(G, ebunch=None):
    r"""Compute the Adamic-Adar index of all node pairs in ebunch.

    The Adamic-Adar index of nodes `u` and `v` is

    .. math::

        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}

    where $\Gamma(u)$ denotes the set of neighbors of $u$.

    This index leads to zero-division for nodes only connected via self-loops.
    It is intended to be used when no self-loops are present.

    Parameters
    ----------
    G : graph
        NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes for which the index is computed. If None,
        all non-existent edges of the graph are used. Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples (u, v, p) where p is the Adamic-Adar
        index of the node pair (u, v).

    References
    ----------
    .. [1] D. Liben-Nowell, J. Kleinberg.
       The Link Prediction Problem for Social Networks (2004).
       http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
    """
    def predict(u, v):
        # Common neighbors with low degree are weighted more heavily.
        score = 0
        for w in nx.common_neighbors(G, u, v):
            score += 1 / log(G.degree(w))
        return score

    return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def preferential_attachment(G, ebunch=None):
    r"""Compute the preferential attachment score of all node pairs in ebunch.

    The preferential attachment score of nodes `u` and `v` is

    .. math::

        |\Gamma(u)| |\Gamma(v)|

    where $\Gamma(u)$ denotes the set of neighbors of $u$.

    Parameters
    ----------
    G : graph
        NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes for which the score is computed. If None,
        all non-existent edges of the graph are used. Default value: None.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples (u, v, p) where p is the preferential
        attachment score of the node pair (u, v).

    References
    ----------
    .. [1] D. Liben-Nowell, J. Kleinberg.
       The Link Prediction Problem for Social Networks (2004).
       http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
    """
    def predict(u, v):
        deg_u = G.degree(u)
        deg_v = G.degree(v)
        return deg_u * deg_v

    return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cn_soundarajan_hopcroft(G, ebunch=None, community='community'):
    r"""Count the number of common neighbors of all node pairs in ebunch
    using community information.

    For two nodes $u$ and $v$, this function counts their common neighbors
    and adds a bonus of one for each common neighbor belonging to the same
    community as $u$ and $v$:

    .. math::

        |\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w)

    where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
    and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
    neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes for which the score is computed. If None,
        all non-existent edges of the graph are used. Default value: None.

    community : string, optional (default = 'community')
        Nodes attribute name containing the community information.
        G[u][community] identifies which community u belongs to. Each
        node belongs to at most one community. Default value: 'community'.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples (u, v, p) where p is the score of the
        node pair (u, v).

    References
    ----------
    .. [1] Sucheta Soundarajan and John Hopcroft.
       Using community information to improve the precision of link
       prediction methods.
       In Proceedings of the 21st international conference companion on
       World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
       http://doi.acm.org/10.1145/2187980.2188150
    """
    def predict(u, v):
        comm_u = _community(G, u, community)
        comm_v = _community(G, v, community)
        common = list(nx.common_neighbors(G, u, v))
        score = len(common)
        # Bonus points only apply when u and v share a community.
        if comm_u == comm_v:
            for w in common:
                if _community(G, w, community) == comm_u:
                    score += 1
        return score

    return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def ra_index_soundarajan_hopcroft(G, ebunch=None, community='community'):
    r"""Compute the resource allocation index of all node pairs in
    ebunch using community information.

    For two nodes $u$ and $v$, this function computes the resource
    allocation index counting only common neighbors that belong to the
    same community as $u$ and $v$:

    .. math::

        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|}

    where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
    and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
    neighbors of $u$.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes for which the score is computed. If None,
        all non-existent edges of the graph are used. Default value: None.

    community : string, optional (default = 'community')
        Nodes attribute name containing the community information.
        G[u][community] identifies which community u belongs to. Each
        node belongs to at most one community. Default value: 'community'.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples (u, v, p) where p is the score of the
        node pair (u, v).

    References
    ----------
    .. [1] Sucheta Soundarajan and John Hopcroft.
       Using community information to improve the precision of link
       prediction methods.
       In Proceedings of the 21st international conference companion on
       World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
       http://doi.acm.org/10.1145/2187980.2188150
    """
    def predict(u, v):
        comm_u = _community(G, u, community)
        comm_v = _community(G, v, community)
        if comm_u != comm_v:
            # Nodes in different communities score zero by definition.
            return 0
        score = 0
        for w in nx.common_neighbors(G, u, v):
            if _community(G, w, community) == comm_u:
                score += 1 / G.degree(w)
        return score

    return _apply_prediction(G, predict, ebunch)
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def within_inter_cluster(G, ebunch=None, delta=0.001, community='community'):
    """Compute the ratio of within- and inter-cluster common neighbors
    of all node pairs in ebunch.

    For two nodes `u` and `v`, a common neighbor `w` in the same community
    as both of them is a within-cluster common neighbor; otherwise it is an
    inter-cluster common neighbor. The WIC measure [1]_ is the ratio of the
    number of within-cluster to inter-cluster common neighbors.

    Parameters
    ----------
    G : graph
        A NetworkX undirected graph.

    ebunch : iterable of node pairs, optional (default = None)
        Pairs (u, v) of nodes for which the WIC measure is computed. If
        None, all non-existent edges of the graph are used.
        Default value: None.

    delta : float, optional (default = 0.001)
        Value to prevent division by zero in case there is no
        inter-cluster common neighbor between two nodes. See [1]_ for
        details. Default value: 0.001.

    community : string, optional (default = 'community')
        Nodes attribute name containing the community information.
        G[u][community] identifies which community u belongs to. Each
        node belongs to at most one community. Default value: 'community'.

    Returns
    -------
    piter : iterator
        An iterator of 3-tuples (u, v, p) where p is the WIC measure of
        the node pair (u, v).

    References
    ----------
    .. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes.
       Link prediction in complex networks based on cluster information.
       In Proceedings of the 21st Brazilian conference on Advances in
       Artificial Intelligence (SBIA'12)
       https://doi.org/10.1007/978-3-642-34459-6_10
    """
    if delta <= 0:
        raise nx.NetworkXAlgorithmError('Delta must be greater than zero')

    def predict(u, v):
        comm_u = _community(G, u, community)
        comm_v = _community(G, v, community)
        if comm_u != comm_v:
            return 0
        within = 0
        inter = 0
        for w in nx.common_neighbors(G, u, v):
            if _community(G, w, community) == comm_u:
                within += 1
            else:
                inter += 1
        # delta keeps the ratio finite when there is no inter-cluster
        # common neighbor.
        return within / (inter + delta)

    return _apply_prediction(G, predict, ebunch)
def _community(G, u, community):
"""Get the community of the given node."""
node_u = G.nodes[u]
try:
return node_u[community]
except KeyError:
raise nx.NetworkXAlgorithmError('No community information')
| sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/link_prediction.py | Python | mit | 16,714 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first unspent output in `listunspent` with exactly `amount`.

    Raises AssertionError if no such output exists.
    """
    for candidate in listunspent:
        if candidate['amount'] != amount:
            continue
        return candidate
    raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
raise AssertionError("Accepted invalid option foo")
except JSONRPCException as e:
assert("Unexpected key foo" in e.error['message'])
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': 'foobar'})
raise AssertionError("Accepted invalid elysium address")
except JSONRPCException as e:
assert("changeAddress must be a valid elysium address" in e.error['message'])
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 2})
except JSONRPCException as e:
assert('changePosition out of bounds' == e.error['message'])
else:
assert(False)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0];
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
if __name__ == '__main__':
    # Entry point: run the fundrawtransaction regression test directly.
    RawTransactionsTest().main()
| elysiumd/windows-wallet-13.2 | qa/rpc-tests/fundrawtransaction.py | Python | mit | 28,411 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-20 18:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (see header): adds an optional day-of-week
    # choice to Workout and relaxes Program.start_date to allow blank/null.
    dependencies = [
        ('hamask', '0011_auto_20170615_1520'),
    ]
    operations = [
        # Workout.day_of_week: optional CharField with choices
        # '1' (Sunday) through '7' (Saturday).
        migrations.AddField(
            model_name='workout',
            name='day_of_week',
            field=models.CharField(blank=True, choices=[('1', 'Sunday'), ('2', 'Monday'), ('3', 'Tuesday'), ('4', 'Wednesday'), ('5', 'Thursday'), ('6', 'Friday'), ('7', 'Saturday')], max_length=30, null=True),
        ),
        # Program.start_date becomes optional.
        migrations.AlterField(
            model_name='program',
            name='start_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
| rawenihcam/BER-SERKR | hamask/migrations/0012_auto_20170620_1435.py | Python | mit | 780 |
import os
import requests
import json
import pandas as pd
import numpy as np
import time
from datetime import datetime
# TMDb API key.  NOTE(review): this secret is committed to source control;
# prefer loading it from an environment variable.
TMDB_KEY = "60027f35df522f00e57a79b9d3568423"
"""
def get_tmdb_id_list():
#function to get all Tmdb_id between 06-16
import requests
import json
# from year 1996-2016
year = range(2006,2017)
## 50 pages
page_num = range(1,50)
id_list = []
tmdb_id_query = "https://api.themoviedb.org/3/discover/movie?" \
+ "api_key=%s" \
+ "&language=en-US&sort_by=release_date.asc" \
+ "&include_adult=false&include_video=false" \
+ "&page=%d" \
+ "&primary_release_year=%d"
for n in page_num:
for yr in year:
rq = requests.get(tmdb_id_query % (TMDB_KEY,n,yr)).json()
for item in rq['results']:
id_list.append(item['id'])
return id_list
start = time.time()
ID_LIST = get_tmdb_id_list()
stop = time.time()
print(ID_LIST)
print(stop - start)
"""
# URL template for the TMDb movie-details endpoint; the movie id and the
# API key are interpolated via %-formatting below.
query = "https://api.themoviedb.org/3/movie/%d?" \
    +"api_key=%s" \
    +"&language=en-US"
movie_id = 78
# NOTE(review): performs a live HTTP request at module run time using the
# hard-coded key above.
request = requests.get(query %(movie_id,TMDB_KEY)).json()
| andurilhuang/Movie_Income_Prediction | paper/historycode/toAnna/get_test.py | Python | mit | 1,236 |
"""Start a tcp gateway."""
import click
from mysensors.cli.helper import (
common_gateway_options,
handle_msg,
run_async_gateway,
run_gateway,
)
from mysensors.gateway_tcp import AsyncTCPGateway, TCPGateway
def common_tcp_options(func):
    """Supply common tcp gateway options."""
    # Applied innermost-first so that --port is attached before --host,
    # exactly as the original sequential assignments did.
    port_option = click.option(
        "-p",
        "--port",
        default=5003,
        show_default=True,
        type=int,
        help="TCP port of the connection.",
    )
    host_option = click.option(
        "-H", "--host", required=True, help="TCP address of the gateway."
    )
    return host_option(port_option(func))
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def tcp_gateway(**kwargs):
    """Start a tcp gateway."""
    # Build the gateway from the collected CLI options; every gateway event
    # is dispatched to the shared handle_msg callback.
    run_gateway(TCPGateway(event_callback=handle_msg, **kwargs))
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def async_tcp_gateway(**kwargs):
    """Start an async tcp gateway."""
    # Same wiring as tcp_gateway, but with the asyncio gateway and runner.
    run_async_gateway(AsyncTCPGateway(event_callback=handle_msg, **kwargs))
| theolind/pymysensors | mysensors/cli/gateway_tcp.py | Python | mit | 1,102 |
# -*- coding: utf-8 -*-
from double_linked import DoubleLinkedList
class Deque(object):
    """Double-ended queue built by composition over a DoubleLinkedList."""

    def __init__(self, input=None):
        """Create the underlying doubly linked list, optionally pre-filled."""
        self.deque = DoubleLinkedList(input)

    def append(self, val):
        """Add ``val`` via the list's ``append``."""
        self.deque.append(val)

    def append_left(self, val):
        """Add ``val`` via the list's ``insert``."""
        self.deque.insert(val)

    def pop(self):
        """Remove and return a value via the list's ``pop``."""
        return self.deque.pop()

    def pop_left(self):
        """Remove and return a value via the list's ``shift``."""
        return self.deque.shift()

    def peek(self):
        """Return the head node's value without removing it, or None if empty."""
        node = self.deque.head
        return node.data if node is not None else None

    def peek_left(self):
        """Return the tail node's value without removing it, or None if empty."""
        node = self.deque.tail
        return node.data if node is not None else None

    def size(self):
        """Count the nodes by walking from head toward tail."""
        count = 0
        node = self.deque.head
        while node is not None:
            count += 1
            node = node.toward_tail
        return count
| paulsheridan/data-structures | src/deque.py | Python | mit | 964 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
import os
# Cython extension: compile fast_likelihood.pyx as C++ with OpenMP enabled,
# against the NumPy headers.
ext_modules = [
    Extension(
        "fast_likelihood",
        ['fast_likelihood.pyx'],
        include_dirs=[np.get_include()],
        extra_compile_args=['-O3', '-fopenmp', '-lc++'],
        extra_link_args=['-fopenmp'],
        language='c++',
    ),
]
setup(
name = 'fastgmm',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
) | jeremy-ma/gmmmc | gmmmc/fastgmm/setup_fast_likelihood.py | Python | mit | 597 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, thumbor-community
# Use of this source code is governed by the MIT license that can be
# found in the LICENSE file.
from pyvows import Vows, expect
from tc_shortener.generators.short_generator import Generator
from tc_core.context import Context
from thumbor.config import Config
from thumbor.importer import Importer
@Vows.batch
class ShortGeneratorVows(Vows.Context):
    # Vows for tc_shortener's short-URL Generator.
    class AShortGenerator(Vows.Context):
        def topic(self):
            # Build a Generator with a minimal thumbor Context (no handler).
            config = Config()
            importer = Importer(config)
            context = Context(None, config, importer)
            return Generator(context)
        class WithIncorrectUrl(Vows.Context):
            @Vows.capture_error
            def topic(self, short_generator):
                # An empty URL must be rejected.
                return short_generator.get('')
            def should_raise_error(self, topic):
                expect(topic).to_be_an_error_like(ValueError)
        class WhenShortening(Vows.Context):
            def topic(self, short_generator):
                return short_generator.get('/unsafe/200x300/image.jpg')
            def should_preserve_image(self, topic):
                # The shortened URL keeps the original image name at the end.
                expect(topic).to_match(r'^.*/image.jpg$')
            def should_be_fixed_length(self, topic):
expect(topic).to_length(22+len('/image.jpg')) | thumbor-community/shortener | vows/generators/short_generator_vows.py | Python | mit | 1,326 |
import mutable_attr
import unittest
class T(unittest.TestCase):
    def test_foo(self):
        # Deliberately assigns to an attribute of the imported module,
        # mutating shared module state from inside a test.
        mutable_attr.y = 3
| github/codeql | python/ql/test/query-tests/Imports/general/mutates_in_test.py | Python | mit | 117 |
"""This module contains a prototype implementation of the
TT-cross-based minimization procedure
"""
import numpy as np
import math
import tt
from ..maxvol import maxvol
from ..utils.rect_maxvol import rect_maxvol
def reshape(a, sz):
    """Return ``a`` reshaped to ``sz`` in Fortran (column-major) order."""
    return np.asarray(a).reshape(sz, order="F")
def mkron(a, b):
    """Kronecker product of ``a`` and ``b`` (thin wrapper over np.kron)."""
    return np.kron(np.asarray(a), np.asarray(b))
def mysvd(a, full_matrices=False):
    """SVD of ``a`` that retries once with a tiny perturbation on failure.

    ``np.linalg.svd`` can fail to converge on ill-conditioned matrices; in
    that case a random jitter of relative magnitude ~1e-14 is added and the
    decomposition is recomputed.

    :param a: 2-D array to decompose.
    :param full_matrices: passed through to ``np.linalg.svd``.
    :returns: the ``(u, s, v)`` triple from ``np.linalg.svd``.
    """
    try:
        return np.linalg.svd(a, full_matrices)
    # Only retry on convergence failures; the original bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit.
    except np.linalg.LinAlgError:
        # Perturb proportionally to the largest-magnitude entry and retry.
        jitter = np.max(np.abs(a).flatten()) * 1e-14
        noisy = a + jitter * np.random.randn(a.shape[0], a.shape[1])
        return np.linalg.svd(noisy, full_matrices)
def min_func(fun, bounds_min, bounds_max, d=None, rmax=10, n0=64, nswp=10, verb=True, smooth_fun=None):
    """Find (approximate) minimal value of the function on a d-dimensional grid.

    TT-cross style alternating sweeps: nested index sets are refined with
    (rect_)maxvol while the best value sampled so far is tracked.

    NOTE: Python 2 code (``xrange``, print statements, ``np.int``).

    :param fun: callable taking an (m, d) array of points (one per row) and
        returning m values; also called on a single 1-D point when a new
        record is found.
    :param bounds_min: lower bounds (array-like), or a scalar when ``d`` is given.
    :param bounds_max: upper bounds, same convention as ``bounds_min``.
    :param d: number of dimensions; inferred from ``bounds_min`` when None.
    :param rmax: maximal rank (size) of the index sets.
    :param n0: number of grid points per dimension.
    :param nswp: number of sweeps over the dimensions.
    :param verb: print every new record when True.
    :param smooth_fun: decreasing transform applied to the sampled values
        around the current record ``lam`` (default: pi/2 - arctan(p - lam)).
    :returns: ``(val, x_full)`` -- best value found and its grid point.
    """
    if d is None:
        d = len(bounds_min)
        a = np.asanyarray(bounds_min).copy()
        b = np.asanyarray(bounds_max).copy()
    else:
        a = np.ones(d) * bounds_min
        b = np.ones(d) * bounds_max
    if smooth_fun is None:
        smooth_fun = lambda p, lam: (math.pi/2 - np.arctan(p - lam))
        #smooth_fun = lambda p, lam: np.exp(-10*(p - lam))
    #We do not need to store the cores, only the interfaces!
    Rx = [[]] * (d + 1) #Python list for the interfaces
    Rx[0] = np.ones((1, 1))
    Rx[d] = np.ones((1, 1))
    Jy = [np.empty(0)] * (d + 1)
    # ry[i]: current number of retained multi-indices at interface i.
    # NOTE(review): np.int was removed in NumPy 1.24; acceptable only for the
    # legacy NumPy versions this Python 2 module targets.
    ry = rmax * np.ones(d + 1, dtype = np.int)
    ry[0] = 1
    ry[d] = 1
    n = n0 * np.ones(d, dtype = np.int)
    fun_evals = 0
    # Uniform discretization of [a[i], b[i]] per dimension (column vectors).
    grid = [np.reshape(np.linspace(a[i], b[i], n[i]),(n[i], 1)) for i in xrange(d)]
    # Warm-up: build nested left index sets Jy from random cores via maxvol.
    for i in xrange(d - 1):
        #cr1 = y[i]
        ry[i + 1] = min(ry[i + 1], n[i] * ry[i])
        cr1 = np.random.randn(ry[i], n[i], ry[i + 1])
        cr1 = reshape(cr1, (ry[i] * n[i], ry[i + 1]))
        q, r = np.linalg.qr(cr1)
        ind = maxvol(q)
        # Candidate set: every previous index combined with every grid value.
        w1 = mkron(np.ones((n[i], 1)), Jy[i])
        w2 = mkron(grid[i], np.ones((ry[i], 1)))
        Jy[i + 1] = np.hstack((w1, w2))
        Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
        Jy[i + 1] = Jy[i + 1][ind, :]
        #Jy{i+1} = [kron(ones(n(i),1), Jy{i}), kron((1:n(i))', ones(ry(i),1))];
        #Jy{i+1} = Jy{i+1}(ind,:);
    swp = 0
    dirn = -1  # sweep direction: -1 right-to-left, +1 left-to-right
    i = d - 1
    lm = 999999999999  # best (lowest) value sampled so far
    while swp < nswp:
        #Right-to-left sweep
        #The idea: compute the current core; compute the function of it;
        #Shift locally or globally? Local shift would be the first try
        #Compute the current core
        # Assemble all sample points addressed by the current core: left
        # prefixes Jy[i], the i-th coordinate, right suffixes Jy[i + 1].
        if np.size(Jy[i]) == 0:
            w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
        else:
            w1 = mkron(np.ones((n[i] * ry[i + 1], 1)), Jy[i])
        w2 = mkron(mkron(np.ones((ry[i + 1], 1)), grid[i]), np.ones((ry[i], 1)))
        if np.size(Jy[i + 1]) == 0:
            w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
        else:
            w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1)))
        J = np.hstack((w1, w2, w3))
        #Just add some random indices to J, which is rnr x d, need to make rn (r + r0) x add,
        #i.e., just generate random r, random n and random multiindex
        cry = fun(J)
        fun_evals += cry.size
        cry = reshape(cry, (ry[i], n[i], ry[i + 1]))
        min_cur = np.min(cry.flatten("F"))
        ind_cur = np.argmin(cry.flatten("F"))
        # Track the global record; re-evaluate at the single point for output.
        # NOTE(review): if no sample ever beat the initial lm, `val`/`x_full`
        # would be unbound at return; in practice the first comparison against
        # the huge initial lm always succeeds.
        if lm > min_cur:
            lm = min_cur
            x_full = J[ind_cur, :]
            val = fun(x_full)
            if verb:
                print 'New record:', val, 'Point:', x_full, 'fevals:', fun_evals
        # Emphasize small values before the maxvol-based index update.
        cry = smooth_fun(cry, lm)
        if ( dirn < 0 and i > 0):
            # Right-to-left step: rebuild the index set Jy[i] from grid[i]
            # and the right set Jy[i + 1].
            cry = reshape(cry, (ry[i], n[i] * ry[i + 1]))
            cry = cry.T
            #q, r = np.linalg.qr(cry)
            u, s, v = mysvd(cry, full_matrices=False)
            ry[i] = min(ry[i], rmax)
            q = u[:, :ry[i]]
            ind = rect_maxvol(q)[0]#maxvol(q)
            ry[i] = ind.size
            w1 = mkron(np.ones((ry[i + 1], 1)), grid[i])
            if np.size(Jy[i + 1]) == 0:
                w2 = np.zeros((n[i] * ry[i + 1], 0))
            else:
                w2 = mkron(Jy[i + 1], np.ones((n[i], 1)))
            Jy[i] = np.hstack((w1, w2))
            Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1))
            Jy[i] = Jy[i][ind, :]
        if ( dirn > 0 and i < d - 1):
            # Left-to-right step: rebuild the left index set Jy[i + 1].
            cry = reshape(cry, (ry[i] * n[i], ry[i + 1]))
            q, r = np.linalg.qr(cry)
            #ind = maxvol(q)
            ind = rect_maxvol(q)[0]
            ry[i + 1] = ind.size
            w1 = mkron(np.ones((n[i], 1)), Jy[i])
            w2 = mkron(grid[i], np.ones((ry[i], 1)))
            Jy[i + 1] = np.hstack((w1, w2))
            Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
            Jy[i + 1] = Jy[i + 1][ind, :]
        i += dirn
        # Bounce at the ends: reverse direction and count a finished sweep.
        if i == d or i == -1:
            dirn = -dirn
            i += dirn
            swp = swp + 1
    return val, x_full
def min_tens(tens, rmax=10, nswp=10, verb=True, smooth_fun=None):
    """Find (approximate) minimal element in a TT-tensor.

    Same alternating maxvol sweeps as ``min_func``, but values come from
    contracting the TT-cores of ``tens`` through the cached interfaces
    phi_left/phi_right instead of calling a function.

    NOTE: Python 2 code (``xrange``, print statements, ``np.int``).

    :param tens: TT-tensor (has ``d`` and ``n``, indexable by multi-index,
        convertible via ``tt.tensor.to_list``).
    :param rmax: maximal rank (size) of the index sets.
    :param nswp: number of sweeps over the dimensions.
    :param verb: print every new record when True.
    :param smooth_fun: decreasing transform applied around the record
        (default: pi/2 - arctan(p - lam)).
    :returns: ``(val, x_full)`` -- best value found and its multi-index.
    """
    if smooth_fun is None:
        smooth_fun = lambda p, lam: (math.pi/2 - np.arctan(p - lam))
    d = tens.d
    Rx = [[]] * (d + 1)  # Python list for the interfaces
    Rx[0] = np.ones((1, 1))
    Rx[d] = np.ones((1, 1))
    Jy = [np.empty(0)] * (d + 1)
    # NOTE(review): np.int was removed in NumPy 1.24; acceptable only for the
    # legacy NumPy versions this Python 2 module targets.
    ry = rmax * np.ones(d + 1, dtype=np.int)
    ry[0] = 1
    ry[d] = 1
    n = tens.n
    elements_seen = 0
    # phi_left[i] / phi_right[i]: partial contractions of the cores over the
    # currently selected index sets on each side of interface i.
    phi_left = [np.empty(0)] * (d + 1)
    phi_left[0] = np.array([1])
    phi_right = [np.empty(0)] * (d + 1)
    phi_right[d] = np.array([1])
    cores = tt.tensor.to_list(tens)
    # Fill initial multiindex J randomly.
    grid = [np.reshape(range(n[i]), (n[i], 1)) for i in xrange(d)]
    for i in xrange(d - 1):
        ry[i + 1] = min(ry[i + 1], n[i] * ry[i])
        ind = sorted(np.random.permutation(ry[i] * n[i])[0:ry[i + 1]])
        w1 = mkron(np.ones((n[i], 1)), Jy[i])
        w2 = mkron(grid[i], np.ones((ry[i], 1)))
        Jy[i + 1] = np.hstack((w1, w2))
        Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
        Jy[i + 1] = Jy[i + 1][ind, :]
        # Keep the left interface consistent with the chosen rows.
        phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1)
        phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1))
        phi_left[i + 1] = phi_left[i + 1][ind, :]
    swp = 0
    dirn = -1  # sweep direction: -1 right-to-left, +1 left-to-right
    i = d - 1
    lm = 999999999999  # best (lowest) value sampled so far
    while swp < nswp:
        #Right-to-left sweep
        #The idea: compute the current core; compute the function of it;
        #Shift locally or globally? Local shift would be the first try
        #Compute the current core
        # Assemble the multi-indices addressed by the current core.
        if np.size(Jy[i]) == 0:
            w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
        else:
            w1 = mkron(np.ones((n[i] * ry[i + 1], 1)), Jy[i])
        w2 = mkron(mkron(np.ones((ry[i + 1], 1)), grid[i]), np.ones((ry[i], 1)))
        if np.size(Jy[i + 1]) == 0:
            w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0))
        else:
            w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1)))
        J = np.hstack((w1, w2, w3))
        # Evaluate the supercore: contract left interface, core i and right
        # interface to obtain the tensor values at the addressed indices.
        phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1)
        phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1]))
        cry = np.tensordot(phi_left[i], np.tensordot(cores[i], phi_right[i + 1], 1), 1)
        elements_seen += cry.size
        cry = reshape(cry, (ry[i], n[i], ry[i + 1]))
        min_cur = np.min(cry.flatten("F"))
        ind_cur = np.argmin(cry.flatten("F"))
        # Track the global record; re-read the element from the tensor.
        # NOTE(review): if no sample ever beat the initial lm, `val`/`x_full`
        # would be unbound at return; in practice the first comparison against
        # the huge initial lm always succeeds.
        if lm > min_cur:
            lm = min_cur
            x_full = J[ind_cur, :]
            val = tens[x_full]
            if verb:
                print 'New record:', val, 'Point:', x_full, 'elements seen:', elements_seen
        # Emphasize small values before the maxvol-based index update.
        cry = smooth_fun(cry, lm)
        if dirn < 0 and i > 0:
            # Right-to-left step: rebuild Jy[i] and the right interface.
            cry = reshape(cry, (ry[i], n[i] * ry[i + 1]))
            cry = cry.T
            #q, r = np.linalg.qr(cry)
            u, s, v = mysvd(cry, full_matrices=False)
            ry[i] = min(ry[i], rmax)
            q = u[:, :ry[i]]
            ind = rect_maxvol(q)[0]#maxvol(q)
            ry[i] = ind.size
            w1 = mkron(np.ones((ry[i + 1], 1)), grid[i])
            if np.size(Jy[i + 1]) == 0:
                w2 = np.zeros((n[i] * ry[i + 1], 0))
            else:
                w2 = mkron(Jy[i + 1], np.ones((n[i], 1)))
            Jy[i] = np.hstack((w1, w2))
            Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1))
            Jy[i] = Jy[i][ind, :]
            phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1)
            phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1]))
            phi_right[i] = phi_right[i][:, ind]
        if dirn > 0 and i < d - 1:
            # Left-to-right step: rebuild Jy[i + 1] and the left interface.
            cry = reshape(cry, (ry[i] * n[i], ry[i + 1]))
            q, r = np.linalg.qr(cry)
            #ind = maxvol(q)
            ind = rect_maxvol(q)[0]
            ry[i + 1] = ind.size
            phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1)
            phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1))
            phi_left[i + 1] = phi_left[i + 1][ind, :]
            w1 = mkron(np.ones((n[i], 1)), Jy[i])
            w2 = mkron(grid[i], np.ones((ry[i], 1)))
            Jy[i + 1] = np.hstack((w1, w2))
            Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1))
            Jy[i + 1] = Jy[i + 1][ind, :]
        i += dirn
        # Bounce at the ends: reverse direction and count a finished sweep.
        if i == d or i == -1:
            dirn = -dirn
            i += dirn
            swp = swp + 1
    return val, x_full
| gasongjian/ttpy | tt/optimize/tt_min.py | Python | mit | 9,368 |
# coding: utf-8
"""
labeled_images文件夹中:
1. 包含的文件夹名为标记名
2. 标记名下的文件夹中包含了学习图片
"""
import os
from sklearn import svm
from PIL import Image
from numpy import array
from utils import *
# Module-level cache for the fitted classifier; populated lazily by
# get_classifier_from_learn().
clf = None
def get_image_fit_data(dir_name):
    """Read labeled images under ``dir_name``.

    Each subdirectory name is used as the label; every image inside it is
    flattened into a 1-D feature vector.  Returns ``(features, labels)``.
    """
    features = []
    labels = []
    for label in os.listdir(dir_name):
        label_dir = os.path.join(dir_name, label)
        if not os.path.isdir(label_dir):
            continue
        for filename in os.listdir(label_dir):
            image = Image.open(os.path.join(label_dir, filename))
            features.append(array(image).flatten())
            labels.append(label)
    return features, labels
def get_classifier_from_learn():
    """Return an SVM classifier fitted on labeled_images/ (cached globally)."""
    global clf
    if clf is None:
        clf = svm.SVC()
        samples, labels = get_image_fit_data("labeled_images")
        clf.fit(samples, labels)
    return clf
def main():
    """Train on labeled_images/ and print predictions for predict_images/."""
    classifier = get_classifier_from_learn()
    print(classifier)
    samples, expected = get_image_fit_data("predict_images")
    for features, label in zip(samples, expected):
        prediction = classifier.predict(features.reshape(1, -1))
        print(prediction, label)
if __name__ == '__main__':
    # Run the demo when executed as a script.
    main()
| mythkiven/python | demo/CET查询/learn_images.py | Python | mit | 1,312 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext
from django.utils import simplejson as json
from django.conf import settings
from StringIO import StringIO
from gitdb import IStream
from git import *
from git.exc import InvalidGitRepositoryError
from collections import defaultdict
from datetime import datetime
import os
# Shell script installed as the Git post-receive hook of repositories created
# by Repository.new: after each push it forces the work tree to match HEAD
# and refreshes the index, discarding local changes.
_hook = """#!/bin/sh
cd ..
env -i git reset --hard > /dev/null 2>&1
env -i git update-index > /dev/null 2>&1
"""
def _do_commit(repo, path, content, commit_msg=None):
    """ Store ``content`` as a blob at ``path`` in ``repo`` and commit it.

    The text is UTF-8 encoded, written to the object database, staged in the
    index and committed; a default message is generated when ``commit_msg``
    is None.
    """
    # Create the blob object
    stream = StringIO(content.encode('utf-8'))
    # Seek to the end to measure the encoded length, then rewind.
    stream.seek(0, 2)
    streamlen = stream.tell()
    stream.seek(0)
    istream = IStream('blob', streamlen, stream)
    # Add it to the repository object database
    repo.odb.store(istream)
    # Create the corresponding Blob object
    blob = Blob(repo, istream.binsha, Blob.file_mode, path.encode('utf-8'))
    # Add blob to the index
    repo.index.add([IndexEntry.from_blob(blob)])
    if not commit_msg:
        commit_msg = ugettext(u'Update Wiki: {0}').format(path).encode('utf-8')
    repo.index.commit(commit_msg)
class Repository(object):
    """ Git-backed wiki store: wraps a working copy and exposes wiki
    operations (read/write pages, uploads, history, diffs, tree, search). """

    @classmethod
    def new(cls, gitdir):
        """ Initialize a repository and create the root commit.

        If ``gitdir`` already exists it is simply opened. """
        # Create repository
        if os.path.exists(gitdir.encode('utf-8')):
            return cls(gitdir)
        repo = Repo.init(gitdir.encode('utf-8'))
        # Accept pushes to the checked-out branch; the hook below re-syncs
        # the work tree afterwards.
        repo.config_writer().set_value('receive', 'denyCurrentBranch', 'ignore')
        # Create hook to automatically update when we receive commits from clients
        post_receive = os.path.join(gitdir, '.git', 'hooks', 'post-receive')
        with open(post_receive, 'w') as f:
            f.write(_hook)
        # Hook must be executable (rwxrwxr-x).  0o775 is the portable octal
        # spelling (the old py2-only literal was 0775).
        os.chmod(post_receive, 0o775)
        # Create the initial commit
        _do_commit(repo, u'{0}.md'.format(settings.WIKI_INDEX), '# Home', commit_msg=ugettext(u'Initialize'))
        return cls(gitdir)

    def __init__(self, gitdir):
        """ Open the repository located at ``gitdir``. """
        self.repo = Repo(gitdir.encode('utf-8'))
        self.gitdir = gitdir
        self.parse()

    @property
    def git(self):
        # Direct access to the underlying `git` command wrapper.
        return self.repo.git

    @property
    def head(self):
        return self.repo.head

    def parse(self):
        """ Refresh the cached Tree/Blob objects from HEAD. """
        # Do git reset --hard and git update-index
        self.repo.head.reset(index=True, working_tree=True)
        self.repo.git.update_index()
        self.repo_tree = self.repo.tree()
        self.entries = [e for e in self.repo_tree.traverse()]
        self.blobs = [b for b in self.entries if isinstance(b, Blob)]
        self.trees = [self.repo_tree] + [t for t in self.entries if isinstance(t, Tree)]

    def exists(self, path):
        """ Check if path exists in repository. """
        if path == self.repo_tree.path:
            return True
        for e in self.entries:
            if path == e.path:
                return True
        return False

    def is_dir(self, path):
        """ Check if path is a directory. """
        for t in self.trees:
            if path == t.path:
                return True
        return False

    def get_file_mimetype(self, path):
        """ Get mimetype of file stored in ``path`` (None if not found). """
        if self.is_dir(path):
            return 'inode/directory'
        for blob in self.blobs:
            if blob.path == path:
                return blob.mime_type

    def set_content(self, path, content, commit_msg=None):
        """ Commit new content in ``path``. """
        _do_commit(self.repo, path, content, commit_msg)
        # Update internal informations
        self.parse()

    def put_uploaded_file(self, path, ufile, commit_msg=None):
        """ Write an uploaded file to ``path`` and commit it. """
        # Re-parse to be sure
        self.parse()
        # Get absolute path to the file
        abspath = os.path.join(self.gitdir, path)
        # Make directory for the file (it may already exist)
        try:
            os.makedirs(os.path.dirname(abspath))
        except OSError:
            pass
        # Write the file
        with open(abspath, 'wb') as f:
            for chunk in ufile.chunks():
                f.write(chunk)
        # Add it to the repository (debug stderr print removed)
        self.repo.index.add([path.encode('utf-8')])
        # And commit
        if not commit_msg:
            commit_msg = ugettext(u'Upload document: {0}').format(path).encode('utf-8')
        self.repo.index.commit(commit_msg)
        # Update internal informations
        self.parse()

    def get_content(self, path):
        """ Return ``(data, name, mimetype)`` for the file at ``path``
        (None if not found). """
        for blob in self.blobs:
            if blob.path == path:
                return blob.data_stream.read(), blob.name, blob.mime_type

    def rm_content(self, path):
        """ Remove file located at ``path`` and commit the deletion. """
        self.repo.index.remove([path.encode('utf-8')])
        # Translate the message template first, then interpolate the path,
        # so the string matches the translation catalog (same pattern as
        # _do_commit).
        self.repo.index.commit(ugettext(u'Update Wiki: {0} deleted').format(path).encode('utf-8'))
        self.parse()

    def commit(self, message):
        """ Create an empty commit """
        Commit.create_from_tree(self.repo, self.repo.tree(), message, head=True)

    def get_folder_tree(self, path):
        """ Get list of files contained in ``path`` (None if not a tree). """
        for tree in self.trees:
            if tree.path == path:
                ret = []
                ret = ret + [{'path': b.path, 'name': b.name, 'type': b.mime_type} for b in tree.blobs]
                ret = ret + [{'path': t.path, 'name': t.name, 'type': 'inode/directory'} for t in tree.trees]
                return ret

    def get_file_history(self, path):
        """ Get the list of commits that touched ``path``. """
        return [self.repo.commit(line.split(' ', 1)[0]) for line in self.repo.git.log('--pretty=oneline', '--', path.encode('utf-8')).splitlines()]

    def get_history(self, limit=None):
        """ Get repository's history (at most ``limit`` commits). """
        if limit:
            return [self.repo.commit(line.split(' ', 1)[0]) for line in self.repo.git.log('--pretty=oneline', '-{0}'.format(limit)).splitlines()]
        return [self.repo.commit(line.split(' ', 1)[0]) for line in self.repo.git.log('--pretty=oneline').splitlines()]

    def get_file_diffs(self, path):
        """ Get diffs for a file, as dicts with commit metadata. """
        diffs = {'diffs': []}
        if self.exists(path):
            commits = self.get_file_history(path)
            for c in commits:
                diff = {
                    'msg': c.message,
                    'date': datetime.fromtimestamp(c.authored_date),
                    'author': c.author.name,
                    'sha': c.hexsha,
                    'path': path,
                }
                # Root commits have no parent, hence no parent_sha key.
                if c.parents:
                    diff['parent_sha'] = c.parents[0].hexsha
                diffs['diffs'].append(diff)
        return diffs

    def get_diffs(self, limit=None):
        """ Return repository's diffs, as dicts with commit metadata. """
        commits = self.get_history(limit=limit)
        diffs = {'diffs': []}
        for c in commits:
            diff = {
                'msg': c.message,
                'date': datetime.fromtimestamp(c.authored_date),
                'author': c.author.name,
                'sha': c.hexsha
            }
            # Root commits have no parent, hence no parent_sha key.
            if c.parents:
                diff['parent_sha'] = c.parents[0].hexsha
            diffs['diffs'].append(diff)
        return diffs

    def get_tree(self):
        """ Get full tree of repository as nested node dicts. """
        ret = {'node': {
            'name': '/',
            'path': '/',
            'type': 'tree',
            'children': []
        }}
        # Get all paths from the repository
        for e in self.entries:
            spath = e.path.split('/')
            # We do not want the __media__ in our tree
            if spath[0] == '__media__':
                continue
            node = ret['node']
            # Build tree before inserting node
            for d in spath[:-1]:
                new_node = {'node': {
                    'name': d,
                    'path': e.path,
                    'type': 'tree',
                    'children': []
                }}
                # Search if the node is already in the tree
                for n in node['children']:
                    if d == n['node']['name']:
                        new_node = n
                        break
                # If not, just add it
                else:
                    node['children'].append(new_node)
                # Up level
                node = new_node['node']
            if isinstance(e, Tree):
                new_node = {'node': {
                    'name': e.name,
                    'path': e.path,
                    'type': 'tree',
                    'children': []
                }}
            else:
                new_node = {'node': {
                    'name': e.name,
                    'path': e.path,
                    'type': 'file'
                }}
            node['children'].append(new_node)
        return ret

    def get_json_tree(self):
        """ JSON-serialized version of get_tree(). """
        return json.dumps(self.get_tree())

    def search(self, pattern):
        """ Search for a pattern inside the repository and returns the list
        of ``(url, [matches])`` pairs. """
        results = []
        # Do the search
        try:
            out = self.git.grep('-i', '-I', '--cached', pattern.encode('utf-8'))
        except GitCommandError:
            # No results found
            return []
        for line in out.splitlines():
            # Exclude __media__
            if not line.startswith('__media__'):
                # grep output is "path:match"
                sep = line.find(':')
                url = line[:sep]
                match = line[sep + 1:]
                # Remove markdown extension
                if url.endswith('.md'):
                    url = url[:url.rfind('.md')]
                # Append to the results
                results.append((url, match))
        # Group results
        groups = defaultdict(list)
        for result in results:
            groups[result[0]].append(result[1])
        results = groups.items()
        return results
| 9h37/pompadour-wiki | pompadour_wiki/pompadour_wiki/apps/utils/git_db.py | Python | mit | 10,263 |
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg") # important to call this right after
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib import style
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
try:
import Tkinter as tk
import ttk
import tkFont
import tkMessageBox
except:
import tkinter as tk
from tkinter import ttk
from tkinter import font as tkFont
from tkinter import messagebox as tkMessageBox
from auquanToolbox.metrics import metrics, baseline
def loadgui(back_data, exchange, base_index, budget, logger):
    """Open a Tk window visualizing the results of a finished backtest.

    Parameters:
        back_data: dict of pandas DataFrames (dates x markets) keyed by
            'POSITION', 'CLOSE', 'VALUE', 'DAILY_PNL', 'TOTAL_PNL'.
        exchange: exchange identifier, passed through to baseline().
        base_index: benchmark index name, or a falsy value for no benchmark.
        budget: starting capital; PnL is divided by it. When budget > 1 the
            axes and the stats row are formatted as percentages.
        logger: logger the computed metrics are printed to.

    Blocks in Tk's mainloop until the window is closed.
    """
    ######################
    # Setup data
    ######################
    position = back_data['POSITION']
    close = back_data['CLOSE']
    # position as % of total portfolio
    long_position = (position * close).div(back_data['VALUE'], axis=0)
    short_position = long_position.copy()
    # Split exposure: positive weights are long, negative weights are short.
    long_position[long_position < 0] = 0
    short_position[short_position > 0] = 0
    # Normalize PnL by the starting budget so charts read as returns.
    daily_pnl = back_data['DAILY_PNL'] / budget
    total_pnl = back_data['TOTAL_PNL'] / budget
    if base_index:
        baseline_data = baseline(exchange, base_index, total_pnl.index, logger)
        stats = metrics(daily_pnl, total_pnl, baseline_data, base_index)
    else:
        baseline_data = {}
        stats = metrics(daily_pnl, total_pnl, {}, base_index)
    daily_return = daily_pnl.sum(axis=1)
    total_return = total_pnl.sum(axis=1)
    long_exposure = long_position.sum(axis=1)
    short_exposure = short_position.sum(axis=1)
    zero_line = np.zeros(daily_pnl.index.size)
    # print to logger
    for x in stats.keys():
        logger.info('%s : %0.2f' % (x, stats[x]))
    def isDate(val):
        # Function to validate if a given entry is valid date
        # (Tk 'key' validator: accepts only strings parsing to a date
        # strictly inside the backtest period).
        try:
            d = pd.to_datetime(val)
            if d > daily_pnl.index[0] and d < daily_pnl.index[-1]:
                return True
            else:
                return False
        except ValueError:
            raise ValueError("Not a Valid Date")
        # unreachable: every path above returns or raises
        return False
    def newselection(event):
        # Function to autoupdate chart on new selection from dropdown
        i = dropdown.current()
        market = ['TOTAL PORTFOLIO'] + daily_pnl.columns.values.tolist()
        plot(daily_pnl, total_pnl, long_position, short_position, baseline_data, base_index, market[i], box_value2.get(), box_value3.get())
    # NOTE: the start/end defaults below are evaluated once, when this inner
    # def executes — they capture the full backtest range, which is intended.
    def plot(daily_pnl, total_pnl, long_position, short_position,
             baseline_data, base_index, market='TOTAL PORTFOLIO',
             start=daily_pnl.index.format()[0],
             end=daily_pnl.index.format()[-1]):
        # New plot when custom fields are changed
        plt.clf()
        # plt.style.use("seaborn-whitegrid")
        daily_pnl = daily_pnl.loc[start:end]
        total_pnl = total_pnl.loc[start:end]
        long_position = long_position.loc[start:end]
        short_position = short_position.loc[start:end]
        if market == 'TOTAL PORTFOLIO':
            daily_return = daily_pnl.sum(axis=1)
            total_return = total_pnl.sum(axis=1)
            long_exposure = long_position.sum(axis=1)
            short_exposure = short_position.sum(axis=1)
        else:
            daily_return = daily_pnl[market]
            total_return = total_pnl[market]
            long_exposure = long_position[market]
            short_exposure = short_position[market]
        zero_line = np.zeros(daily_pnl.index.size)
        # f, plot_arr = plt.subplots(3, sharex=True)
        # Three stacked panels sharing the x axis: cumulative PnL, daily PnL,
        # long/short exposure.
        total_plot = plt.subplot2grid((10, 8), (0, 0), colspan=12, rowspan=4)
        daily_plot = plt.subplot2grid((10, 8), (5, 0), colspan=12, rowspan=2, sharex=total_plot)
        position_plot = plt.subplot2grid((10, 8), (8, 0), colspan=12, rowspan=2, sharex=total_plot)
        ind = np.arange(len(daily_pnl.index))
        total_plot.set_title('Total PnL')
        total_plot.plot(ind, zero_line, 'k')
        total_plot.plot(ind, total_return.values, 'b', linewidth=0.5, label='strategy')
        total_plot.legend(loc='upper left')
        total_plot.autoscale(tight=True)
        plt.setp(total_plot.get_xticklabels(), visible=False)
        total_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
        total_plot.set_ylabel('Cumulative Performance')
        total_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
        if base_index:
            total_plot.plot(ind, baseline_data['TOTAL_PNL'], 'g', linewidth=0.5, label=base_index)
        daily_plot.set_title('Daily PnL')
        daily_plot.plot(ind, zero_line, 'k')
        daily_plot.bar(ind, daily_return.values, 0.2, align='center', color='c', label='strategy')
        daily_plot.legend(loc='upper left')
        daily_plot.autoscale(tight=True)
        plt.setp(daily_plot.get_xticklabels(), visible=False)
        daily_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
        daily_plot.set_ylabel('Daily Performance')
        daily_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
        position_plot.set_title('Daily Exposure')
        position_plot.plot(ind, zero_line, 'k')
        position_plot.bar(ind, short_exposure.values, 0.3, linewidth=0, align='center', color='r', label='short')
        position_plot.bar(ind, long_exposure.values, 0.3, linewidth=0, align='center', color='b', label='long')
        position_plot.legend(loc='upper left')
        position_plot.autoscale(tight=True)
        position_plot.xaxis.set_major_formatter(mtick.FuncFormatter(format_date))
        position_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
        position_plot.set_ylabel('Long/Short %')
        position_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
        plt.gcf().canvas.draw()
    def update_plot():
        # Callback Function for plot button
        try:
            d1 = pd.to_datetime(box_value2.get())
            d2 = pd.to_datetime(box_value3.get())
            if d1 >= daily_pnl.index[0] and d2 <= daily_pnl.index[-1]:
                plot(daily_pnl, total_pnl, long_position, short_position, baseline_data, base_index, box_value.get(), box_value2.get(), box_value3.get())
            else:
                tkMessageBox.showinfo("Date out of Range", "Please enter a date from %s to %s" % (daily_pnl.index[0].strftime('%Y-%m-%d'), daily_pnl.index[-1].strftime('%Y-%m-%d')))
        except ValueError:
            raise ValueError("Not a Valid Date")
    def close_window():
        # Callback function for Quit Button
        GUI.destroy()
        GUI.quit()
    def format_date(x, pos=None):
        # Format axis ticklabels to dates (x is a bar index into the
        # backtest's date index, rounded to the nearest integer)
        thisind = np.clip(int(x + 0.5), 0, len(daily_pnl.index) - 1)
        return daily_pnl.index[thisind].strftime('%b-%y')
    def format_perc(y, pos=None):
        # Format axis ticklabels to % (only when PnL was normalized by a
        # real budget; budget == 1 means raw values)
        if budget > 1:
            return '{percent:.2%}'.format(percent=y)
        else:
            return y
    def onFrameConfigure(canvas):
        # Keep the scrollable region in sync with the inner frame's size
        canvas.configure(scrollregion=canvas.bbox("all"))
    ######################
    # GUI mainloop
    ######################
    # Create widget
    GUI = tk.Tk()
    GUI.title('Backtest Results')
    winCanvas = tk.Canvas(GUI, borderwidth=0, background="#ffffff", width=1500, height=1000)
    frame = tk.Frame(winCanvas, background="#ffffff")
    vsb = tk.Scrollbar(GUI, orient="vertical", command=winCanvas.yview)
    hsb = tk.Scrollbar(GUI, orient="horizontal", command=winCanvas.xview)
    winCanvas.configure(yscrollcommand=vsb.set)
    winCanvas.configure(xscrollcommand=hsb.set)
    vsb.pack(side="left", fill="y")
    hsb.pack(side="bottom", fill="x")
    winCanvas.pack(side="right", fill="both", expand=True)
    winCanvas.create_window((50, 50), window=frame, anchor="nw")
    frame.bind("<Configure>", lambda event, canvas=winCanvas: onFrameConfigure(winCanvas))
    # Create dropdown for market
    Label_1 = tk.Label(frame, text="Trading Performance:")
    Label_1.grid(row=0, column=0, sticky=tk.EW)
    box_value = tk.StringVar()
    dropdown = ttk.Combobox(frame, textvariable=box_value, state='readonly')
    dropdown['values'] = ['TOTAL PORTFOLIO'] + daily_pnl.columns.values.tolist()
    dropdown.grid(row=0, column=1, sticky=tk.EW)
    dropdown.current(0)
    dropdown.bind('<<ComboboxSelected>>', newselection)
    # Create entry field for start date
    Label_2 = tk.Label(frame, text="Start Date")
    Label_2.grid(row=0, column=2, sticky=tk.EW)
    box_value2 = tk.StringVar(frame, value=daily_pnl.index.format()[0])
    start = tk.Entry(frame, textvariable=box_value2, validate='key', validatecommand=(GUI.register(isDate), '%P'))
    start.grid(row=0, column=3, sticky=tk.EW)
    # Create entry field for end date
    Label_3 = tk.Label(frame, text="End Date")
    Label_3.grid(row=0, column=4, sticky=tk.EW)
    box_value3 = tk.StringVar(frame, value=daily_pnl.index.format()[-1])
    end = tk.Entry(frame, textvariable=box_value3, validate='key', validatecommand=(GUI.register(isDate), '%P'))
    end.grid(row=0, column=5, sticky=tk.EW)
    # Create Plot button to reload chart
    button1 = tk.Button(frame, text='PLOT', command=update_plot)
    button1.grid(row=0, column=6, sticky=tk.EW)
    # Create text widget with backtest results (row of metric names over a
    # row of values; ratios stay plain numbers, the rest become % when
    # budget > 1)
    customFont1 = tkFont.Font(family="Helvetica", size=9, weight="bold")
    customFont2 = tkFont.Font(family="Helvetica", size=12)
    text = tk.Text(frame, height=3, width=50, wrap=tk.WORD, bd=5, padx=10, pady=5)
    text.grid(row=1, column=0, columnspan=7, sticky=tk.EW)
    String1 = ''
    String2 = ''
    for y in stats.keys():
        String1 = String1 + y + '\t\t'
        x = stats[y]
        if budget > 1 and 'Ratio' not in y:
            String2 = String2 + '{percent:.2%}'.format(percent=x) + '\t\t'
        else:
            String2 = String2 + '%0.2f' % x + '\t\t'
    text.insert(tk.END, String1)
    text.insert(tk.END, '\n')
    text.insert(tk.END, String2)
    text.tag_add("keys", "1.0", "1.end")
    text.tag_config("keys", font=customFont1)
    text.tag_add("values", "2.0", "2.end")
    text.tag_config("values", foreground="red", font=customFont2)
    # Create canvas to plot chart
    f = plt.figure(figsize=(16, 8))
    canvas = FigureCanvasTkAgg(f, master=frame)
    canvas.get_tk_widget().grid(row=2, column=0, columnspan=7, rowspan=1, sticky=tk.NSEW)
    toolbar_frame = tk.Frame(frame)
    toolbar_frame.grid(row=4, column=0, columnspan=7)
    # plot 3 subplots for total position, daily position and exposure
    # NOTE(review): this initial chart duplicates the body of plot() above
    # (with a slightly different y label); consider calling plot() instead.
    plt.style.use("seaborn-whitegrid")
    total_plot = plt.subplot2grid((10, 8), (0, 0), colspan=12, rowspan=4)
    daily_plot = plt.subplot2grid((10, 8), (5, 0), colspan=12, rowspan=2, sharex=total_plot)
    position_plot = plt.subplot2grid((10, 8), (8, 0), colspan=12, rowspan=2, sharex=total_plot)
    ind = np.arange(len(daily_pnl.index))
    total_plot.set_title('Total PnL')
    total_plot.plot(ind, zero_line, 'k')
    total_plot.plot(ind, total_return.values, 'b', linewidth=0.5, label='strategy')
    total_plot.legend(loc='upper left')
    total_plot.autoscale(tight=True)
    plt.setp(total_plot.get_xticklabels(), visible=False)
    total_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
    total_plot.set_ylabel('Cumulative Performance')
    total_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
    if base_index:
        total_plot.plot(ind, baseline_data['TOTAL_PNL'], 'g', linewidth=0.5, label=base_index)
    daily_plot.set_title('Daily PnL')
    daily_plot.plot(ind, zero_line, 'k')
    daily_plot.bar(ind, daily_return.values, 0.2, align='center', color='c', label='strategy')
    daily_plot.legend(loc='upper left')
    daily_plot.autoscale(tight=True)
    plt.setp(daily_plot.get_xticklabels(), visible=False)
    daily_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
    daily_plot.set_ylabel('Daily Performance')
    daily_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
    position_plot.set_title('Daily Exposure')
    position_plot.plot(ind, zero_line, 'k')
    position_plot.bar(ind, short_exposure.values, 0.3, linewidth=0, align='center', color='r', label='short')
    position_plot.bar(ind, long_exposure.values, 0.3, linewidth=0, align='center', color='b', label='long')
    position_plot.legend(loc='upper left')
    position_plot.autoscale(tight=True)
    position_plot.xaxis.set_major_formatter(mtick.FuncFormatter(format_date))
    position_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
    position_plot.set_ylabel('Long/Short')
    position_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
    plt.gcf().canvas.draw()
    # Create Quit Button
    button2 = tk.Button(frame, text='QUIT', command=close_window)
    button2.grid(row=4, column=6, sticky=tk.EW)
    GUI.mainloop()
| Auquan/auquan-toolbox-python | auquanToolbox/resultviewer.py | Python | mit | 13,093 |
# -*- coding: utf-8 -*-
"""Module providing base class migration for blog entry content"""
import lxml
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from meetshaus.blog.blogpost import IBlogPost
from plone import api
from plone.portlets.interfaces import ILocalPortletAssignable, IPortletManager, \
IPortletAssignmentMapping
from zope.component import getMultiAdapter, getUtility
from zope.lifecycleevent import modified
from meetshaus.blog.blogentry import IBlogEntry
class BlogMigrationView(BrowserView):
    """ Migrate blog content

    Move blog entries to folderish blog posting content types and
    transfer the associated images to the folder content
    """
    def __call__(self):
        # Precompute the flag the template uses to decide what to show.
        self.has_blog_entries = len(self.blog_entries()) > 0
        return self.render()
    def render(self):
        return self.index()
    def blog_entries(self):
        # All BlogEntry objects site-wide, newest (by effective date) first.
        items = api.content.find(
            context=api.portal.get(),
            object_provides=IBlogEntry,
            sort_on='effective',
            sort_order='reverse'
        )
        return items
    def blog_entries_count(self):
        return len(self.blog_entries())
    def used_image_assets(self, uuid):
        # Count the <img> tags embedded in the body text of the entry with
        # the given UID.
        item = api.content.get(UID=uuid)
        html_body = item.text.raw
        xhtml = lxml.html.document_fromstring(html_body)
        images = xhtml.xpath('//img')
        image_idx = len(images)
        return image_idx
class BlogMigrationRunnerView(BrowserView):
    """ Blog migration runner """
    def __call__(self):
        return self.render()
    def render(self):
        # Run the migration, stamp the context as modified, then redirect to
        # the confirmation page (with a CSRF token for the traversal).
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        next_url = '{0}/@@migration-finished?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._migrate_blog_posts()
        modified(context)
        context.reindexObject(idxs='modified')
        return self.request.response.redirect(next_url)
    def _migrate_blog_posts(self):
        # Create a published BlogPost in the current context for every
        # BlogEntry found site-wide, copying title, description, subjects,
        # body text and the original effective date. Returns the number of
        # entries migrated.
        context = aq_inner(self.context)
        migrated = []
        not_migrated = []
        results = api.content.find(
            context=api.portal.get(),
            object_provides=IBlogEntry
        )
        for brain in results:
            obj = brain.getObject()
            html_body = obj.text.raw
            xhtml = lxml.html.document_fromstring(html_body)
            images = xhtml.xpath('//img')
            # UIDs of images referenced via resolveuid links in the body
            # (collected for the asset move below, currently disabled).
            img_list = list()
            if images:
                for i in images:
                    img_src = i.attrib['src']
                    if img_src.startswith('resolve'):
                        uuid = img_src.split('/')[1]
                        img_list.append(uuid)
            new_item = api.content.create(
                type='meetshaus.blog.blogpost',
                title=obj.Title(),
                description=obj.Description(),
                container=context
            )
            setattr(new_item, 'Subject', obj.Subject())
            setattr(new_item, 'text', obj.text)
            api.content.transition(obj=new_item, transition='publish')
            effective = obj.EffectiveDate()
            new_item.setEffectiveDate(effective)
            modified(new_item)
            new_item.reindexObject(idxs='modified')
            # for img_uid in img_list:
            #     img_obj = api.content.get(UID=img_uid)
            #     api.content.move(source=img_obj, target=new_item)
            migrated.append(obj.UID())
        info_message_template = 'There are {0} objects migrated.'
        warn_message_template = 'There are {0} objects not migrated.'
        # NOTE(review): not_migrated is never populated, so the warning branch
        # is currently dead; if both lists were non-empty the warning would
        # also overwrite the success message — confirm intended behavior.
        if migrated:
            msg = info_message_template.format(len(migrated))
        if not_migrated:
            msg = warn_message_template.format(len(not_migrated))
        api.portal.show_message(
            message=msg,
            request=self.request
        )
        return len(migrated)
class BlogMigrationFinishedView(BrowserView):
    """ Migration done """
    def __call__(self):
        return self.render()
    def render(self):
        # Static confirmation page template.
        return self.index()
class GatherAssetsView(BrowserView):
    """Gather the image assets referenced by this blog post's body text and
    move them into the post itself (folderish content).
    """
    def __call__(self):
        return self.render()
    def render(self):
        # Move the referenced images, stamp the context as modified, then
        # redirect back to the context page (with a CSRF token).
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        next_url = '{0}?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._gather_assets()
        modified(context)
        context.reindexObject(idxs='modified')
        return self.request.response.redirect(next_url)
    def _collect_assets(self):
        # Return the UIDs of images referenced via resolveuid links in the
        # context's body text.
        context = aq_inner(self.context)
        html_body = context.text.raw
        xhtml = lxml.html.document_fromstring(html_body)
        images = xhtml.xpath('//img')
        img_list = list()
        if images:
            for i in images:
                img_src = i.attrib['src']
                if img_src.startswith('resolve'):
                    uuid = img_src.split('/')[1]
                    img_list.append(uuid)
        return img_list
    def _gather_assets(self):
        # Move every referenced image into the current context; return the
        # number successfully moved.
        context = aq_inner(self.context)
        migrated = 0
        contained_images = self._collect_assets()
        for uuid in contained_images:
            image = api.content.get(UID=uuid)
            try:
                api.content.move(source=image, target=context)
                migrated += 1
            except Exception:
                # Fixed: a leftover `import pdb; pdb.set_trace()` lived here,
                # which would block the whole server on any failed move (and
                # the bare `except:` also trapped SystemExit). Skip the asset
                # and continue with the rest instead.
                continue
        modified(context)
        context.reindexObject(idxs='modified')
        return migrated
class CollectAssets(BrowserView):
    """ Collect all assigned images and assets and move to current context"""
    def __call__(self):
        return self.render()
    def render(self):
        # Kick off the site-wide collection, then redirect back to the
        # context page (with a CSRF token for the traversal).
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        next_url = '{0}?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._collect_assets()
        return self.request.response.redirect(next_url)
    @staticmethod
    def _collect_assets():
        # Invoke the @@gather-assets view on every BlogPost site-wide so each
        # post pulls its referenced images into itself.
        results = api.content.find(
            context=api.portal.get(),
            object_provides=IBlogPost
        )
        for brain in results:
            context = brain.getObject()
            context.restrictedTraverse('@@gather-assets')()
        return
class RemovePortletAssignments(BrowserView):
    """Delete every local portlet assignment (left and right column) on all
    content objects site-wide.
    """
    def __call__(self):
        return self.render()
    def render(self):
        # Run the cleanup, then redirect back to the context page (with a
        # CSRF token for the traversal).
        context = aq_inner(self.context)
        base_url = context.absolute_url()
        authenticator = getMultiAdapter((context, self.request),
                                        name=u"authenticator")
        next_url = '{0}?_authenticator={1}'.format(
            base_url, authenticator.token())
        self._cleanup_assignments()
        return self.request.response.redirect(next_url)
    @staticmethod
    def _cleanup_assignments():
        # Walk the entire catalog; for each object that supports local
        # portlets, drop every assignment in both columns. Irreversible.
        catalog = api.portal.get_tool('portal_catalog')
        all_brains = catalog.searchResults()
        for i in all_brains:
            obj = i.getObject()
            if not ILocalPortletAssignable.providedBy(obj):
                continue
            for manager_name in ('plone.leftcolumn','plone.rightcolumn'):
                manager = getUtility(IPortletManager, name=manager_name)
                assignment_mapping = getMultiAdapter((obj, manager),
                                                     IPortletAssignmentMapping)
                # Materialize keys first: deleting while iterating the live
                # mapping would break iteration.
                for item in list(assignment_mapping.keys()):
                    del assignment_mapping[item]
| potzenheimer/meetshaus | src/meetshaus.blog/meetshaus/blog/browser/migration.py | Python | mit | 8,180 |
#!/usr/bin/python
################
# The MIT License (MIT)
#
# Copyright (c) <2013> <Martin de Bruyn>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
############################################################
#----------------------------------------------------------------------#
"""@ package Input
Keep all inputs here.
"""
# System imports
import logging as log
import sys
# Panda imports
from direct.showbase.InputStateGlobal import inputState
from direct.showbase.DirectObject import DirectObject
# MeoTech imports
#----------------------------------------------------------------------#
class InputHandler(DirectObject):
    """Keyboard and mouse input handling for the game (Panda3D, Python 2).

    Registers WASD/QE/space key watchers via inputState, binds mouse button
    and escape handlers, and attaches the camera to the player body.
    getMouse() is expected to be called every frame to apply mouse look.
    """
    def __init__(self, _game):
        """Store the game reference and wire up all input bindings."""
        # Game
        self.game = _game
        # Keyboard
        inputState.watchWithModifiers('forward', 'w')
        inputState.watchWithModifiers('left', 'a')
        inputState.watchWithModifiers('reverse', 's')
        inputState.watchWithModifiers('right', 'd')
        inputState.watchWithModifiers('turnLeft', 'q')
        inputState.watchWithModifiers('turnRight', 'e')
        inputState.watchWithModifiers('space', 'space')
        #inputState.watchWithModifiers('ctrl', 'lcontrol_down')
        self.accept("mouse1", self.shootLight)
        # App exit temp
        base.accept("escape", sys.exit)
        # mouse: window center, used to re-center the pointer each frame
        # (Python 2 integer division — both halves are ints)
        self.winXhalf = base.win.getXSize()/2
        self.winYhalf = base.win.getYSize()/2
        # Should move the camera stuff to the baseCamera.py
        base.camera.reparentTo(self.game.meotech.engine.GameObjects["player"].bulletBody)
        base.camLens.setFov(90)
        base.camLens.setNear(0.5)
        # Mouse sensitivity (X drives body turn rate, Y drives camera pitch)
        self.mouseSpeedX = 15
        self.mouseSpeedY = 0.2
        self.camP = 10
    def shootLight(self):
        # Left-click handler: broadcast the flashlight cone so listeners can
        # react to the "shot".
        print "shoot"
        cone = self.game.player.flashlightConeBody
        base.messenger.send("shootLight", [cone])
    def getMouse(self, dt):
        """Per-frame mouse look: turn the player with X motion, pitch the
        camera (clamped to [-80, 90]) with Y motion, and keep the flashlight
        aligned with the camera."""
        player = self.game.meotech.engine.GameObjects["player"]
        flashlight = self.game.player.flashlightConeBody
        flashlight_lamp = self.game.player.flashlight
        flashlight_light = self.game.player.flashlightLight
        # Handle mouse
        md = base.win.getPointer(0)
        x = md.getX()
        y = md.getY()
        # Re-center the pointer; deltas from center are this frame's motion.
        if base.win.movePointer(0, self.winXhalf, self.winYhalf):
            omega = (x - self.winXhalf)*-self.mouseSpeedX
            player.bulletBody.node().setAngularMovement(omega)
            #flashlight.setH(flashlight, base.camera.getH())
            cam = base.cam.getP() - (y - self.winYhalf) * self.mouseSpeedY
            flashlight.setHpr(base.cam.getHpr())
            if cam <-80:
                cam = -80
            elif cam > 90:
                cam = 90
            base.cam.setP(cam)
            flashlight.setP(cam + 90)
            flashlight_lamp.setZ(flashlight.getZ() - 0.6)
            flashlight_lamp.setY(flashlight.getY() - 0.55)
            flashlight_light.setHpr(flashlight_lamp.find("LightPos").getHpr() + 90)
| grimfang/quickShadows | src/game/input.py | Python | mit | 4,080 |
"""
WSGI config for cv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cv.settings")
# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| cthtuf/django-cv | cv/wsgi.py | Python | mit | 381 |
import json
import math
__author__ = 'apostol3'
class Map:
    """Editable race-track map: walls, a headline path, car spawns and a
    finish line.

    Points are (x, y) tuples in map units. Points beyond ``size`` are
    rejected (or reset the element currently being edited, matching the
    editor's click semantics).
    """

    def __init__(self, w, h):
        self.max_time = 120               # episode time limit, seconds
        self.size = (w, h)                # map dimensions (width, height)
        self.walls = []                   # list of walls; each wall is a list of points
        self.headline = []                # guiding-path points
        self.cars = []                    # (x, y, heading) spawn tuples
        self.finish = []                  # up to two points describing the finish line
        self.objects = []
        self.car_size = (1.8/2, 4.6/2)    # half-extents of a car

    def start_new_wall(self):
        """Open a new, empty wall polyline; subsequent points attach to it."""
        self.walls.append([])

    def append_wall_point(self, x, y):
        """Append a point to the current wall; an out-of-bounds point
        terminates the wall and starts a new one instead."""
        if x > self.size[0] or y > self.size[1]:
            self.start_new_wall()
            return
        self.walls[-1].append((x, y))

    def append_headline_point(self, x, y):
        """Append a point to the headline; out-of-bounds points are ignored."""
        if x > self.size[0] or y > self.size[1]:
            return
        self.headline.append((x, y))

    def create_car(self, x, y):
        """Spawn a car at (x, y) with the default heading 3*pi/2."""
        self.cars.append((x, y, 3 * math.pi / 2))

    def append_finish_point(self, x, y):
        """Collect up to two finish-line points; a third point restarts the
        line, and an out-of-bounds point resets it.

        Fixed: an out-of-bounds point used to clear the finish line and then
        still be appended; it now only clears, consistent with the other
        append_* methods that reject out-of-range input.
        """
        if x > self.size[0] or y > self.size[1]:
            self.finish.clear()
            return
        if len(self.finish) < 2:
            self.finish.append((x, y))
        else:
            self.finish = [(x, y)]

    @staticmethod
    def open_from_file(file):
        """Load and return a Map from a JSON file written by save_to_file."""
        with open(file, 'r') as f:
            doc = json.load(f)
        loaded = Map(*doc['size'])
        loaded.max_time = doc['max_time']
        loaded.walls = doc['walls']
        loaded.finish = doc['finish']
        loaded.headline = doc['headline']
        loaded.cars = doc['cars']
        return loaded

    def save_to_file(self, file):
        """Serialize the map to a JSON file.

        Fixed: the trailing empty wall used to be popped from the live
        ``self.walls`` list (the serialized dict held a reference to it), so
        saving mutated the map as a side effect. Serialization now works on
        a copy.
        """
        walls = list(self.walls)
        if len(walls) != 0 and len(walls[-1]) == 0:
            walls.pop()
        doc = {'size': self.size, 'max_time': self.max_time, 'finish': self.finish,
               'walls': walls, 'headline': self.headline, 'cars': self.cars}
        with open(file, 'w') as f:
            f.write(json.dumps(doc, indent=4))
| Apostol3/race_env_editor | map.py | Python | mit | 1,833 |
# Print every environment spec registered with the installed Gym version.
from gym import envs
print(envs.registry.all())
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema migration: creates the Usuarios table.
    # Django migrations are historical records — avoid editing applied ones.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Usuarios',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=225, verbose_name='Nombre')),
                ('middle_name', models.CharField(max_length=225, verbose_name='Apellido Paterno')),
                ('last_name', models.CharField(max_length=225, null=True, verbose_name='Apellido Materno', blank=True)),
                ('email', models.EmailField(unique=True, max_length=225, verbose_name='Correo')),
                ('user_name', models.CharField(unique=True, max_length=225, verbose_name='Nombre de Usuario')),
                # NOTE(review): passwords appear to be stored as a plain
                # CharField here — confirm hashing happens elsewhere.
                ('password', models.CharField(max_length=225, verbose_name='Contrasenia')),
                ('user_type', models.CharField(max_length=225, verbose_name='Tipo de Usuario')),
            ],
        ),
    ]
| diego-d5000/Estudiala | estudiala/estudiala/usuarios/migrations/0001_initial.py | Python | mit | 1,127 |
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
# In-memory SQLite keeps the test run self-contained and fast.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": ":memory:",
    }
}
ROOT_URLCONF = 'linklist.tests.urls'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(__file__, '../../../static/')
MEDIA_ROOT = os.path.join(__file__, '../../../media/')
STATICFILES_DIRS = (
    os.path.join(__file__, 'tests/test_static'),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), '../templates'),
)
# Coverage report destination and modules excluded from measurement
# (regex fragments; external apps are appended below).
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(
    os.path.dirname(__file__), 'coverage')
COVERAGE_MODULE_EXCLUDES = [
    'tests$', 'settings$', 'urls$', 'locale$',
    'migrations', 'fixtures', 'admin$', 'django_extensions',
]
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
)
# Third-party/contrib apps required by the app under test.
EXTERNAL_APPS = [
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'django.contrib.sites',
    'django_nose',
    'filer',
    'easy_thumbnails',
]
# The package itself plus its dedicated test app.
INTERNAL_APPS = [
    'linklist.tests.test_app',
    'linklist',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
# Throwaway key — fine for tests, never reuse in production.
SECRET_KEY = 'PoP43cj5=(cj36$_8a!6ar0u"(hF5b24kns&gz7u*k*@a5tCCf'
LANGUAGES = [
    ('en', 'English'),
]
| bitmazk/django-linklist | linklist/tests/test_settings.py | Python | mit | 2,306 |
import _plotly_utils.basevalidators
class DeltaValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Validator for the compound `indicator.delta` property.
    # NOTE(review): this module appears to be produced by the plotly.py code
    # generator — changes likely belong in the generator, not here.
    def __init__(self, plotly_name="delta", parent_name="indicator", **kwargs):
        super(DeltaValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Delta"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            decreasing
                :class:`plotly.graph_objects.indicator.delta.De
                creasing` instance or dict with compatible
                properties
            font
                Set the font used to display the delta
            increasing
                :class:`plotly.graph_objects.indicator.delta.In
                creasing` instance or dict with compatible
                properties
            position
                Sets the position of delta with respect to the
                number.
            reference
                Sets the reference value to compute the delta.
                By default, it is set to the current value.
            relative
                Show relative change
            valueformat
                Sets the value formatting rule using d3
                formatting mini-language which is similar to
                those of Python. See
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
""",
            ),
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/indicator/_delta.py | Python | mit | 1,527 |
#!/usr/bin/env python
import json
import os
import time

from flask import Flask, request, render_template, Response
from gevent import pywsgi, monkey

from helpers.generateCalibration import GenerateCalibration
#monkey.patch_all()
app = Flask(__name__)
#cameraInstance = Camera()
runCalibration = GenerateCalibration('frames', 'calibration.json')
class VisionServer:
    """Holds the inbound queue used to feed data into the vision pipeline."""

    def __init__(self, queue):
        """Keep a reference to the supplied queue for later consumption."""
        self.inQueue = queue
@app.route('/')
def index():
    # Landing page.
    return render_template('index.html')
@app.route('/hsl')
def hslPage():
    # HSL threshold tuning page.
    return render_template('hsl.html')
@app.route('/calibrate')
def calibratePage():
    # Camera calibration page.
    # NOTE(review): the calibrate() handler further down registers the same
    # '/calibrate' rule — confirm which URL the frontend expects and rename
    # one of them.
    return render_template('calibrate.html')
def genStream(camera):
    """Yield an endless multipart/x-mixed-replace sequence of JPEG frames.

    Each chunk is a '--frame' boundary plus one JPEG image from *camera*;
    pair with mimetype 'multipart/x-mixed-replace; boundary=frame'.
    """
    part_header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        frame = camera.get_frame()
        yield part_header + frame + b'\r\n'
        # Small pacing delay between frames. Yes, this delay is intentional —
        # maybe it's a hack, but hey, it works.
        time.sleep(0.005)
@app.route('/stream')
def stream():
    # MJPEG video stream endpoint.
    # NOTE(review): `cameraInstance` is commented out at module scope, so this
    # raises NameError until the Camera wiring is restored — confirm.
    return Response(genStream(cameraInstance), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/post', methods=['POST'])
def post():
    # RPC-style endpoint dispatching on the 'action' form field.
    # NOTE(review): `cameraInstance` is commented out at module scope and
    # `visionController` is never defined in this module — every branch below
    # fails at runtime until that wiring is restored; confirm.
    if (request.form['action'] == 'changeHSL'):
        cameraInstance.changeHSL({'component': request.form['component'], 'min': request.form['min'], 'max': request.form['max']})
    elif (request.form['action'] == 'getHSL'):
        return json.dumps(cameraInstance.getHSL())
    elif (request.form['action'] == 'saveHSL'):
        return str(cameraInstance.saveHSL())
    elif (request.form['action'] == 'setExposure'):
        return str(cameraInstance.setExposure(int(request.form['exposure'])))
    elif (request.form['action'] == 'on' or request.form['action'] == 'off'):
        if (request.form['action'] == 'on'):
            visionController.start()
        else:
            visionController.stop()
        return str(True);
    return str(True)
@app.route('/capture')
def capture():
    """Save the current camera frame as frames/<N+1>.jpg, where N is the
    highest numeric filename already present; returns 'True'/'False'.
    """
    # Fixed: this handler used `os` without importing it, so every request
    # raised NameError. `os` is now imported at module level.
    # makes directory if it doesn't exist
    if not os.path.exists('frames'):
        os.makedirs('frames')
    # finds the highest int in filenames; isdigit() also guards against
    # non-numeric names, which previously crashed int() here
    maxN = 0
    for entry in os.listdir('frames'):
        stem = entry.split('.')[0]
        if stem.isdigit() and int(stem) > maxN:
            maxN = int(stem)
    # NOTE(review): `cameraInstance` is commented out at module scope, so this
    # call fails until the Camera wiring is restored — confirm.
    return str(cameraInstance.saveFrame('frames/' + str(maxN + 1) + '.jpg'))
@app.route('/calibrate')
def calibrate():
    # Runs the calibration routine over the saved frames directory.
    # NOTE(review): duplicates the '/calibrate' rule registered by
    # calibratePage() above — confirm which handler should own the URL.
    return str(runCalibration.run())
if __name__ == '__main__':
    # Serve on all interfaces, port 80 (binding to 80 typically requires
    # elevated privileges), using gevent's WSGI server.
    gevent_server = pywsgi.WSGIServer(('', 80), app)
    gevent_server.serve_forever()
| 3299/visioninabox | server.py | Python | mit | 2,744 |
from .User import User
class Comment(object):
    """Wrapper around a single comment payload from the Instagram API.

    Optional keys ('user_id', 'bit_flags', 'type', 'media_id') are only
    copied when present and truthy; the corresponding attributes remain
    None otherwise.
    """

    def __init__(self, commentData):
        # Default every field so attributes exist even for sparse payloads.
        self.status = None
        self.username_id = None
        self.created_at_utc = None
        self.created_at = None
        self.bit_flags = None
        self.user = None
        self.comment = None
        self.pk = None
        self.type = None
        self.media_id = None
        self.status = commentData['status']
        if 'user_id' in commentData and commentData['user_id']:
            self.username_id = commentData['user_id']
        self.created_at_utc = commentData['created_at_utc']
        self.created_at = commentData['created_at']
        if 'bit_flags' in commentData and commentData['bit_flags']:
            self.bit_flags = commentData['bit_flags']
        self.user = User(commentData['user'])
        self.comment = commentData['text']
        self.pk = commentData['pk']
        if 'type' in commentData and commentData['type']:
            self.type = commentData['type']
        if 'media_id' in commentData and commentData['media_id']:
            self.media_id = commentData['media_id']

    def getStatus(self):
        return self.status

    def getUsernameId(self):
        return self.username_id

    def getCreatedAtUtc(self):
        return self.created_at_utc

    def getCreatedAt(self):
        # Fixed: this getter used to be named `created_at`, which the
        # instance attribute of the same name (set in __init__) shadows,
        # making the method uncallable; renamed to match the other accessors.
        return self.created_at

    def getBitFlags(self):
        return self.bit_flags

    def getUser(self):
        return self.user

    def getComment(self):
        return self.comment

    def getCommentId(self):
        return self.pk

    def getType(self):
        return self.type

    def getMediaId(self):
        return self.media_id
| danleyb2/Instagram-API | InstagramAPI/src/http/Response/Objects/Comment.py | Python | mit | 1,671 |
from django.contrib import messages
from django.db.models import Q
from django.http import Http404
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
# Create your views here.
from .forms import VariationInventoryFormSet
from .mixins import StaffRequiredMixin
from .models import Product, Variation, Category
class CategoryListView(ListView):
    """All categories, rendered with the shared product-list template."""
    model = Category
    queryset = Category.objects.all()
    template_name = "products/product_list.html"
class CategoryDetailView(DetailView):
    """Category page: exposes the products explicitly assigned to this
    category together with those using it as their default category."""
    model = Category

    def get_context_data(self, *args, **kwargs):
        context = super(CategoryDetailView, self).get_context_data(*args, **kwargs)
        category = self.get_object()
        # Union of both relations, deduplicated.
        products = (category.product_set.all() | category.default_category.all()).distinct()
        context["products"] = products
        return context
class VariationListView(StaffRequiredMixin, ListView):
    """Staff-only listing of product variations with an inline inventory formset."""
    model = Variation
    queryset = Variation.objects.all()
    def get_context_data(self, *args, **kwargs):
        """Attach a formset bound to the (product-filtered) queryset."""
        context = super(VariationListView, self).get_context_data(*args, **kwargs)
        context["formset"] = VariationInventoryFormSet(queryset=self.get_queryset())
        return context
    def get_queryset(self, *args, **kwargs):
        """Return the variations of the product given by the `pk` URL kwarg.

        BUG FIX: when no `pk` was supplied the original fell through and hit
        `return queryset` with `queryset` unbound, raising UnboundLocalError.
        Fall back to all variations instead.
        """
        product_pk = self.kwargs.get("pk")
        if product_pk:
            product = get_object_or_404(Product, pk=product_pk)
            return Variation.objects.filter(product=product)
        return Variation.objects.all()
    def post(self, request, *args, **kwargs):
        """Validate and persist the inventory formset, then redirect to products."""
        formset = VariationInventoryFormSet(request.POST, request.FILES)
        if formset.is_valid():
            formset.save(commit=False)
            # BUG FIX: the product lookup is loop-invariant; it was previously
            # re-queried once per form in the formset.
            product = get_object_or_404(Product, pk=self.kwargs.get("pk"))
            for form in formset:
                new_item = form.save(commit=False)
                new_item.product = product
                new_item.save()
            messages.success(request, "Your inventory and pricing has been updated.")
            return redirect("products")
        raise Http404
class ProductListView(ListView):
    """Product catalogue with optional free-text / exact-price search via ?q=."""
    model = Product
    queryset = Product.objects.all()
    def get_context_data(self, *args, **kwargs):
        """Expose the current time and the raw search query to the template."""
        context = super(ProductListView, self).get_context_data(*args, **kwargs)
        context["now"] = timezone.now()
        context["query"] = self.request.GET.get("q")  # None when absent
        return context
    def get_queryset(self, *args, **kwargs):
        """Filter by title/description substring, plus exact price when numeric."""
        qs = super(ProductListView, self).get_queryset(*args, **kwargs)
        query = self.request.GET.get("q")
        if query:
            qs = self.model.objects.filter(
                Q(title__icontains=query) |
                Q(description__icontains=query)
            )
            # A non-numeric query makes the price lookup raise; that case is
            # expected, so swallow only runtime failures here.
            # BUG FIX: was a bare `except:`, which also caught SystemExit and
            # KeyboardInterrupt and masked genuine programming errors.
            try:
                qs2 = self.model.objects.filter(
                    Q(price=query)
                )
                qs = (qs | qs2).distinct()
            except Exception:
                pass
        return qs
import random
class ProductDetailView(DetailView):
    """Product page; also exposes up to six related products in random order."""
    model = Product
    #template_name = "product.html"
    #template_name = "<appname>/<modelname>_detail.html"
    def get_context_data(self, *args, **kwargs):
        """Add a randomly-ordered sample of related products to the context."""
        context = super(ProductDetailView, self).get_context_data(*args, **kwargs)
        instance = self.get_object()
        #order_by("-title")
        # sorting by a random key is an ad-hoc shuffle of the first 6 related items
        context["related"] = sorted(Product.objects.get_related(instance)[:6], key= lambda x: random.random())
        return context
def product_detail_view_func(request, id):
    """Render the detail page for the product with the given id.

    Raises Http404 (via get_object_or_404) when no such product exists.

    BUG FIX: the original fetched the product up to three times -- a
    get_object_or_404 immediately overwritten by a redundant try/except around
    Product.objects.get, plus a bare `except:` that converted *any* error
    (including programming errors) into a 404. One lookup is enough.
    """
    product_instance = get_object_or_404(Product, id=id)
    template = "products/product_detail.html"
    context = {
        "object": product_instance
    }
    return render(request, template, context)
from pathlib import Path
from subprocess import (PIPE, Popen)
import fnmatch
import shutil
import os
def test_distribute(tmp_path):
    """
    Check that the scripts to compute a trajectory are generated correctly
    """
    # One command per workflow: derivative couplings and absorption spectrum.
    cmd1 = "distribute_jobs.py -i test/test_files/input_test_distribute_derivative_couplings.yml"
    cmd2 = "distribute_jobs.py -i test/test_files/input_test_distribute_absorption_spectrum.yml"
    for cmd in [cmd1, cmd2]:
        print("testing: ", cmd)
        # call_distribute runs the command and validates the generated chunks
        call_distribute(tmp_path, cmd)
def call_distribute(tmp_path, cmd):
    """
    Run a distribute command in a subprocess and verify that it succeeds.

    Any output on stderr is treated as failure. The generated chunk folders
    are cleaned up afterwards whether or not the command worked.
    """
    try:
        proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        _stdout, stderr = proc.communicate()
        if stderr:
            raise RuntimeError(stderr)
        check_scripts()
    finally:
        remove_chunk_folder()
def check_scripts():
    """
    Assert that every chunk* folder in the working directory contains the
    expected generated files; raise RuntimeError on the first missing one.
    """
    chunk_dirs = fnmatch.filter(os.listdir('.'), "chunk*")
    expected_patterns = ("launch.sh", "chunk_xyz*", "input.yml")
    for chunk_dir in chunk_dirs:
        chunk_path = Path(chunk_dir)
        for pattern in expected_patterns:
            matches = list(chunk_path.glob(pattern))
            if not matches:
                msg = f"There is not file: {pattern}"
                print(msg)
                raise RuntimeError(msg)
def remove_chunk_folder():
    """Delete every chunk* directory left in the working directory."""
    leftovers = (entry for entry in os.listdir('.')
                 if fnmatch.fnmatch(entry, "chunk*"))
    for chunk_dir in leftovers:
        shutil.rmtree(chunk_dir)
| felipeZ/nonAdiabaticCoupling | test/test_distribute.py | Python | mit | 1,556 |
# This is just a simple example of how to inspect ASTs visually.
#
# This can be useful for developing new operators, etc.
import ast
from cosmic_ray.mutating import MutatingCore
from cosmic_ray.operators.comparison_operator_replacement import MutateComparisonOperator
# Parse a small boolean expression containing `is` / `is not` comparisons.
code = "((x is not y) ^ (x is y))"
node = ast.parse(code)
# Dump the tree before mutation for visual comparison.
print()
print(ast.dump(node))
# MutatingCore(0) asks the operator to mutate occurrence #0 of a comparison.
core = MutatingCore(0)
operator = MutateComparisonOperator(core)
new_node = operator.visit(node)
# Dump the tree again to see which comparison operator was replaced.
print()
print(ast.dump(new_node))
| sixty-north/cosmic-ray | tools/inspector.py | Python | mit | 491 |
"""empty message
Revision ID: 399106d8a6ad
Revises: None
Create Date: 2015-03-06 03:55:19.157958
"""
# revision identifiers, used by Alembic.
revision = '399106d8a6ad'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Forward migration: create the `category` and `product` tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('category',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('product',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.Column('price', sa.Float(), nullable=True),
    sa.Column('category_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Reverse migration: drop `product` first (it FKs `category`), then `category`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('product')
    op.drop_table('category')
    ### end Alembic commands ###
| enixdark/im-r-e-d-i-s | flask-cook/migrations/versions/399106d8a6ad_.py | Python | mit | 1,071 |
# Hangman for real tho(t)
import random
def pickWord(words):
    """Return a random word from the given word list.

    BUG FIX: the original ignored its `words` argument and read the module
    global `lol` instead, so the function could not be reused with any other
    word list (and crashed if called before `lol` existed).
    """
    return random.choice(words)
def drawHangman(parts):
    """Print the hangman figure for `parts` body parts (0-6), then a blank line."""
    rows = []
    if parts >= 1:
        rows.append(" O")
    # torso / arms: 2 = torso only, 3 = torso + left arm, 4+ = both arms
    if parts >= 4:
        rows.append("/|\\")
    elif parts == 3:
        rows.append("/|")
    elif parts == 2:
        rows.append(" |")
    # legs: 5 = left leg, 6 = both legs
    if parts >= 6:
        rows.append("/ \\")
    elif parts == 5:
        rows.append("/")
    for row in rows:
        print(row)
    print("\n")
# Main game loop: load the word list, pick a secret word, then alternate
# between prompting for a letter and updating the board until win or loss.
with open("scrabble.txt", "r") as paroleFile:
    lol = paroleFile.read().split("\n")
word = pickWord(lol)
completo = False          # game-over flag (win or loss)
okLetter = False          # set once the player enters a not-yet-tried letter
guessedLetters = set()    # letters already attempted
progress = ["_" for i in range(len(word))]   # revealed board, one slot per letter
remainingLetters = len(word)                 # occurrences still hidden
guesses = 0                                  # wrong guesses so far (max 7)
while not completo:
    okLetter = False
    # show the current board
    for i in progress:
        print(i, end="")
    # keep prompting until the player supplies a fresh letter
    while not okLetter:
        print("\n\n\nGuess your letter: ")
        letter = input().upper()
        if letter in guessedLetters:
            print("You already tried that ")
        else:
            guessedLetters.add(letter)
            okLetter = True
    if letter not in word:
        print("Wrong letter ")
        guesses += 1
        print("Guesses remaining: ", 7 - guesses, "\n")
    else:
        # reveal every occurrence of the guessed letter
        for i in range(len(word)):
            if word[i] == letter:
                progress[i] = letter
                remainingLetters -= 1
    drawHangman(guesses)
    if remainingLetters <= 0:
        for i in progress:
            print(i, end="")
        print("\n\nYou won ye")
        completo = True
    if guesses > 6:
        print(" ^^ DED ^^ \n")
        print("Hai perso lol\n")
        print("\nLa parola era", str(word))
        completo = True
#!/usr/bin/env python
# coding: UTF-8
from __future__ import division
import numpy as np
def left_multiplication(g, x):
    """
    Left action of a group element `g` on `x` by matrix multiplication.
    """
    moved = np.dot(g, x)
    return moved
def trans_adjoint(g, x):
    """Transpose-conjugation action: return g x g^T."""
    gx = np.dot(g, x)
    return np.dot(gx, g.T)
class RungeKutta(object):
    """Runge-Kutta integrator on a homogeneous space, driven by a `method`
    object that supplies a `movement` map and a graph of stage `edges`.

    NOTE(review): the exact contract of `method.edges` (triples (i, j,
    transition)) and `method.movement` is defined elsewhere in the package;
    assumptions below are inferred from usage -- confirm against the method
    definitions.
    """
    def __init__(self, method):
        self.method = method
        self.movement = self.method.movement
        # one more stage than there are edges in the stage graph
        self.nb_stages = len(self.method.edges) + 1
    def compute_vectors(self, movement_field, stages):
        """
        Compute the Lie algebra elements for the stages.
        """
        return np.array([movement_field(stage) for stage in stages])
    def get_iterate(self, movement_field, action):
        """Build the fixed-point iteration map over the stage array."""
        def evol(stages):
            new_stages = stages.copy()
            for (i,j, transition) in self.method.edges:
                # inefficient as a) only some vectors are needed b) recomputed for each edge
                vects = self.compute_vectors(movement_field, new_stages)
                # the order of the edges matters; the goal is that explicit method need only one iteration
                new_stages[i] = action(self.movement(transition(vects)), new_stages[j])
            return new_stages
        return evol
    @classmethod
    def fix(self, iterate, z):
        """
        Find a fixed point to the iterating function `iterate`.

        Iterates at most 30 times; raises when `z` has not converged
        (atol=1e-10, rtol=1e-16). Returns (fixed_point, iterations_used).

        NOTE(review): decorated @classmethod but the first parameter is named
        `self` -- it actually receives the class; works, but misleading.
        """
        for i in range(30):
            new_z = iterate(z)
            if np.allclose(z, new_z, atol=1e-10, rtol=1e-16):
                break
            z = new_z
        else:
            raise Exception("No convergence after {} steps".format(i))
        return z, i
    def step(self, movement_field, x0, action=None):
        """Advance one RK step from x0; `action` defaults to left multiplication."""
        if action is None:
            action = left_multiplication
        iterate = self.get_iterate(movement_field, action)
        z0 = np.array([x0]*self.nb_stages) # initial guess
        z, i = self.fix(iterate, z0)
        # the last stage is the solution at the end of the step
        return z[-1]
| olivierverdier/homogint | homogint/homogint.py | Python | mit | 1,957 |
__author__ = 'Fabrizio Lungo<fab@lungo.co.uk>'
import os
import yaml
from __exceptions__.FileNotFound import FileNotFound
from section import ConfigurationSection
class Configuration(ConfigurationSection):
    """Top-level YAML-backed configuration, loaded from `fn`.

    When `create` is true a missing file yields an empty configuration
    instead of raising FileNotFound.
    """
    def __init__(self, fn='config.yml', name=None, create=False):
        self._fn = fn
        self._create = create
        self.reload()
        # default the display name to the filename
        if name is None:
            name=fn
        self._name = name
    def reload(self):
        """(Re-)read the YAML file into self._config; honors `create`."""
        if self._create and not os.path.exists(self._fn):
            self._config = {}
        elif os.path.exists(self._fn):
            with open(self._fn, "r") as f:
                # SECURITY NOTE(review): yaml.load without an explicit Loader
                # can construct arbitrary Python objects if the config file is
                # attacker-controlled; prefer yaml.safe_load unless custom tags
                # are required -- confirm before changing behavior.
                self._config = yaml.load(f)
        else:
            raise FileNotFound(filename=self._fn)
    def save(self):
        """Write the current configuration back to the YAML file."""
        with open(self._fn, "w") as f:
            yaml.dump(self._config, f)
"""
Django settings for magnet project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.utils.translation import ugettext_lazy as _
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's**osz#c22#hn13(@0++r+2eq4^7$$7qafa%3$f#g^b_&4$7zv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Our apps
'magnet.apps.users',
# 3rd Party apps
'crispy_forms',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'magnet.core.middleware.ForceDefaultLanguageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'magnet.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'magnet', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'magnet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'id'
LANGUAGES = (
('id', _('Indonesian')),
('en', _('English')),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Set custom user model
AUTH_USER_MODEL = 'users.User'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'magnet', 'static'),
os.path.join(BASE_DIR, 'node_modules'), # packages installed by yarn
]
# Auth with mobile phone
AUTHENTICATION_BACKENDS = [
'magnet.apps.users.backends.MagnetBackend',
]
# Translation files
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'magnet', 'locales')
]
# Crispy form template pack
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Security settings
if not DEBUG:
    # Harden cookies and force HTTPS when running in production.
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    SECURE_SSL_REDIRECT = True
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True

# Optional machine-local overrides. Absence of the module is fine, but any
# other failure (e.g. a syntax error inside local_settings.py) should surface.
# BUG FIX: was a bare `except:` that silently swallowed every error,
# including broken local settings and KeyboardInterrupt.
try:
    from .local_settings import *
except ImportError:
    pass
| SeiryuZ/magnet | magnet/settings.py | Python | mit | 4,246 |
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Datos de Prueba
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# Modelo de Logistic Regression
class BoWClassifier(nn.Module):
    """Logistic-regression bag-of-words classifier: log_softmax(Ax + b)."""
    def __init__(self, num_labels, vocab_size):
        """
        num_labels -- number of output classes
        vocab_size -- dimensionality of the input bag-of-words vectors
        """
        super(BoWClassifier, self).__init__()
        # (Tamanio Entrada TE, Tamanio Salida TS) -> A is TS x TE, b is TS
        self.linear = nn.Linear(vocab_size, num_labels)  # plain y = Ax + b
    def forward(self, bow_vec):
        """Return per-class log-probabilities for a (batch, vocab_size) input.

        BUG FIX: log_softmax now receives an explicit `dim=1`; relying on the
        implicit default is deprecated in PyTorch (emits a warning) and can
        normalize over the wrong axis.
        """
        return F.log_softmax(self.linear(bow_vec), dim=1)
def make_bow_vector(sentence, word_to_ix):
    """Encode a token list as a (1, vocab_size) bag-of-words count tensor."""
    counts = torch.zeros(len(word_to_ix))
    for token in sentence:
        counts[word_to_ix[token]] += 1
    # reshape to a single-row batch
    return counts.view(1, -1)
def make_target(label, label_to_ix):
    """Wrap a label's index in a LongTensor target suitable for NLLLoss."""
    label_index = label_to_ix[label]
    return torch.LongTensor([label_index])
def train_model(model,data):
    """Run 100 epochs of SGD over `data`; returns the trained model.

    NOTE(review): depends on the module-level globals `word_to_ix`,
    `label_to_ix`, `loss_function` and `optimizer`, which are created in the
    __main__ block below -- confirm they exist before reusing this helper.
    """
    for epoch in range(100):
        for instance, label in data:
            # clear gradients accumulated by the previous step
            model.zero_grad()
            bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
            target = autograd.Variable(make_target(label, label_to_ix))
            log_probs = model(bow_vec)
            loss = loss_function(log_probs, target)
            loss.backward()
            optimizer.step()
    return model
def test_model(model,test_data):
    """Print log-probabilities for each test instance; returns the model unchanged.

    NOTE(review): relies on the module-level `word_to_ix` global.
    """
    for instance, label in test_data:
        bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
        log_probs = model(bow_vec)
        print(log_probs)
    return model
if __name__ == "__main__":
torch.manual_seed(1)
# Diccionario {Word:ID}
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
#print(word_to_ix)
#### Vars
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
#### CREAR Modelo
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE) #model.parameters() es de dimension [2,26] ([etiquetas,tokens+bias])
# Todo debe ser convertido a autograd.Variable para armar el grafo de operaciones
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(autograd.Variable(bow_vector))
#print(log_probs)
#### ENTRENAR Modelo
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
model = train_model(model,data)
# Index corresponding to Spanish goes up, English goes down!
model = test_model(model,test_data)
print(next(model.parameters())[:, word_to_ix["good"]])
| josdaza/deep-toolbox | PyTorch/LogisticClassifier.py | Python | mit | 2,935 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2014 Rajat Agarwal
import os, sys
import unittest
import sqoot
if 'PUBLIC_API_KEY' in os.environ and 'PRIVATE_API_KEY' in os.environ:
PUBLIC_API_KEY = os.environ['PUBLIC_API_KEY']
PRIVATE_API_KEY = os.environ['PRIVATE_API_KEY']
else:
try:
from _creds import *
except ImportError:
print "Please create a creds.py file in this package, based upon creds.example.py"
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
sys.path.append('/home/ragarwal/sqoot')
class BaseEndpointTestCase(unittest.TestCase):
    """Shared base for endpoint tests: provides a configured Sqoot client."""
    def setUp(self):
        # Build the API client from the key pair loaded at module import
        # (environment variables or the local _creds module).
        self.api = sqoot.Sqoot(
            privateApiKey=PRIVATE_API_KEY,
            publicApiKey=PUBLIC_API_KEY,
        )
| ragarwal6397/sqoot | sqoot/tests/__init__.py | Python | mit | 757 |
# $Id: TestWebDAVAccess.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for FileAccess module
#
import os
# Make sure python-kerberos package is installed
import kerberos
import sys
import httplib
import urllib2
import urllib2_kerberos
import re
import base64
import unittest
from urlparse import urlparse
sys.path.append("../..")
readmetext="This directory is the root of the ADMIRAL shared file system.\n"
mountpoint="mountadmiralwebdav"
readmefile="ADMIRAL.README"
theurl="http://zoo-admiral-ibrg.zoo.ox.ac.uk/webdav/TestUser1"
class TestWebDAVAccess(unittest.TestCase):
def setUp(self):
# mount WebDAV share here
status=os.system('mount '+mountpoint)
self.assertEqual(status, 0, 'Mount failure')
return
def tearDown(self):
os.system('umount '+mountpoint)
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testReadMe(self):
# Test assumes ADMIRAL shared file system is mounted at mountpoint
# Open README file
f = open(mountpoint+'/'+readmefile)
assert (f), "README file open failed"
# Read first line
l = f.readline()
# Close file
f.close()
# Check first line
self.assertEqual(l, readmetext, 'Unexpected README content')
return
def testCreateFile(self):
f = open(mountpoint+'/testCreateWebDAVFile.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(mountpoint+'/testCreateWebDAVFile.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content')
return
def testUpdateFile(self):
filename = mountpoint+'/testUpdateWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'a+')
f.write('Test update of file\n')
f.close()
f = open(filename,'r')
l1 = f.readline()
l2 = f.readline()
f.close()
self.assertEqual(l1, 'Test creation of file\n', 'Unexpected file content: l1')
self.assertEqual(l2, 'Test update of file\n', 'Unexpected file content: l2')
return
def testRewriteFile(self):
filename = mountpoint+'/testRewriteWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'w+')
f.write('Test rewrite of file\n')
f.close()
f = open(filename,'r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test rewrite of file\n', 'Unexpected file content')
return
def testDeleteFile(self):
filename1 = mountpoint+'/testCreateWebDAVFile.tmp'
filename2 = mountpoint+'/testRewriteWebDAVFile.tmp'
filename3 = mountpoint+'/testUpdateWebDAVFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
assert (False), "File "+filename2+" not deleted"
except:
pass
# Test and delete third file
try:
s = os.stat(filename3)
except:
assert (False), "File "+filename3+" not found or other stat error"
os.remove(filename3)
try:
s = os.stat(filename3)
assert (False), "File "+filename3+" not deleted"
except:
pass
return
def testWebDAVFile(self):
h1 = httplib.HTTPConnection('zakynthos.zoo.ox.ac.uk')
h1.request('GET','/webdav')
res=h1.getresponse()
authreq = str(res.status) + ' ' + res.reason
print authreq
self.assertEqual(authreq, '401 Authorization Required', 'Unexpected response')
return
def testWebDAVFileUrlLib(self):
#_ignore = kerberos.GSS_C_DELEG_FLAG
#from kerberos import GSS_C_DELEG_FLAG,GSS_C_MUTUAL_FLAG,GSS_C_SEQUENCE_FLAG
#_ignore, ctx = kerberos.authGSSClientInit('krbtgt/OX.AC.UK@OX.AC.UK', gssflags=GSS_C_DELEG_FLAG|GSS_C_MUTUAL_FLAG|GSS_C_SEQUENCE_FLAG)
_ignore, ctx = kerberos.authGSSClientInit('HTTP@zakynthos.zoo.ox.ac.uk')
_ignore = kerberos.authGSSClientStep(ctx, '')
tgt = kerberos.authGSSClientResponse(ctx)
opener = urllib2.build_opener()
opener.add_handler(urllib2_kerberos.HTTPKerberosAuthHandler())
resp = opener.open(theurl)
print resp
return
req = urllib2.Request(theurl)
try:
handle = urllib2.urlopen(req)
except IOError, e:
pass
else:
assert (False), theurl + " isn't protected by authentication."
if not hasattr(e, 'code') or e.code != 401:
# we got an error - but not a 401 error
assert (False), theurl + " Error: " + e
authline = e.headers['www-authenticate']
# this gets the www-authenticate line from the headers
# which has the authentication scheme and realm in it
authobj = re.compile(
r'''(?:\s*www-authenticate\s*:)?\s*(\w*)\s+realm=['"]([^'"]+)['"]''',
re.IGNORECASE)
# this regular expression is used to extract scheme and realm
matchobj = authobj.match(authline)
if not matchobj:
# if the authline isn't matched by the regular expression
# then something is wrong
assert (False), "Malformed authentication header: " + authline
scheme = matchobj.group(1)
realm = matchobj.group(2)
# here we've extracted the scheme
# and the realm from the header
print scheme
print realm
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMe"
, "testCreateFile"
, "testRewriteFile"
, "testUpdateFile"
, "testDeleteFile"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testWebDAVFile"
, "testWebDAVFileUrlLib"
]
}
return TestUtils.getTestSuite(TestWebDAVAccess, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileAccess", getTestSuite, sys.argv)
# End.
| bhavanaananda/DataStage | test/FileShare/tests/TestWebDAVAccess.py | Python | mit | 7,828 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-02 21:54
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema change: round.score becomes a DecimalField with 4 digits,
    # 2 decimal places, defaulting to 0.
    dependencies = [
        ('round', '0013_plot_batch'),
    ]
    operations = [
        migrations.AlterField(
            model_name='round',
            name='score',
            field=models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=4),
        ),
    ]
| adminq80/Interactive_estimation | game/round/migrations/0014_auto_20161102_2154.py | Python | mit | 509 |
from office365.runtime.paths.service_operation import ServiceOperationPath
from office365.sharepoint.base_entity import BaseEntity
class Principal(BaseEntity):
    """Represents a user or group that can be assigned permissions to control security."""
    @property
    def id(self):
        """Gets a value that specifies the member identifier for the user or group.
        :rtype: int or None
        """
        return self.properties.get('Id', None)
    @property
    def title(self):
        """Gets a value that specifies the name of the principal.
        :rtype: str or None
        """
        return self.properties.get('Title', None)
    @title.setter
    def title(self, value):
        # Queue the new title for persistence on the next server round-trip.
        self.set_property('Title', value)
    @property
    def login_name(self):
        """Gets the login name of the principal.
        :rtype: str or None
        """
        return self.properties.get('LoginName', None)
    @property
    def user_principal_name(self):
        """Gets the UPN of the principal.
        :rtype: str or None
        """
        return self.properties.get('UserPrincipalName', None)
    @property
    def is_hidden_in_ui(self):
        """Gets whether the principal is hidden in the UI.
        (docstring fixed: previously copy-pasted from login_name)
        :rtype: bool or None
        """
        return self.properties.get('IsHiddenInUI', None)
    @property
    def principal_type(self):
        """Gets the principal type as a numeric code.
        (docstring fixed: previously copy-pasted from login_name)
        :rtype: int or None
        """
        return self.properties.get('PrincipalType', None)
    def set_property(self, name, value, persist_changes=True):
        """Set a property and, when still unresolved, derive this entity's path."""
        super(Principal, self).set_property(name, value, persist_changes)
        # fallback: create a new resource path
        if self._resource_path is None:
            if name == "Id":
                self._resource_path = ServiceOperationPath(
                    "GetById", [value], self._parent_collection.resource_path)
            elif name == "LoginName":
                self._resource_path = ServiceOperationPath(
                    "GetByName", [value], self._parent_collection.resource_path)
        return self
| vgrem/Office365-REST-Python-Client | office365/sharepoint/principal/principal.py | Python | mit | 2,099 |
import csv
import os
import random
import svm_classifier
def get_classifier_filename(classifier_name):
    """Return the absolute path of the pickled classifier for `classifier_name`."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, 'classifiers', classifier_name + '.pkl')
def get_image_split(csv_filename, image_folder, train_ratio=0.75):
    """Read the labels CSV and randomly split image paths into train/test sets.

    csv_filename -- CSV whose header row has 'level' in column 1; thereafter
                    column 0 is the image id and column 1 the integer label.
    image_folder -- prefix prepended to every image name ('<folder>/<id>.jpeg').
    train_ratio  -- fraction of images assigned to the training split
                    (GENERALIZATION: new optional parameter; the default
                    keeps the previous hard-coded 75/25 split).

    Returns (train_filenames, test_filenames, image_classes) where
    image_classes maps each path to its int label. The split is random;
    seed the `random` module for reproducibility.
    """
    image_names = []
    image_classes = {}
    with open(csv_filename, 'r') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            # skip the header row
            if row[1] == 'level':
                continue
            image_name = image_folder + '/' + row[0] + '.jpeg'
            image_names.append(image_name)
            image_classes[image_name] = int(row[1])
    random.shuffle(image_names)
    train_num = int(train_ratio * len(image_names))
    train_filenames = image_names[:train_num]
    test_filenames = image_names[train_num:]
    return train_filenames, test_filenames, image_classes
def get_image_split2(image_folder):
    """Randomly split a class-per-subfolder dataset into 80/20 train/test.

    Expects <image_folder>/0 .. <image_folder>/4 to contain the images of
    each class. Returns (train_paths, test_paths, path->class mapping).
    """
    image_names = []
    image_classes = {}
    for image_class in range(5):
        class_dir = image_folder + '/' + str(image_class)
        for entry in os.listdir(class_dir):
            path = class_dir + '/' + entry
            image_names.append(path)
            image_classes[path] = image_class
    random.shuffle(image_names)
    train_num = int(0.8 * len(image_names))
    train_filenames = image_names[:train_num]
    test_filenames = image_names[train_num:]
    return train_filenames, test_filenames, image_classes
def get_classifier(classifier_name):
    """Instantiate a fresh batch classifier by name ('svm' is the only option)."""
    if classifier_name != 'svm':
        raise ValueError('invalid classifier: ' + classifier_name)
    return svm_classifier.SVMBatchClassifier([0, 1, 2, 3, 4])
def get_fitted_classifier(classifier_name):
    """Load the previously-trained classifier identified by name."""
    if classifier_name == 'svm':
        return svm_classifier.SVMBatchClassifier.load()
    raise ValueError('invalid classifier: ' + str(classifier_name))
| chinmayhegde/retinopathy-detection | retinopathy/helpers.py | Python | mit | 2,286 |
# Copyright (C) 2009 Ashley J. Wilson
# This software is licensed as described in the file COPYING in the root
# directory of this distribution.
from storenode import StoreNode
from bundlenode import BundleNode
from packagenode import PackageNode
class RootNode(StoreNode):
    """The virtual root of a Store repository.

    This node has as its children the BundleNode and/or set of PackageNodes,
    that are specified by the trac.ini file as the interesting part of
    the Store repository. RootNodes have only the special revision 'ONLY' and
    no previous or next changeset.
    """
    def __init__(self, repos, bundle_desc, package_desc):
        """Create a RootNode that reports the interesting subset of the repository.

        bundle_desc is either an exact bundle name from Store or a
        comma-separated list of the same; package_desc likewise holds one or
        many package prefixes. Either parameter may be 'ALL' (return every
        such entity) or ''/None (return nothing of that type).
        """
        StoreNode.__init__(self, '/', 'ONLY', StoreNode.DIRECTORY, repos)
        if bundle_desc:
            self.bundle_names = [bn.strip() for bn in bundle_desc.split(',')]
        else:
            self.bundle_names = []
        if package_desc:
            self.package_prefixes = [pp.strip() for pp in package_desc.split(',')]
        else:
            self.package_prefixes = []
    def get_entries(self):
        """Generator method for the PackageNodes and/or BundleNodes contained
        within this view of the repository.
        """
        for bundle_name in self.bundle_names:
            if bundle_name == 'ALL':
                for bnode in BundleNode.all(self.repos):
                    yield bnode
            elif bundle_name:
                # ROBUSTNESS FIX: the condition was `!= None`, which is always
                # true for strings produced by split(); blank entries from
                # stray commas in the descriptor are now skipped instead of
                # querying for a bundle named "".
                yield BundleNode.with_name(self.repos, bundle_name)
        for package_prefix in self.package_prefixes:
            if package_prefix == 'ALL':
                for pkg in PackageNode.all(self.repos):
                    yield pkg
            elif package_prefix:
                for pkg in PackageNode.named_like(self.repos, package_prefix + '%'):
                    yield pkg
| smashwilson/strac | strac/rootnode.py | Python | mit | 2,306 |
"""Add archived field to custom form
Revision ID: 391b3fa1471
Revises: 473f91b5874
Create Date: 2016-01-23 15:51:40.304025
"""
# revision identifiers, used by Alembic.
revision = '391b3fa1471'
down_revision = '473f91b5874'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Forward: add the `archived` flag to custom_form and make `price` nullable."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('custom_form', sa.Column('archived', sa.Boolean(), nullable=True))
    op.alter_column('custom_form', 'price',
               existing_type=mysql.FLOAT(),
               nullable=True,
               existing_server_default=sa.text("'0'"))
    ### end Alembic commands ###
def downgrade():
    """Reverse: make `price` required again and drop the `archived` column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('custom_form', 'price',
               existing_type=mysql.FLOAT(),
               nullable=False,
               existing_server_default=sa.text("'0'"))
    op.drop_column('custom_form', 'archived')
    ### end Alembic commands ###
| viaict/viaduct | migrations/versions/2016_01_23_391b3fa1471_add_archived_field_to_custom_form.py | Python | mit | 1,019 |
import sys
import pigpio
import time
from colorama import Fore, Back, Style
def set_speed(lspeed, rspeed):
    """Drive both continuous-rotation servos at the given percentage speeds."""
    duty_left = int(lspeed) * 10000
    duty_right = int(rspeed) * 10000
    # hardware_PWM duty cycle is expressed in millionths, hence percent * 10000
    pi.hardware_PWM(left_servo_pin, 800, duty_left)
    pi.hardware_PWM(right_servo_pin, 800, duty_right)
pi = pigpio.pi()
# BCM pin assignments
left_servo_pin = 13
right_servo_pin = 12
dead_pin = 17
die_distance = 8  # NOTE(review): unused in this script -- confirm before removing
ls = 100  # left speed (percent)
rs = 100  # right speed (percent)
print("start")
try:
    while True:
        set_speed(ls, rs)
        # dead-man input low -> stop the motors while it stays low
        if pi.read(dead_pin) == pigpio.LOW:
            set_speed(0, 0)
except :
    # deliberate catch-all: on any error or Ctrl-C, stop the robot then exit
    set_speed(0, 0)
    sys.exit(0)
| yanadsl/ML-Autocar | test.py | Python | mit | 519 |
# Generated by Django 2.0.2 on 2018-03-13 02:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema change: creditcard.tail_no becomes CharField(max_length=10).
    dependencies = [
        ('cc', '0003_auto_20180228_1145'),
    ]
    operations = [
        migrations.AlterField(
            model_name='creditcard',
            name='tail_no',
            field=models.CharField(max_length=10),
        ),
    ]
| largetalk/tenbagger | capital/reactor/cc/migrations/0004_auto_20180313_1052.py | Python | mit | 386 |
from django.http import HttpResponse
from django.test import TestCase
from django.utils import unittest
from django.conf.urls import patterns
from rest_framework import permissions, status
try:
from rest_framework_oauth.authentication import OAuth2Authentication
except ImportError:
try:
from rest_framework.authentication import OAuth2Authentication
except ImportError:
OAuth2Authentication = None
try:
try:
from rest_framework_oauth.compat import oauth2_provider
from rest_framework_oauth.compat.oauth2_provider import oauth2
except ImportError:
# if oauth2 module can not be imported, skip the tests,
# because models have not been initialized.
oauth2_provider = None
except ImportError:
try:
from rest_framework.compat import oauth2_provider
from rest_framework.compat.oauth2_provider import oauth2 # NOQA
except ImportError:
# if oauth2 module can not be imported, skip the tests,
# because models have not been initialized.
oauth2_provider = None
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework.views import APIView
from rest_framework_jwt import utils
from rest_framework_jwt_courb.compat import get_user_model
from rest_framework_jwt_courb.settings import api_settings, DEFAULTS
from rest_framework_jwt_courb.authentication import JSONWebTokenAuthentication
User = get_user_model()
DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED = 'django-oauth2-provider not installed'
factory = APIRequestFactory()
class MockView(APIView):
    """Minimal protected endpoint: a 200 response proves authentication passed."""
    permission_classes = (permissions.IsAuthenticated,)
    def get(self, request):
        return HttpResponse({'a': 1, 'b': 2, 'c': 3})
    def post(self, request):
        return HttpResponse({'a': 1, 'b': 2, 'c': 3})
# Test URL conf: the same view exposed under three auth configurations so the
# tests can check JWT alone and JWT combined with OAuth2 in either priority.
urlpatterns = patterns(
    '',
    (r'^jwt/$', MockView.as_view(
        authentication_classes=[JSONWebTokenAuthentication])),
    (r'^jwt-oauth2/$', MockView.as_view(
        authentication_classes=[
            JSONWebTokenAuthentication, OAuth2Authentication])),
    (r'^oauth2-jwt/$', MockView.as_view(
        authentication_classes=[
            OAuth2Authentication, JSONWebTokenAuthentication])),
)
class JSONWebTokenAuthenticationTests(TestCase):
    """JSON Web Token Authentication.

    Integration tests that POST to the URL conf above with various
    `Authorization` headers and assert on the resulting status codes,
    error messages and `WWW-Authenticate` challenges.
    """
    # Use this module's urlpatterns as the URL conf for the test client.
    urls = 'tests.test_authentication'
    def setUp(self):
        # CSRF checks are enforced so these tests also prove that JWT
        # authentication is exempt from CSRF.
        self.csrf_client = APIClient(enforce_csrf_checks=True)
        self.username = 'jpueblo'
        self.email = 'jpueblo@example.com'
        self.user = User.objects.create_user(self.username, self.email)
    def test_post_form_passing_jwt_auth(self):
        """
        Ensure POSTing form over JWT auth with correct credentials
        passes and does not require CSRF
        """
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        auth = 'JWT {0}'.format(token)
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_post_json_passing_jwt_auth(self):
        """
        Ensure POSTing JSON over JWT auth with correct credentials
        passes and does not require CSRF
        """
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        auth = 'JWT {0}'.format(token)
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_post_form_failing_jwt_auth(self):
        """
        Ensure POSTing form over JWT auth without correct credentials fails
        """
        response = self.csrf_client.post('/jwt/', {'example': 'example'})
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_post_json_failing_jwt_auth(self):
        """
        Ensure POSTing json over JWT auth without correct credentials fails
        """
        response = self.csrf_client.post('/jwt/', {'example': 'example'},
                                         format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
    def test_post_no_jwt_header_failing_jwt_auth(self):
        """
        Ensure POSTing over JWT auth without credentials fails
        """
        # Prefix only, no token at all.
        auth = 'JWT'
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        msg = 'Invalid Authorization header. No credentials provided.'
        self.assertEqual(response.data['detail'], msg)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
    def test_post_invalid_jwt_header_failing_jwt_auth(self):
        """
        Ensure POSTing over JWT auth without correct credentials fails
        """
        # Token containing a space is rejected before any decoding happens.
        auth = 'JWT abc abc'
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        msg = ('Invalid Authorization header. Credentials string '
               'should not contain spaces.')
        self.assertEqual(response.data['detail'], msg)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
    def test_post_expired_token_failing_jwt_auth(self):
        """
        Ensure POSTing over JWT auth with expired token fails
        """
        payload = utils.jwt_payload_handler(self.user)
        payload['exp'] = 1  # epoch second 1 -- long expired
        token = utils.jwt_encode_handler(payload)
        auth = 'JWT {0}'.format(token)
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        msg = 'Signature has expired.'
        self.assertEqual(response.data['detail'], msg)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
    def test_post_invalid_token_failing_jwt_auth(self):
        """
        Ensure POSTing over JWT auth with invalid token fails
        """
        auth = 'JWT abc123'
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        msg = 'Error decoding signature.'
        self.assertEqual(response.data['detail'], msg)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
    @unittest.skipUnless(oauth2_provider, DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED)
    def test_post_passing_jwt_auth_with_oauth2_priority(self):
        """
        Ensure POSTing over JWT auth with correct credentials
        passes and does not require CSRF when OAuth2Authentication
        has priority on authentication_classes
        """
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        auth = 'JWT {0}'.format(token)
        response = self.csrf_client.post(
            '/oauth2-jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response)
    @unittest.skipUnless(oauth2_provider, DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED)
    def test_post_passing_oauth2_with_jwt_auth_priority(self):
        """
        Ensure POSTing over OAuth2 with correct credentials
        passes and does not require CSRF when JSONWebTokenAuthentication
        has priority on authentication_classes
        """
        Client = oauth2_provider.oauth2.models.Client
        AccessToken = oauth2_provider.oauth2.models.AccessToken
        oauth2_client = Client.objects.create(
            user=self.user,
            client_type=0,
        )
        access_token = AccessToken.objects.create(
            user=self.user,
            client=oauth2_client,
        )
        auth = 'Bearer {0}'.format(access_token.token)
        response = self.csrf_client.post(
            '/jwt-oauth2/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response)
    def test_post_form_passing_jwt_invalid_payload(self):
        """
        Ensure POSTing form data over JWT auth with an invalid payload fails
        """
        payload = dict(email=None)
        token = utils.jwt_encode_handler(payload)
        auth = 'JWT {0}'.format(token)
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
        msg = 'Invalid payload.'
        self.assertEqual(response.data['detail'], msg)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_different_auth_header_prefix(self):
        """
        Ensure using a different setting for `JWT_AUTH_HEADER_PREFIX` and
        with correct credentials passes.
        """
        api_settings.JWT_AUTH_HEADER_PREFIX = 'Bearer'
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        auth = 'Bearer {0}'.format(token)
        response = self.csrf_client.post(
            '/jwt/', {'example': 'example'},
            HTTP_AUTHORIZATION=auth, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Restore original settings
        api_settings.JWT_AUTH_HEADER_PREFIX = DEFAULTS['JWT_AUTH_HEADER_PREFIX']
| coUrbanize/django-rest-framework-jwt | tests/test_authentication.py | Python | mit | 9,810 |
#!/usr/bin/env python
# -*- coding: ascii -*-
"""
package.module
~~~~~~~~~~~~~
A description which can be long and explain the complete
functionality of this module even with indented code examples.
Class/Function however should not be documented here.
:copyright: year by my name, see AUTHORS for more details
:license: license_name, see LICENSE for more details
"""
import struct
import sys
outputfilename = 'raw_audio.out'


def do_convert(filename):
    """Convert a text file of integer samples (one value per line) into
    packed native 32-bit ints written to ``outputfilename``.

    Lines that do not parse as an integer are reported and skipped.
    """
    # `with` guarantees both files are closed even on error; the original
    # leaked f_in when opening f_out failed, and its bare `except` also hid
    # genuine I/O errors behind the "Cannot convert" message.
    with open(filename, 'r') as f_in, open(outputfilename, 'wb') as f_out:
        for line in f_in:
            try:
                sample = int(line)
            except ValueError:
                print("Cannot convert: " + line)
            else:
                # pack integer in a binary string
                f_out.write(struct.pack("i", sample))
if __name__ == '__main__':
    # Command-line entry point: convert the file named on the command line.
    print("Converting...")
    do_convert(sys.argv[1])
    print("done. Written to " + outputfilename)
| EPiCS/soundgates | hardware/tools/to_rawdata.py | Python | mit | 1,000 |
from gensim.corpora import MmCorpus
from gensim.utils import unpickle
class MetaMmCorpusWrapper:
    """Wrapper which loads MM corpus with metadata."""

    def __init__(self, filename):
        # The metadata pickle sits next to the corpus file and is aligned
        # with the corpus by document index.
        self.corpus = MmCorpus(filename)
        self.metadata = unpickle(filename + ".metadata.cpickle")

    def __iter__(self):
        """Yield (document, metadata) pairs in corpus order."""
        for index, document in enumerate(self.corpus):
            yield document, self.metadata[index]
import pandas as pd
from pandas import DataFrame
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')

df = pd.read_csv('sp500_ohlc.csv', index_col='Date', parse_dates=True)

# 25-period rolling standard deviation of the close; min_periods=1 lets the
# early rows (fewer than 25 observations) still receive a value.
# Fix: pd.rolling_std() was deprecated in pandas 0.18 and later removed;
# Series.rolling(...).std() is the supported equivalent with identical output.
df['STD'] = df['Close'].rolling(25, min_periods=1).std()

# Price on top, volatility below, sharing the x (date) axis.
ax1 = plt.subplot(2, 1, 1)
df['Close'].plot()
plt.ylabel('Close')

# do not do sharex first
ax2 = plt.subplot(2, 1, 2, sharex=ax1)
df['STD'].plot()
plt.ylabel('Standard Deviation')

plt.show()
#
# ida_kernelcache/offset.py
# Brandon Azad
#
# Functions for converting and symbolicating offsets.
#
import re
import idc
import idautils
import ida_utilities as idau
import internal
import kernel
import stub
_log = idau.make_log(1, __name__)
def initialize_data_offsets():
    """Convert offsets in data segments into offsets in IDA.
    Segment names must be initialized with segments.initialize_segments() first.
    """
    # Normally, for user-space programs, this operation would be dangerous because there's a good
    # chance that a valid userspace address would happen to show up in regular program data that is
    # not actually an address. However, since kernel addresses are numerically much larger, the
    # chance of this happening is much less.
    for seg in idautils.Segments():
        name = idc.SegName(seg)
        # Only constant data, the GOT, and plain data sections are scanned
        # for pointer-sized words.
        if not (name.endswith('__DATA_CONST.__const') or name.endswith('__got')
                or name.endswith('__DATA.__data')):
            continue
        for word, ea in idau.ReadWords(seg, idc.SegEnd(seg), addresses=True):
            if idau.is_mapped(word, value=False):
                # Mark operand 0 at ea as an offset in IDA.
                idc.OpOff(ea, 0, 0)
kernelcache_offset_suffix = '___offset_'
"""The suffix that gets appended to a symbol to create the offset name, without the offset ID."""

_offset_regex = re.compile(r"^(\S+)" + kernelcache_offset_suffix + r"\d+$")
"""A regular expression to match and extract the target name from an offset symbol."""

def offset_name_target(offset_name):
    """Get the target to which an offset name refers.

    Returns None if the name is not an offset symbol. No checks are
    performed to ensure that the target actually exists.
    """
    match = _offset_regex.match(offset_name)
    return match.group(1) if match else None
def _process_offset(offset, ea, next_offset):
    """Process an offset in a __got section.

    offset      -- the target address the word at ea points to
    ea          -- the address holding the offset
    next_offset -- generator producing a fresh offset symbol for a name
    Returns True if the offset was successfully renamed.
    """
    # Convert the address containing the offset into an offset in IDA, but continue if it fails.
    if not idc.OpOff(ea, 0, 0):
        _log(1, 'Could not convert {:#x} into an offset', ea)
    # Get the name to which the offset refers.
    name = idau.get_ea_name(offset, user=True)
    if not name:
        _log(3, 'Offset at address {:#x} has target {:#x} without a name', ea, offset)
        return False
    # Make sure this isn't an offset to another stub or to a jump function to another stub. See the
    # comment in _symbolicate_stub.
    if stub.symbol_references_stub(name):
        _log(1, 'Offset at address {:#x} has target {:#x} (name {}) that references a stub', ea,
                offset, name)
        return False
    # Set the new name for the offset.
    symbol = next_offset(name)
    if symbol is None:
        _log(0, 'Could not generate offset symbol for {}: names exhausted', name)
        return False
    if not idau.set_ea_name(ea, symbol, auto=True):
        _log(2, 'Could not set name {} for offset at {:#x}', symbol, ea)
        return False
    return True
def _process_offsets_section(segstart, next_offset):
    """Process all the offsets in a __got section.

    Walks every pointer-sized word from segstart to the segment end and
    symbolicates each previously unnamed, mapped offset.
    """
    for offset, ea in idau.ReadWords(segstart, idc.SegEnd(segstart), addresses=True):
        if not offset_name_target(idau.get_ea_name(ea)):
            # This is not a previously named offset.
            if idau.is_mapped(offset, value=False):
                _process_offset(offset, ea, next_offset)
            else:
                _log(-1, 'Offset {:#x} at address {:#x} is unmapped', offset, ea)
def initialize_offset_symbols():
    """Populate IDA with information about the offsets in an iOS kernelcache.
    Search through the kernelcache for global offset tables (__got sections), convert each offset
    into an offset type in IDA, and rename each offset according to its target.
    This function does nothing in the newer 12-merged format kernelcache.
    """
    # Generates unique symbol names of the form '<target>___offset_<n>'.
    next_offset = internal.make_name_generator(kernelcache_offset_suffix)
    for ea in idautils.Segments():
        segname = idc.SegName(ea)
        if not segname.endswith('__got'):
            continue
        _log(2, 'Processing segment {}', segname)
        _process_offsets_section(ea, next_offset)
| bazad/ida_kernelcache | ida_kernelcache/offset.py | Python | mit | 4,124 |
#Final Exam Problem 4-2
import random, pylab
# You are given this function
def getMeanAndStd(X):
    """Return (mean, standard deviation) of the sequence X.

    Uses the population standard deviation (divides by len(X)).
    """
    mean = sum(X) / float(len(X))
    variance = sum((x - mean) ** 2 for x in X) / len(X)
    return mean, variance ** 0.5
# You are given this class
class Die(object):
    """A die whose faces are given by an arbitrary non-empty list."""

    def __init__(self, valList):
        """ valList is not empty """
        # Copy so later mutation of the caller's list cannot change the die.
        self.possibleVals = list(valList)

    def roll(self):
        """Return one face chosen uniformly at random."""
        return random.choice(self.possibleVals)
# Implement this -- Coding Part 1 of 2
def makeHistogram(values, numBins, xLabel, yLabel, title=None):
    """
    - values, a sequence of numbers
    - numBins, a positive int
    - xLabel, yLabel, title, are strings
    - Produces a histogram of values with numBins bins and the indicated labels
      for the x and y axis
    - If title is provided by caller, puts that title on the figure and otherwise
      does not title the figure
    """
    pylab.hist(values, numBins)
    pylab.xlabel(xLabel)
    pylab.ylabel(yLabel)
    if title is not None:
        pylab.title(title)
    pylab.show()
# Implement this -- Coding Part 2 of 2
def getAverage(die, numRolls, numTrials):
    """
    - die, a Die
    - numRolls, numTrials, are positive ints
    - Calculates the expected mean value of the longest run of a number
      over numTrials runs of numRolls rolls.
    - Calls makeHistogram to produce a histogram of the longest runs for all
      the trials. There should be 10 bins in the histogram
    - Choose appropriate labels for the x and y axes.
    - Returns the mean calculated to 3 decimal places
    """
    longest_runs = []
    for _ in range(numTrials):
        rolls = [die.roll() for _ in range(numRolls)]
        # Longest run of consecutive equal values. A non-empty sequence has
        # a run of at least 1; an empty one (numRolls == 0) has run 0,
        # matching the original behaviour.
        if rolls:
            longest = run = 1
        else:
            longest = run = 0
        for prev, cur in zip(rolls, rolls[1:]):
            if cur == prev:
                run += 1
                longest = max(longest, run)
            else:
                run = 1
        longest_runs.append(longest)
    makeHistogram(longest_runs, 10, 'Longest Run', 'Frequency',
                  'Frequency of Longest Consecutive Dice Rolls')
    # Bug fix: the spec requires the mean to 3 decimal places, but the
    # original returned the raw quotient (and truncated under Python 2's
    # integer division). float() keeps the division exact on Python 2.
    return round(sum(longest_runs) / float(len(longest_runs)), 3)
# One test case
# With numRolls=1 every trial's longest run is 1, so the mean must be 1.0.
print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 1, 1000))
from __future__ import unicode_literals
import six
from rbtools.utils.encoding import force_unicode
class APIError(Exception):
    """Base error for failed API requests.

    Carries the HTTP status code, the API-specific error code, and the raw
    response payload (if any) for callers that need details.
    """

    def __init__(self, http_status, error_code, rsp=None, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
        self.http_status = http_status
        self.error_code = error_code
        self.rsp = rsp

    def __str__(self):
        details = 'HTTP %d' % self.http_status

        if self.error_code:
            details = '%s, API Error %d' % (details, self.error_code)

        # Prefer the server-supplied message when the payload carries one.
        if self.rsp and 'err' in self.rsp:
            return '%s (%s)' % (self.rsp['err']['msg'], details)

        return details
class AuthorizationError(APIError):
    """API error for authorization failures (mapped from HTTP 401)."""
    pass
class BadRequestError(APIError):
    """API error for bad requests (mapped from HTTP 400).

    When the response payload carries per-field validation messages, they
    are appended to the error string, one field per line.
    """

    def __str__(self):
        lines = [super(BadRequestError, self).__str__()]

        if self.rsp and 'fields' in self.rsp:
            lines.append('')

            for field, errors in self.rsp['fields'].items():
                lines.append('  %s: %s' % (field, '; '.join(errors)))

        return '\n'.join(lines)
class CacheError(Exception):
    """An exception for caching errors.

    Note: derives from Exception directly, not from APIError.
    """
class ServerInterfaceError(Exception):
    """Error communicating with the server, wrapping a message string."""
    def __init__(self, msg, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
        self.msg = msg
    def __str__(self):
        """Return the error message as a unicode string.
        Returns:
            unicode:
            The error message as a unicode string.
        """
        # force_unicode handles byte-string messages on Python 2.
        return force_unicode(self.msg)
# Maps HTTP status codes to the most specific APIError subclass.
API_ERROR_TYPE = {
    400: BadRequestError,
    401: AuthorizationError,
}
def create_api_error(http_status, *args, **kwargs):
    # Fall back to the generic APIError for statuses without a mapping.
    error_type = API_ERROR_TYPE.get(http_status, APIError)
    return error_type(http_status, *args, **kwargs)
| reviewboard/rbtools | rbtools/api/errors.py | Python | mit | 1,777 |
"""Tool specific version checking to identify out of date dependencies.
This provides infrastructure to check version strings against installed
tools, enabling re-installation if a version doesn't match. This is a
lightweight way to avoid out of date dependencies.
"""
from __future__ import print_function
from distutils.version import LooseVersion
from cloudbio.custom import shared
from cloudbio.fabutils import quiet
def _parse_from_stdoutflag(out, flag, stdout_index=-1):
"""Extract version information from a flag in verbose stdout.
flag -- text information to identify the line we should split for a version
stdout_index -- Position of the version information in the split line. Defaults
to the last item.
"""
for line in out.split("\n") + out.stderr.split("\n"):
if line.find(flag) >= 0:
parts = line.split()
return parts[stdout_index].strip()
print("Did not find version information with flag %s from: \n %s" % (flag, out))
return ""
def _clean_version(x):
if x.startswith("upstream/"):
x = x.replace("upstream/", "")
if x.startswith("("):
x = x[1:].strip()
if x.endswith(")"):
x = x[:-1].strip()
if x.startswith("v"):
x = x[1:].strip()
return x
def up_to_date(env, cmd, version, args=None, stdout_flag=None,
               stdout_index=-1):
    """Return True if the installed `cmd` reports a version >= `version`."""
    iversion = get_installed_version(env, cmd, version, args, stdout_flag,
                                     stdout_index)
    if not iversion:
        return False
    else:
        return LooseVersion(iversion) >= LooseVersion(version)
def is_version(env, cmd, version, args=None, stdout_flag=None,
               stdout_index=-1):
    """Return True if the installed `cmd` reports exactly `version`."""
    iversion = get_installed_version(env, cmd, version, args, stdout_flag,
                                     stdout_index)
    if not iversion:
        return False
    else:
        return LooseVersion(iversion) == LooseVersion(version)
def get_installed_version(env, cmd, version, args=None, stdout_flag=None,
                          stdout_index=-1):
    """Check if the given command is up to date with the provided version.

    Runs `cmd` (plus optional `args`) with the system-install paths exported
    and returns the cleaned version string it reports, or False when the
    executable is not on PATH or pkg-config cannot resolve it.
    """
    if shared._executable_not_on_path(cmd):
        return False
    if args:
        cmd = cmd + " " + " ".join(args)
    with quiet():
        # Expose the system install's binaries/libraries so tools installed
        # there (not globally) can still be queried.
        path_safe = ("export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:{s}/lib/pkgconfig && "
                     "export PATH=$PATH:{s}/bin && "
                     "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{s}/lib && ".format(s=env.system_install))
        out = env.safe_run_output(path_safe + cmd)
    if stdout_flag:
        iversion = _parse_from_stdoutflag(out, stdout_flag, stdout_index)
    else:
        iversion = out.strip()
    iversion = _clean_version(iversion)
    if " not found in the pkg-config search path" in iversion:
        return False
    return iversion
| chapmanb/cloudbiolinux | cloudbio/custom/versioncheck.py | Python | mit | 2,864 |
from google.appengine.ext.webapp import template
from models.user import User
from models.page import Page
import webapp2
import json
class PageHandler(webapp2.RequestHandler):
    """Renders a single page belonging to the logged-in user."""

    def get(self, page_id):
        template_params = {}
        user = None
        if self.request.cookies.get('session'):
            user = User.checkToken(self.request.cookies.get('session'))
        if not user:
            # Bug fix: redirect() does not abort the handler, so return
            # explicitly; otherwise the lookup below ran with user=None.
            self.redirect('/')
            return
        # Bug fix: the original passed the undefined name `title`; the URL
        # route captures the page identifier as `page_id`.
        page = Page.getPageUser(user, page_id)
        if page:
            # NOTE(review): template_params is empty -- presumably `page`
            # should be passed to the template; confirm against the template.
            html = template.render("web/templates/page.html", template_params)
            self.response.write(html)
        else:
            # Unknown page: respond 404 instead of an empty 200.
            self.response.set_status(404)
# WSGI application: routes /pages/<page_id> to PageHandler.
app = webapp2.WSGIApplication([
    ('/pages/(.*)', PageHandler),
], debug=True)
| racheliel/My-little-business | MyLittleBuisness/web/pages/page.py | Python | mit | 648 |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailsearch import urls as wagtailsearch_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from feeds.feeds import BlogFeed
admin.autodiscover()
# Routing: Django admin, Wagtail admin/search/documents, the blog RSS feed,
# and finally Wagtail's page-serving catch-all.
urlpatterns = patterns('',
    url(r'^django-admin/', include(admin.site.urls)),
    url(r'^admin/', include(wagtailadmin_urls)),
    url(r'^search/', include(wagtailsearch_urls)),
    url(r'^documents/', include(wagtaildocs_urls)),
    url(r'^blog/feed/$', BlogFeed(), name='blog_feed'),
    # For anything not caught by a more specific rule above, hand over to
    # Wagtail's serving mechanism
    url(r'', include(wagtail_urls)),
)
# Development only: serve static/media files and the favicon redirect.
if settings.DEBUG:
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    from django.views.generic.base import RedirectView
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += [
        url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'favicon.ico', permanent=True))
    ]
| niceguydave/wagtail-cookiecutter-foundation | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/urls.py | Python | mit | 1,301 |
from __future__ import absolute_import
from unittest import TestCase
from plotly.graph_objs import (Data, Figure, Layout, Line, Margin, Marker,
Scatter, XAxis, YAxis)
class TestGetData(TestCase):
    """Tests for Figure.get_data()/Data.get_data() extraction.

    setUp builds one scatter figure with four named traces; the tests check
    that get_data() strips styling and returns only x/y/text/name, both as a
    list of dicts and as a flattened 'name.field' mapping.
    """
    fig = None
    def setUp(self):
        super(TestGetData, self).setUp()
        self.fig = Figure(
            data=Data([
                Scatter(
                    x=[52698, 43117],
                    y=[53, 31],
                    mode='markers',
                    name='North America',
                    text=['United States', 'Canada'],
                    marker=Marker(
                        color='rgb(164, 194, 244)',
                        size=12,
                        line=Line(
                            color='white',
                            width=0.5
                        )
                    )
                ),
                # NOTE(review): this trace has 9 x/y points but only 8 text
                # labels -- the expected data below preserves that quirk.
                Scatter(
                    x=[39317, 37236, 35650, 30066, 29570, 27159, 23557, 21046,
                       18007],
                    y=[33, 20, 13, 19, 27, 19, 49, 44, 38],
                    mode='markers',
                    name='Europe',
                    text=['Germany', 'Britain', 'France', 'Spain', 'Italy',
                          'Czech Rep.', 'Greece', 'Poland'],
                    marker=Marker(
                        color='rgb(255, 217, 102)',
                        size=12,
                        line=Line(
                            color='white',
                            width=0.5
                        )
                    )
                ),
                Scatter(
                    x=[42952, 37037, 33106, 17478, 9813, 5253, 4692, 3899],
                    y=[23, 42, 54, 89, 14, 99, 93, 70],
                    mode='markers',
                    name='Asia/Pacific',
                    text=['Australia', 'Japan', 'South Korea', 'Malaysia',
                          'China', 'Indonesia', 'Philippines', 'India'],
                    marker=Marker(
                        color='rgb(234, 153, 153)',
                        size=12,
                        line=Line(
                            color='white',
                            width=0.5
                        )
                    )
                ),
                # NOTE(review): 'Venezuela' appears twice in this fixture;
                # the expected data below intentionally matches.
                Scatter(
                    x=[19097, 18601, 15595, 13546, 12026, 7434, 5419],
                    y=[43, 47, 56, 80, 86, 93, 80],
                    mode='markers',
                    name='Latin America',
                    text=['Chile', 'Argentina', 'Mexico', 'Venezuela',
                          'Venezuela', 'El Salvador', 'Bolivia'],
                    marker=Marker(
                        color='rgb(142, 124, 195)',
                        size=12,
                        line=Line(
                            color='white',
                            width=0.5
                        )
                    )
                )
            ]),
            layout=Layout(
                title='Quarter 1 Growth',
                autosize=False,
                width=500,
                height=500,
                xaxis=XAxis(
                    title='GDP per Capita',
                    showgrid=False,
                    zeroline=False
                ),
                yaxis=YAxis(
                    title='Percent',
                    showline=False
                ),
                margin=Margin(
                    l=65,
                    r=50,
                    b=65,
                    t=90
                )
            )
        )
    def test_get_data(self):
        # get_data() should drop mode/marker styling, keeping only the data.
        data = self.fig.get_data()
        comp_data = [
            {
                'name': 'North America',
                'text': ['United States', 'Canada'],
                'x': [52698, 43117],
                'y': [53, 31]
            },
            {
                'name': 'Europe',
                'text': ['Germany', 'Britain', 'France', 'Spain', 'Italy',
                         'Czech Rep.', 'Greece', 'Poland'],
                'x': [39317, 37236, 35650, 30066, 29570, 27159, 23557, 21046,
                      18007],
                'y': [33, 20, 13, 19, 27, 19, 49, 44, 38]
            },
            {
                'name': 'Asia/Pacific',
                'text': ['Australia', 'Japan', 'South Korea', 'Malaysia',
                         'China', 'Indonesia', 'Philippines', 'India'],
                'x': [42952, 37037, 33106, 17478, 9813, 5253, 4692, 3899],
                'y': [23, 42, 54, 89, 14, 99, 93, 70]},
            {
                'name': 'Latin America',
                'text': ['Chile', 'Argentina', 'Mexico', 'Venezuela',
                         'Venezuela', 'El Salvador', 'Bolivia'],
                'x': [19097, 18601, 15595, 13546, 12026, 7434, 5419],
                'y': [43, 47, 56, 80, 86, 93, 80]
            }
        ]
        self.assertEqual(data, comp_data)
    def test_get_data_flatten(self):
        # this is similar to above, except nested objects are flattened
        flat_data = self.fig.get_data(flatten=True)
        comp_data = {
            'Europe.x': [39317, 37236, 35650, 30066, 29570, 27159, 23557,
                         21046, 18007],
            'Europe.y': [33, 20, 13, 19, 27, 19, 49, 44, 38],
            'Asia/Pacific.x': [42952, 37037, 33106, 17478, 9813, 5253, 4692,
                               3899],
            'Latin America.text': ['Chile', 'Argentina', 'Mexico', 'Venezuela',
                                   'Venezuela', 'El Salvador', 'Bolivia'],
            'North America.x': [52698, 43117],
            'Asia/Pacific.y': [23, 42, 54, 89, 14, 99, 93, 70],
            'Asia/Pacific.text': ['Australia', 'Japan', 'South Korea',
                                  'Malaysia', 'China', 'Indonesia',
                                  'Philippines', 'India'],
            'North America.y': [53, 31],
            'North America.text': ['United States', 'Canada'],
            'Europe.text': ['Germany', 'Britain', 'France', 'Spain', 'Italy',
                            'Czech Rep.', 'Greece', 'Poland'],
            'Latin America.x': [19097, 18601, 15595, 13546, 12026, 7434, 5419],
            'Latin America.y': [43, 47, 56, 80, 86, 93, 80]
        }
        self.assertEqual(flat_data, comp_data)
    # TODO test for Data, Scatter, etc..
    def test_flatten_repeated_trace_names(self):
        # Duplicate trace names must be disambiguated with _1, _2 suffixes.
        dl = Data([Scatter(name='thesame', x=[1, 2, 3]) for _ in range(3)])
        data = dl.get_data(flatten=True)
        comp_data = {
            'thesame.x': [1, 2, 3],
            'thesame_1.x': [1, 2, 3],
            'thesame_2.x': [1, 2, 3]
        }
        self.assertEqual(data, comp_data)
| ee-in/python-api | plotly/tests/test_core/test_graph_objs/test_get_data.py | Python | mit | 6,703 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-18 11:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates MDBMotif as a multi-table-inheritance
    # subclass of basicviz.Mass2Motif (parent_link OneToOne) and adds
    # description/featureset fields to MDBMotifSet plus the motif_set FK.
    dependencies = [
        ('basicviz', '0088_auto_20190218_1136'),
        ('motifdb', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='MDBMotif',
            fields=[
                ('mass2motif_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='basicviz.Mass2Motif')),
            ],
            bases=('basicviz.mass2motif',),
        ),
        migrations.AddField(
            model_name='mdbmotifset',
            name='description',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='mdbmotifset',
            name='featureset',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='basicviz.BVFeatureSet'),
        ),
        migrations.AddField(
            model_name='mdbmotif',
            name='motif_set',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='motifdb.MDBMotifSet'),
        ),
    ]
| sdrogers/ms2ldaviz | ms2ldaviz/motifdb/migrations/0002_auto_20190218_1136.py | Python | mit | 1,323 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-26 15:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds a required `user` FK to Phenotype,
    # backfilling existing rows with user id 1 (preserve_default=False
    # removes that default afterwards).
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('my_app', '0005_auto_20170526_1534'),
    ]
    operations = [
        migrations.AddField(
            model_name='phenotype',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| ortutay/23andme-phenotypes-hackathon | my_app/my_app/migrations/0006_phenotype_user.py | Python | mit | 689 |
#!/usr/bin/env python
# Copyright (c) 2016, Daniel Liew
# This file is covered by the license in LICENSE-SVCB.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read a result info describing a set of KLEE test case replays.
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
from load_klee_analysis import add_kleeanalysis_to_module_search_path
from load_native_analysis import add_nativeanalysis_to_module_search_path
add_KleeRunner_to_module_search_path()
add_kleeanalysis_to_module_search_path()
add_nativeanalysis_to_module_search_path()
from KleeRunner import ResultInfo
import KleeRunner.DriverUtil as DriverUtil
import KleeRunner.InvocationInfo
import KleeRunner.util
import nativeanalysis.analyse
import argparse
import logging
import os
import pprint
import subprocess
import sys
import yaml
_logger = logging.getLogger(__name__)
def main(args):
    """Load a NativeReplay result-info file and print a summary of test-case
    outcomes grouped by outcome type. Returns a process exit code."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('result_info_file',
                        help='Result info file',
                        type=argparse.FileType('r'))
    parser.add_argument('--dump-unknowns',
                        dest='dump_unknowns',
                        action='store_true')
    parser.add_argument('--dump-timeouts',
                        dest='dump_timeouts',
                        action='store_true')
    DriverUtil.parserAddLoggerArg(parser)
    pargs = parser.parse_args()
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger.info('Loading "{}"...'.format(pargs.result_info_file.name))
    resultInfos, resultInfoMisc = ResultInfo.loadResultInfos(pargs.result_info_file)
    _logger.info('Loading complete')
    # Check the misc data
    if resultInfoMisc is None:
        _logger.error('Expected result info to have misc data')
        return 1
    if resultInfoMisc['runner'] != 'NativeReplay':
        _logger.error('Expected runner to have been NativeReplay but was "{}"'.format(
            resultInfoMisc['runner']))
        return 1
    # Bucket each result's outcome by its concrete outcome type.
    errorTypeToErrorListMap = dict()
    # NOTE(review): never appended to, so the "multiple outcomes" count
    # below is always 0 -- presumably unfinished logic; confirm.
    multipeOutcomeList = []
    for result_index, r in enumerate(resultInfos):
        _logger.info('Processing {}/{}'.format(result_index + 1, len(resultInfos)))
        raw_result = r.GetInternalRepr()
        # NOTE(review): program_path is computed but unused.
        program_path = r.RawInvocationInfo['program']
        outcome = nativeanalysis.analyse.get_test_case_run_outcome(raw_result)
        error_list = None
        try:
            error_list = errorTypeToErrorListMap[type(outcome)]
        except KeyError:
            error_list = []
            errorTypeToErrorListMap[type(outcome)] = error_list
        error_list.append(outcome)
    # Print report
    print('#'*70)
    print("# of test cases with multiple outcomes: {}".format(len(multipeOutcomeList)))
    for ty, error_list in errorTypeToErrorListMap.items():
        print("# of {}: {}".format(ty, len(error_list)))
        if ty == nativeanalysis.analyse.UnknownError and pargs.dump_unknowns:
            for error in error_list:
                print(error)
        if ty == nativeanalysis.analyse.TimeoutError and pargs.dump_timeouts:
            for error in error_list:
                print(error)
    # Now emit as YAML
    #as_yaml = yaml.dump(program_to_coverage_info, default_flow_style=False)
    #pargs.output_yaml.write(as_yaml)
    return 0
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| delcypher/klee-runner | tools/result-info-native-replay-summary.py | Python | mit | 3,311 |
from __future__ import unicode_literals
"""
product initialization stuff
"""
import os
import featuremonkey
from .composer import get_composer
from django_productline import compare_version
_product_selected = False  # one-shot guard: select_product() runs its setup only once
def select_product():
    """
    Bind the frozen context and compose the selected features.

    Should be called only once - calls after the first call have
    no effect. The import order in this function is deliberate: Django
    settings/db must not be imported before composition finishes.
    """
    global _product_selected
    if _product_selected:
        # tss already bound ... ignore
        return
    _product_selected = True
    from django_productline import context, template
    # Guard against premature imports of django.conf/django.db while the
    # feature composition is still in progress.
    featuremonkey.add_import_guard('django.conf')
    featuremonkey.add_import_guard('django.db')
    os.environ['DJANGO_SETTINGS_MODULE'] = 'django_productline.settings'
    contextfile = os.environ['PRODUCT_CONTEXT_FILENAME']
    equationfile = os.environ['PRODUCT_EQUATION_FILENAME']
    #bind context and compose features
    context.bind_context(contextfile)
    get_composer().select_equation(equationfile)
    # after composition we are now able to bind composed template settings
    template.bind_settings()
    featuremonkey.remove_import_guard('django.conf')
    featuremonkey.remove_import_guard('django.db')
    import django
    if compare_version(django.get_version(), '1.7') >= 0:
        django.setup()
    # force import of settings and urls
    # better fail during initialization than on the first request
    from django.conf import settings
    from django.core.urlresolvers import get_resolver
    # eager creation of URLResolver
    get_resolver(None)
    # make sure overextends tag is registered
    from django.template.loader import get_template
    from overextends import models
def get_wsgi_application():
    """
    returns the wsgi application for the selected product
    this function is called by featuredjango.wsgi to get the wsgi
    application object
    if you need to refine the wsgi application object e.g. to add
    wsgi middleware please refine django.core.wsgi.get_wsgi_application directly.
    """
    # make sure the product is selected before importing and constructing wsgi app
    select_product()
    # return (possibly refined) wsgi application
    # (imported lazily: importing it earlier would pull in Django settings
    # before composition)
    from django.core.wsgi import get_wsgi_application
    return get_wsgi_application()
| henzk/django-productline | django_productline/startup.py | Python | mit | 2,282 |
"""
Nodes that use web services to do something.
"""
import json
import httplib2
import urllib
from BeautifulSoup import BeautifulSoup
from nodetree import node, exceptions
from . import base
from .. import stages
class WebServiceNodeError(exceptions.NodeError):
    """Raised when a remote web service call fails (rate limit, bad status)."""
    pass
class BaseWebService(node.Node, base.TextWriterMixin):
    """Base class for web service nodes."""
    abstract = True
    stage = stages.POST
    # Every web-service node consumes unicode text and produces unicode text.
    intypes = [unicode]
    outtype = unicode
class MashapeProcessing(BaseWebService):
    """Phrase extraction or sentiment analysis via the text-processing.com API.

    The ``extract`` parameter selects the endpoint: "phrases" or "sentiment".
    """
    stage = stages.POST
    baseurl = "http://text-processing.com/api/"
    parameters = [
        dict(name="extract", value="phrases", choices=["phrases", "sentiment"]),
    ]
    def process(self, input):
        http = httplib2.Http()
        headers = {}
        # Only the first 10000 characters are submitted.
        body = dict(text=input[:10000].encode("utf8", "replace"))
        # Bugfix: baseurl already ends with a slash, so the original
        # "%s/%s/" join produced a double slash ("/api//phrases/").
        url = "%s%s/" % (self.baseurl, self._params.get("extract", "phrases"))
        request, content = http.request(url, "POST", headers=headers, body=urllib.urlencode(body))
        if request["status"] == "503":
            raise WebServiceNodeError("Daily limit exceeded", self)
        elif request["status"] == "400":
            raise WebServiceNodeError("No text, limit exceeded, or incorrect language", self)
        out = u""
        try:
            data = json.loads(content)
        except ValueError:
            # Not JSON: hand the raw payload back so the caller can inspect it.
            return content
        # Render the entity groups we care about as an indented text outline.
        for key in ["GPE", "VP", "LOCATION", "NP", "DATE"]:
            keydata = data.get(key)
            if keydata is not None:
                out += "%s\n" % key
                for entity in keydata:
                    out += "    %s\n" % entity
        return out
class DBPediaAnnotate(BaseWebService):
    """Annotate text with entity links using the DBpedia Spotlight API."""
    stage = stages.POST
    baseurl = "http://spotlight.dbpedia.org/rest/annotate/"
    parameters = [
        # The node parameter is named "confident" but is sent to the API
        # under the key "confidence" (see process() below).
        dict(name="confident", value=0.2),
        dict(name="support", value=20),
    ]
    def process(self, input):
        http = httplib2.Http()
        headers = {}
        body = dict(
            text=input.encode("utf8", "replace"),
            confidence=self._params.get("confident"),
            support=self._params.get("support"),
        )
        url = "%s?%s" % (self.baseurl, urllib.urlencode(body))
        request, content = http.request(url, "GET", headers=headers)
        if request["status"] != "200":
            raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
        out = u""
        # Extract the link text and href of every anchor in the returned HTML.
        soup = BeautifulSoup(content)
        for ref in soup.findAll("a"):
            out += "%s\n" % ref.text
            out += "    %s\n\n" % ref.get("href")
        return out
class OpenCalais(BaseWebService):
    """Semantic markup of text via the OpenCalais enrichment API."""
    stage = stages.POST
    baseurl = "http://api.opencalais.com/tag/rs/enrich"
    parameters = [
    ]
    def process(self, input):
        http = httplib2.Http()
        headers = {
            # NOTE(review): hard-coded API license key checked into source;
            # consider moving it to configuration.
            "x-calais-licenseID": "dsza6q6zwa9nzvz9wbz7f6y5",
            "content-type": "text/raw",
            "Accept": "xml/rdf",
            "enableMetadataType": "GenericRelations",
        }
        request, content = http.request(
            self.baseurl,
            "POST",
            headers=headers,
            body=input.encode("utf8")
        )
        if request["status"] != "200":
            raise WebServiceNodeError("A web service error occured. Status: %s" % request["status"], self)
        return content.decode("utf8")
| mikesname/python-ocrlab | ocrsite/ocrlab/nodes/web.py | Python | mit | 3,561 |
from unittest import skip
from django.shortcuts import resolve_url as r
from django.test import TestCase
@skip
class BotViewTest(TestCase):
    """Posts a fake Messenger-style payload to the bot endpoint (skipped)."""
    def setUp(self):
        # Minimal imitation of a Facebook Messenger webhook payload.
        fake_message = {
            'data': [{
                'entry': [
                    {
                        'messaging': {
                            'message': {
                                'text': 'Texto Mensagem'
                            },
                            'sender': {
                                'id': 123,
                            },
                        }
                    }
                ]
            }]
        }
        import json
        # NOTE(review): `data` is built and printed but never sent -- the raw
        # dict is posted below. Confirm whether the JSON form was intended.
        data = json.dumps(str(fake_message))
        print(data)
        self.response = self.client.post(r('bot:main'), fake_message)
    def test_get(self):
        self.assertEqual(200, self.response.status_code)
| tyagow/FacebookBot | src/bot/tests_bot/test_view.py | Python | mit | 870 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import StringIO
from pytest import raises
from aspen.website import Website
from aspen.http.response import Response
from aspen.exceptions import BadLocation
# Minimal error simplate used by the show_tracebacks tests below: renders
# the response body as text/plain via the stdlib_format renderer.
simple_error_spt = """
[---]
[---] text/plain via stdlib_format
{response.body}
"""
# Tests
# =====
def test_basic():
    """A default Website roots itself in the current working directory."""
    site = Website()
    assert site.www_root == os.getcwd()
# --- Responses, tracebacks, and user-provided error simplates ------------
def test_normal_response_is_returned(harness):
    harness.fs.www.mk(('index.html', "Greetings, program!"))
    expected = '\r\n'.join("""\
HTTP/1.1
Content-Type: text/html
Greetings, program!
""".splitlines())
    actual = harness.client.GET()._to_http('1.1')
    assert actual == expected
def test_fatal_error_response_is_returned(harness):
    harness.fs.www.mk(('index.html.spt', "[---]\nraise heck\n[---]\n"))
    expected = 500
    actual = harness.client.GET(raise_immediately=False).code
    assert actual == expected
def test_redirect_has_only_location(harness):
    harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
website.redirect('http://elsewhere', code=304)
[---]"""))
    actual = harness.client.GET(raise_immediately=False)
    assert actual.code == 304
    headers = actual.headers
    assert headers.keys() == ['Location']
def test_nice_error_response_is_returned(harness):
    harness.short_circuit = False
    harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(500)
[---]"""))
    assert harness.client.GET(raise_immediately=False).code == 500
def test_nice_error_response_is_returned_for_404(harness):
    harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404)
[---]"""))
    assert harness.client.GET(raise_immediately=False).code == 404
# Tracebacks must stay hidden unless website.show_tracebacks is enabled.
def test_response_body_doesnt_expose_traceback_by_default(harness):
    harness.fs.project.mk(('error.spt', simple_error_spt))
    harness.fs.www.mk(('index.html.spt', """
[---]
raise Exception("Can I haz traceback ?")
[---]"""))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 500
    assert "Can I haz traceback ?" not in response.body
def test_response_body_exposes_traceback_for_show_tracebacks(harness):
    harness.client.website.show_tracebacks = True
    harness.fs.project.mk(('error.spt', simple_error_spt))
    harness.fs.www.mk(('index.html.spt', """
[---]
raise Exception("Can I haz traceback ?")
[---]"""))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 500
    assert "Can I haz traceback ?" in response.body
def test_default_error_simplate_doesnt_expose_raised_body_by_default(harness):
    harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404, "Um, yeah.")
[---]"""))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 404
    assert "Um, yeah." not in response.body
def test_default_error_simplate_exposes_raised_body_for_show_tracebacks(harness):
    harness.client.website.show_tracebacks = True
    harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404, "Um, yeah.")
[---]"""))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 404
    assert "Um, yeah." in response.body
def test_nice_error_response_can_come_from_user_error_spt(harness):
    harness.fs.project.mk(('error.spt', '[---]\n[---] text/plain\nTold ya.'))
    harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(420)
[---]"""))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 420
    assert response.body == 'Told ya.'
def test_nice_error_response_can_come_from_user_420_spt(harness):
    harness.fs.project.mk(('420.spt', """
[---]
msg = "Enhance your calm." if response.code == 420 else "Ok."
[---] text/plain
%(msg)s"""))
    harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(420)
[---]"""))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 420
    assert response.body == 'Enhance your calm.'
# --- Content negotiation for error responses -----------------------------
def test_delegate_error_to_simplate_respects_original_accept_header(harness):
    harness.fs.project.mk(('error.spt', """[---]
[---] text/fake
Lorem ipsum
[---] text/html
<p>Error</p>
[---] text/plain
Error
"""))
    harness.fs.www.mk(('foo.spt',"""
from aspen import Response
[---]
raise Response(404)
[---] text/plain
"""))
    response = harness.client.GET('/foo', raise_immediately=False, HTTP_ACCEPT=b'text/fake')
    assert response.code == 404
    assert 'text/fake' in response.headers['Content-Type']
def test_default_error_spt_handles_text_html(harness):
    harness.fs.www.mk(('foo.html.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
    response = harness.client.GET('/foo.html', raise_immediately=False)
    assert response.code == 404
    assert 'text/html' in response.headers['Content-Type']
def test_default_error_spt_handles_application_json(harness):
    harness.fs.www.mk(('foo.json.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
    response = harness.client.GET('/foo.json', raise_immediately=False)
    assert response.code == 404
    assert response.headers['Content-Type'] == 'application/json'
    assert response.body == '''\
{ "error_code": 404
, "error_message_short": "Not Found"
, "error_message_long": ""
}
'''
def test_default_error_spt_application_json_includes_msg_for_show_tracebacks(harness):
    harness.client.website.show_tracebacks = True
    harness.fs.www.mk(('foo.json.spt',"""
from aspen import Response
[---]
raise Response(404, "Right, sooo...")
[---]
"""))
    response = harness.client.GET('/foo.json', raise_immediately=False)
    assert response.code == 404
    assert response.headers['Content-Type'] == 'application/json'
    assert response.body == '''\
{ "error_code": 404
, "error_message_short": "Not Found"
, "error_message_long": "Right, sooo..."
}
'''
def test_default_error_spt_falls_through_to_text_plain(harness):
    harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
    response = harness.client.GET('/foo.xml', raise_immediately=False)
    assert response.code == 404
    assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
    assert response.body == "Not found, program!\n\n"
def test_default_error_spt_fall_through_includes_msg_for_show_tracebacks(harness):
    harness.client.website.show_tracebacks = True
    harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404, "Try again!")
[---]
"""))
    response = harness.client.GET('/foo.xml', raise_immediately=False)
    assert response.code == 404
    assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
    assert response.body == "Not found, program!\nTry again!\n"
def test_custom_error_spt_without_text_plain_results_in_406(harness):
    harness.fs.project.mk(('error.spt', """
[---]
[---] text/html
<h1>Oh no!</h1>
"""))
    harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
    response = harness.client.GET('/foo.xml', raise_immediately=False)
    assert response.code == 406
def test_custom_error_spt_with_text_plain_works(harness):
    harness.fs.project.mk(('error.spt', """
[---]
[---] text/plain
Oh no!
"""))
    harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
    response = harness.client.GET('/foo.xml', raise_immediately=False)
    assert response.code == 404
    assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
    assert response.body == "Oh no!\n"
def test_autoindex_response_is_404_by_default(harness):
    harness.fs.www.mk(('README', "Greetings, program!"))
    assert harness.client.GET(raise_immediately=False).code == 404
def test_autoindex_response_is_returned(harness):
    harness.fs.www.mk(('README', "Greetings, program!"))
    harness.client.website.list_directories = True
    body = harness.client.GET(raise_immediately=False).body
    assert 'README' in body
def test_resources_can_import_from_project_root(harness):
    harness.fs.project.mk(('foo.py', 'bar = "baz"'))
    harness.fs.www.mk(('index.html.spt', "from foo import bar\n[---]\n[---]\nGreetings, %(bar)s!"))
    assert harness.client.GET(raise_immediately=False).body == "Greetings, baz!"
def test_non_500_response_exceptions_dont_get_folded_to_500(harness):
    harness.fs.www.mk(('index.html.spt', '''
from aspen import Response
[---]
raise Response(400)
[---]
'''))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 400
def test_errors_show_tracebacks(harness):
    harness.fs.www.mk(('index.html.spt', '''
from aspen import Response
[---]
website.show_tracebacks = 1
raise Response(400,1,2,3,4,5,6,7,8,9)
[---]
'''))
    response = harness.client.GET(raise_immediately=False)
    assert response.code == 500
    assert 'Response(400,1,2,3,4,5,6,7,8,9)' in response.body
# --- WSGI middleware wrapping --------------------------------------------
class TestMiddleware(object):
    """Simple WSGI middleware for testing."""
    def __init__(self, app):
        self.app = app
    def __call__(self, environ, start_response):
        # Intercept only /middleware; delegate everything else to the app.
        if environ['PATH_INFO'] == '/middleware':
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return ['TestMiddleware']
        return self.app(environ, start_response)
def build_environ(path):
    """Build WSGI environ for testing."""
    return {
        'REQUEST_METHOD': b'GET',
        'PATH_INFO': path,
        'QUERY_STRING': b'',
        'SERVER_SOFTWARE': b'build_environ/1.0',
        'SERVER_PROTOCOL': b'HTTP/1.1',
        'wsgi.input': StringIO.StringIO()
    }
def test_call_wraps_wsgi_middleware(client):
    client.website.algorithm.default_short_circuit = False
    client.website.wsgi_app = TestMiddleware(client.website.wsgi_app)
    # respond[i] flips to True once the matching start_response ran.
    respond = [False, False]
    def start_response_should_404(status, headers):
        assert status.lower().strip() == '404 not found'
        respond[0] = True
    client.website(build_environ('/'), start_response_should_404)
    assert respond[0]
    def start_response_should_200(status, headers):
        assert status.lower().strip() == '200 ok'
        respond[1] = True
    client.website(build_environ('/middleware'), start_response_should_200)
    assert respond[1]
# redirect: codes, Location construction, and base_url validation
def test_redirect_redirects(website):
    assert raises(Response, website.redirect, '/').value.code == 302
def test_redirect_code_is_settable(website):
    assert raises(Response, website.redirect, '/', code=8675309).value.code == 8675309
def test_redirect_permanent_is_301(website):
    assert raises(Response, website.redirect, '/', permanent=True).value.code == 301
def test_redirect_without_website_base_url_is_fine(website):
    assert raises(Response, website.redirect, '/').value.headers['Location'] == '/'
def test_redirect_honors_website_base_url(website):
    website.base_url = 'foo'
    assert raises(Response, website.redirect, '/').value.headers['Location'] == 'foo/'
def test_redirect_can_override_base_url_per_call(website):
    website.base_url = 'foo'
    assert raises(Response, website.redirect, '/', base_url='b').value.headers['Location'] == 'b/'
def test_redirect_declines_to_construct_bad_urls(website):
    raised = raises(BadLocation, website.redirect, '../foo', base_url='http://www.example.com')
    assert raised.value.body == 'Bad redirect location: http://www.example.com../foo'
def test_redirect_declines_to_construct_more_bad_urls(website):
    raised = raises(BadLocation, website.redirect, 'http://www.example.org/foo',
                    base_url='http://www.example.com')
    assert raised.value.body == 'Bad redirect location: '\
                                'http://www.example.comhttp://www.example.org/foo'
def test_redirect_will_construct_a_good_absolute_url(website):
    response = raises(Response, website.redirect, '/foo', base_url='http://www.example.com').value
    assert response.headers['Location'] == 'http://www.example.com/foo'
def test_redirect_will_allow_a_relative_path(website):
    response = raises(Response, website.redirect, '../foo', base_url='').value
    assert response.headers['Location'] == '../foo'
def test_redirect_will_allow_an_absolute_url(website):
    response = raises(Response, website.redirect, 'http://www.example.org/foo', base_url='').value
    assert response.headers['Location'] == 'http://www.example.org/foo'
def test_redirect_can_use_given_response(website):
    response = Response(65, 'Greetings, program!', {'Location': 'A Town'})
    response = raises(Response, website.redirect, '/flah', response=response).value
    assert response.code == 302                      # gets clobbered
    assert response.headers['Location'] == '/flah'   # gets clobbered
    assert response.body == 'Greetings, program!'    # not clobbered
# canonicalize_base_url: requests for the wrong host are redirected
def test_canonicalize_base_url_canonicalizes_base_url(harness):
    harness.fs.www.mk(('index.html', 'Greetings, program!'))
    harness.client.hydrate_website(base_url='http://example.com')
    response = harness.client.GxT()
    assert response.code == 302
    assert response.headers['Location'] == 'http://example.com/'
def test_canonicalize_base_url_includes_path_and_qs_for_GET(harness):
    harness.fs.www.mk(('index.html', 'Greetings, program!'))
    harness.client.hydrate_website(base_url='http://example.com')
    response = harness.client.GxT('/foo/bar?baz=buz')
    assert response.code == 302
    assert response.headers['Location'] == 'http://example.com/foo/bar?baz=buz'
def test_canonicalize_base_url_redirects_to_homepage_for_POST(harness):
    harness.fs.www.mk(('index.html', 'Greetings, program!'))
    harness.client.hydrate_website(base_url='http://example.com')
    response = harness.client.PxST('/foo/bar?baz=buz')
    assert response.code == 302
    assert response.headers['Location'] == 'http://example.com/'
def test_canonicalize_base_url_allows_good_base_url(harness):
    harness.fs.www.mk(('index.html', 'Greetings, program!'))
    harness.client.hydrate_website(base_url='http://localhost')
    response = harness.client.GET()
    assert response.code == 200
    assert response.body == 'Greetings, program!'
def test_canonicalize_base_url_is_noop_without_base_url(harness):
    harness.fs.www.mk(('index.html', 'Greetings, program!'))
    harness.client.hydrate_website()
    response = harness.client.GET()
    assert response.code == 200
    assert response.body == 'Greetings, program!'
| jaraco/aspen | tests/test_website.py | Python | mit | 14,886 |
###
# Copyright (c) 2013, Nicolas Coevoet
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class ChanRegTestCase(PluginTestCase):
    """Placeholder Supybot test case for the ChanReg plugin (no tests yet)."""
    plugins = ('ChanReg',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| ncoevoet/ChanReg | test.py | Python | mit | 1,737 |
from flask import render_template, flash, redirect, request, url_for, abort
from flask_login import login_user, logout_user, login_required, current_user
from . import loans
from forms import LoanApplicationForm, ApproveLoan, RepayLoan
from ..models import db
from ..models import Loan, User
from datetime import date
@loans.route('/new_loan', methods=['GET', 'POST'])
@login_required
def request_loan():
    """Let an approved borrower apply for a new loan.

    Rejects users who are not borrowers (403), who are not yet approved
    (404), who still owe money, or who already have a pending request.
    On a valid submission the loan is stored and flagged as requested.
    """
    if not current_user.is_borrower:
        abort(403)
    elif not current_user.is_approved:
        abort(404)
    elif current_user.is_owing:
        flash('You cannot request a new loan if your still due!')
        return redirect(url_for('loans.view'))
    elif current_user.has_requested_loan:
        flash('You cannot request a new loan if your last loan hasnt been approved!')
        return redirect(url_for('loans.view'))
    else:
        form = LoanApplicationForm()
        if form.validate_on_submit():
            loan = Loan(loan_amt=form.loan_amt.data,
                        user=current_user._get_current_object())
            if loan.loan_amt > loan.user.max_credit_amt:
                flash('You can only borrow to a maximum of %s' %
                      loan.user.max_credit_amt)
                return redirect(url_for('loans.request_loan'))
            # Bugfix: the original set `loans.is_requested` -- an attribute
            # on the blueprint module -- instead of flagging the new Loan.
            loan.is_requested = True
            loan.user.has_requested_loan = True
            db.session.add(loan)
            db.session.commit()
            flash(
                'Success.Your Loan Application has been submitted.View it below.')
            return redirect(url_for('loans.view'))
        return render_template('loans/request_loan.html',
                               form=form, title="New Loan")
@loans.route('/view_history')
@login_required
def view():
    """Show the signed-in borrower their loan requests, newest first."""
    if not current_user.is_borrower:
        abort(403)
    if not current_user.is_approved:
        abort(404)
    my_loans = (Loan.query
                .filter(Loan.user_id == current_user.id)
                .order_by(Loan.requested_on.desc())
                .all())
    return render_template('loans/view.html',
                           loans=my_loans, title="My Loan Reqests")
@loans.route('/view_payment_history')
@login_required
def view_payment_history():
    """Show the signed-in borrower's loans on the payment-history page."""
    if not current_user.is_borrower:
        abort(403)
    if not current_user.is_approved:
        abort(404)
    my_loans = (Loan.query
                .filter(Loan.user_id == current_user.id)
                .order_by(Loan.requested_on.desc())
                .all())
    return render_template('loans/view-payments.html',
                           loans=my_loans, title="My Loan Reqests")
'''View for if the user is credit worthy and can now borrow'''
@loans.route('/repay/loan/<id>', methods=['GET', 'POST'])
@login_required
def repay_loan(id):
    """Accept a repayment submission for an approved loan."""
    if not current_user.is_borrower:
        abort(403)
    loan = Loan.query.filter_by(id=id).first()
    if loan is None:
        abort(404)
    if not loan.is_approved:
        flash('You cannot repay a loan that hasnt been approved')
        return redirect(url_for('loans.view'))
    else:
        form = RepayLoan()
        if current_user.is_borrower and form.validate_on_submit():
            # NOTE(review): the payment is assigned to loan.my_funds but no
            # db.session.commit() follows -- confirm persistence happens
            # elsewhere (e.g. in a model hook), otherwise it is lost.
            loan.my_funds = form.my_funds.data
            flash('Your payment has been received. Please wait while we confirm it.')
            return redirect(url_for('loans.view'))
        return render_template('loans/repay-loan.html', form=form, loan=loan)
@loans.route('/clear/loan/balance/<id>', methods=['GET', 'POST'])
@login_required
def clear_loan_balance(id):
    """Accept a balance-clearing payment for an approved loan.

    NOTE(review): this is a near-duplicate of repay_loan (same form, same
    template) and has the same missing-commit concern -- consider merging.
    """
    if not current_user.is_borrower:
        abort(403)
    loan = Loan.query.filter_by(id=id).first()
    if loan is None:
        abort(404)
    if not loan.is_approved:
        flash('You cannot repay a loan that hasnt been approved')
        return redirect(url_for('loans.view'))
    form = RepayLoan()
    if current_user.is_borrower and form.validate_on_submit():
        loan.my_funds = form.my_funds.data
        flash('Your payment has been received. Please wait while we confirm it.')
        return redirect(url_for('loans.view'))
    return render_template('loans/repay-loan.html', form=form, loan=loan)
| Kimanicodes/wananchi | app/loans/views.py | Python | mit | 4,187 |
import re
import click
from lancet.utils import hr
@click.command()
@click.argument("query", required=False)
@click.pass_obj
def projects(lancet, query):
    """List Harvest projects, optionally filtered with a regexp."""
    entries = lancet.timer.projects()
    if query:
        pattern = re.compile(query, flags=re.IGNORECASE)

        def keep(entry):
            # Remember the match on the entry so the loop below can
            # highlight the matched substring.
            hit = pattern.search(entry["name"])
            if hit is None:
                return False
            entry["match"] = hit
            return True

        entries = filter(keep, entries)

    for entry in sorted(entries, key=lambda e: e["name"].lower()):
        label = entry["name"]
        hit = entry.get("match")
        if hit is not None:
            start, end = hit.start(), hit.end()
            highlighted = click.style(label[start:end], fg="green")
            label = label[:start] + highlighted + label[end:]
        click.echo(
            "{:>9d} {} {}".format(
                entry["id"], click.style("‣", fg="yellow"), label
            )
        )
@click.command()
@click.argument("project_id", type=int)
@click.pass_obj
def tasks(lancet, project_id):
    """List Harvest tasks for the given project ID."""
    # The bullet is constant, so style it once outside the loop.
    bullet = click.style("‣", fg="yellow")
    for task in lancet.timer.tasks(project_id):
        click.echo("{:>9d} {} {}".format(task["id"], bullet, task["name"]))
| GaretJax/lancet | lancet/commands/harvest.py | Python | mit | 1,408 |
"""
VerseBot for Reddit
By Matthieu Grieger
Continued By Team VerseBot
response.py
Copyright (c) 2015 Matthieu Grieger (MIT License)
"""
MAXIMUM_MESSAGE_LENGTH = 4000
class Response:
    """A reddit comment reply built from the verses quoted in a message."""

    def __init__(self, message, parser, link=None):
        """Set up an empty response for *message*.

        :param message: the praw message/comment being responded to
        :param parser: parser used to interpret verse references
        :param link: optional permalink associated with the message
        """
        self.verse_list = []
        self.message = message
        self.parser = parser
        self.response = ""
        self.link = link if link is not None else ''

    def add_verse(self, verse):
        """Append *verse* to the list of verses to quote.

        :param verse: Verse to add to the list of verses
        """
        self.verse_list.append(verse)

    def is_duplicate_verse(self, verse):
        """Return True when an equivalent verse was already added.

        :param verse: Verse to check duplicates for
        """
        return any(v.book == verse.book and
                   v.chapter == verse.chapter and
                   v.verse == verse.verse and
                   v.translation == verse.translation
                   for v in self.verse_list)

    def construct_message(self):
        """Render the full comment body, or None when there is nothing to say."""
        for verse in self.verse_list:
            verse.get_contents()
            if verse.contents is None:
                continue
            # Chapter-only quotations omit the verse number in the header.
            if verse.verse is not None:
                header = ("[**%s %d:%s | %s**](%s)\n\n>"
                          % (verse.book, verse.chapter,
                             verse.verse, verse.translation_title,
                             verse.permalink))
            else:
                header = ("[**%s %d | %s**](%s)\n\n>"
                          % (verse.book, verse.chapter,
                             verse.translation_title,
                             verse.permalink))
            self.response += header + verse.contents + "\n\n"
        if not self.response:
            return None
        if self.exceeds_max_length():
            self.response = self.generate_overflow_response()
        # self.response += self.get_comment_footer()
        return self.response

    def exceeds_max_length(self):
        """True when the rendered response exceeds the comment size cap."""
        return len(self.response) > MAXIMUM_MESSAGE_LENGTH

    def generate_overflow_response(self):
        """Build a links-only comment for when the full text is too long.

        Instead of quoting the contents of the verse(s), each verse is
        rendered as a link to a page that contains it.
        """
        comment = ("The contents of the verse(s) you quoted exceed the %d "
                   "character limit. Instead, here are links to the "
                   "verse(s)!\n\n" % MAXIMUM_MESSAGE_LENGTH)
        for verse in self.verse_list:
            # JPS has no BibleGateway page, so always use the permalink.
            if verse.translation == "JPS":
                target = verse.permalink
            elif verse.verse is not None:
                target = ("https://www.biblegateway.com/passage/"
                          "?search=%s+%s:%s&version=%s"
                          % (verse.book, verse.chapter, verse.verse,
                             verse.translation))
            else:
                target = verse.permalink
            if verse.verse is not None:
                comment += ("- [%s %d:%s (%s)](%s)\n\n"
                            % (verse.book, verse.chapter, verse.verse,
                               verse.translation, target))
            else:
                comment += ("- [%s %d (%s)](%s)\n\n"
                            % (verse.book, verse.chapter, verse.translation,
                               target))
        return comment
'''
def get_comment_footer(self):
""" Returns the footer for the comment. """
return ("\n***\n[^Code](https://github.com/Team-VerseBot/versebot) ^|"
" ^/r/VerseBot ^| [^Contact ^Devs](https://github.com/"
"Team-VerseBot/versebot/issues) ^|"
" [^Usage](https://github.com/Team-VerseBot/versebot/blob/"
"master/README.md) ^|"
" [^Changelog](https://github.com/Team-VerseBot/versebot/blob/"
"master/CHANGELOG.md) ^|"
" [^Stats](http://adamgrieger.com/versebot/) ^|"
" [^Set ^a ^Default ^Translation](http://adamgrieger.com/"
"versebot#defaults) \n\n"
"^All ^texts ^provided ^by [^BibleGateway]"
"(http://biblegateway.com) ^and [^Bible ^Hub]"
"(http://biblehub.com)^. \n\n"
" ^Mistake? ^%(user)s ^can [^edit](/message/compose/"
"?to=%(bot)s&subject=edit+request&message={%(link)s} "
"Please+enter+your+revised+verse+quotations+below+in+the+usual"
"+bracketed+syntax.)"
" ^or [^delete](/message/compose/?to=%(bot)s&subject=delete"
"+request&message={%(link)s} "
"This+action+cannot+be+reversed!) ^this ^comment."
% {"user": self.message.author, "bot": REDDIT_USERNAME,
"link": self.link})
'''
| Matthew-Arnold/slack-versebot | versebot/response.py | Python | mit | 5,650 |
#!/usr/bin/env python3
'''
Make a stream emit at the pace of a slower stream
Pros:
Introduce a delay between events in an otherwise rapid stream (like range)
Cons:
When the stream being delayed runs out of events to push, the zipped stream
will keep pushing events, defined with the lambda fn passed to the zip operation.
'''
from time import sleep
from rx import Observable
# Generate an interval sequence, firing once each second
interval = Observable.interval(1000)
# 5..10
numbers = Observable.from_(range(5, 11))
# Zip two streams together so it emits at the pace of the slowest stream
source = Observable.zip(
    interval,
    numbers,
    # Because we only push the elements of the `numbers` stream,
    # as soon as it runs out of events, it will keep sending empty
    # events to the subscribers
    lambda _, n: n
)
sub1 = source.subscribe(
    lambda v : print("Value published to observer 1: {0}".format(v)),
    lambda e : print("Error! {0}".format(e)),
    lambda : print("Completed!")
)
sub2 = source.subscribe(
    lambda v : print("Value published to observer 2: {0}".format(v)),
    lambda e : print("Error! {0}".format(e)),
    lambda : print("Completed!")
)
# As noted above, we have to dispose the subscriptions before the `numbers`
# stream runs out, or the program will get stuck listening to empty events
sleep(5)
sub1.dispose()
sub2.dispose()
# => Value published to observer 1: 5
# => Value published to observer 2: 5
# => Value published to observer 1: 6
# => Value published to observer 2: 6
# => Value published to observer 2: 7
# => Value published to observer 1: 7
# => Value published to observer 2: 8
# => Value published to observer 1: 8
| Pysellus/streaming-api-test | rx-tests/rx-stream-pacing.py | Python | mit | 1,709 |
from __future__ import absolute_import
# encoding: UTF-8
from six import iteritems
from collections import defaultdict
from json import dumps
from ..config import CONFIG
class MissedKeys(object):
    """Collects translation keys that had no translation, grouped by
    source path, and submits them to the API in batches."""

    def __init__(self, client):
        self.client = client
        # source path -> set of missed Key objects
        self.key_folder = defaultdict(set)

    def append(self, key, source_path=None):
        """Record *key* as missed.

        Args:
            key (Key): translation key
            source_path (string, optional): source path under which the key
                is registered; falls back to the configured default source.
        """
        if source_path is None:
            source_path = CONFIG['default_source']
        self.key_folder[source_path].add(key)

    def register(self, source_path):
        """Ensure *source_path* exists in the folder with an empty key set."""
        self.key_folder[source_path] = set()

    def prepare(self):
        """Build the list-of-dicts payload expected by the API."""
        return [{'source': source, 'keys': [key.as_dict for key in keys]}
                for source, keys in iteritems(self.key_folder)]

    def submit(self, missed_keys):
        """POST *missed_keys* to the key-registration endpoint."""
        params = {'source_keys': dumps(missed_keys), 'options': '{"realtime": true}'}
        return self.client.post('sources/register_keys', params=params)

    def submit_all(self):
        """Submit every recorded key and reset the folder; no-op when empty."""
        if not self.key_folder:
            return
        response = self.submit(self.prepare())
        self.key_folder = defaultdict(set)
        return response
| translationexchange/tml-python | tml/translation/missed.py | Python | mit | 1,569 |
# -*- coding: utf-8 -*-
# Scrapy settings for appstore project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
# Bot identity used by Scrapy (user-agent default, log names, etc.).
BOT_NAME = 'appstore'
# Where Scrapy looks for existing spiders and where `genspider` adds new ones.
SPIDER_MODULES = ['appstore.spiders']
NEWSPIDER_MODULE = 'appstore.spiders'
# Route scraped items through the project pipeline (priority 300).
ITEM_PIPELINES = {
    'appstore.pipelines.AppstorePipeline': 300,
}
# Politeness delay (seconds) between requests to the same site.
DOWNLOAD_DELAY=5
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'appstore (+http://www.yourdomain.com)'
| brucexiejiaming/App_store | part1_crawler/appstore/appstore/settings.py | Python | mit | 579 |
#!/bin/python3
import sys
# Read the (unused) element count, then print the sum of the
# space-separated integers on the next line of stdin.
_count = int(input().strip())
values = [int(token) for token in input().strip().split(' ')]
print(sum(values))
| costincaraivan/hackerrank | algorithms/warmup/python3/simple_array_sum.py | Python | mit | 149 |
import math
import sys
sys.path.append('..')
import Analyse.AFX as AFX
class State:
    """Accumulates sentiment evidence while scanning one sentence."""

    def __init__(self):
        self.SenShifterState = True   # True: scores count toward positive
        self.MoodStrength = 1.0       # multiplier set by intensifiers/downtoners
        self.positive = 0.0
        self.negative = 0.0

    def Process(self, score):
        # Route the score to the bucket selected by the shifter flag.
        # NOTE: the identity check against True is deliberate -- callers
        # may store non-bool values in SenShifterState, and anything that
        # is not exactly True counts as negative.
        if self.SenShifterState is True:
            self.positive = self.positive + score
        else:
            self.negative = self.negative + score

    def Clear(self):
        # Reset to the freshly-constructed state.
        self.SenShifterState = True
        self.MoodStrength = 1.0
        self.positive = 0.0
        self.negative = 0.0

    def ChangeMood(self, mood):
        # Tags starting with 'I' (Increase) double the strength,
        # tags starting with 'D' (Downtone) halve it.
        if mood.startswith('I'):
            self.MoodStrength = self.MoodStrength * 2
        if mood.startswith('D'):
            self.MoodStrength = self.MoodStrength / 2

    def returnScore(self):
        # Net sentiment scaled by the accumulated mood strength.
        return (self.positive - self.negative) * self.MoodStrength
# Calculate the sentiment score of a single sentence.
def CaculateASentence(Sentence):
    """Score one tokenized sentence.

    Walks the words, looks up each word's tag/score via AFX, and
    accumulates positive/negative evidence in a State object.

    Args:
        Sentence: iterable of word tokens understood by AFX.GetWord.
    Returns:
        float: the sentence's net sentiment score.
    """
    S = State()
    for word in Sentence:
        tag = AFX.GetWord(word, 'Tag')
        # A word with no orientation (0.0) or a boring word adds nothing.
        # BUG FIX: string tags are now compared with == -- the original
        # used `is`, which relies on CPython string interning and is not
        # a reliable equality test.
        if tag == 0.0 or tag == "Bor":
            continue
        if tag == "Con":
            # A contrast word discards everything accumulated so far.
            S.Clear()
        elif tag == "Neg":
            # Negation flips the sentiment shifter.
            # BUG FIX: `not` keeps the flag a proper bool. The original
            # `-S.SenShifterState` produced -1/1, and since State.Process
            # only accepts `is True` as positive, a second negation never
            # restored the positive state.
            S.SenShifterState = not S.SenShifterState
        elif tag == "Inc" or tag == "Dow":
            S.ChangeMood(tag)
        else:
            S.Process(tag)
    return S.returnScore()
# Calculate the score of the whole document with the specific rules.
def Run(Data):
    """Score every non-empty sentence in Data, printing each sentence
    and its score, and counting how many sentences were processed.

    Args:
        Data: iterable of tokenized sentences.
    """
    counter = 0
    for Sen in Data:
        if Sen != []:
            # String tag compared with == (was `is`, which depends on
            # CPython string interning).
            if AFX.GetWord(Sen[0], 'Tag') == "Con":
                word = AFX.GetWord(Sen[0], 'Word')
            print(Sen)
            print(CaculateASentence(Sen))
            # BUG FIX: the original `++counter` is a double unary plus
            # (a no-op), not an increment.
            counter += 1
#Most people don't like rainy, even if I like the weather quite much.
| MyRookie/SentimentAnalyse | src/Algorithm/ScoreCaculating.py | Python | mit | 1,593 |
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `token` property of `area.stream`."""

    def __init__(self, plotly_name="token", parent_name="area.stream", **kwargs):
        # Collect the per-property defaults, letting explicit kwargs
        # override them, then delegate to StringValidator.
        options = dict(
            edit_type=kwargs.pop("edit_type", "calc"),
            no_blank=kwargs.pop("no_blank", True),
            role=kwargs.pop("role", "info"),
            strict=kwargs.pop("strict", True),
        )
        options.update(kwargs)
        super(TokenValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **options
        )
| plotly/python-api | packages/python/plotly/plotly/validators/area/stream/_token.py | Python | mit | 537 |
from django.shortcuts import render
from django.views.generic import View
from django.http import HttpResponseBadRequest
class GreetingsView(View):
    """Render a gendered greeting for a last name.

    Query parameters:
        gender: 'm' or 'f' (required).
        last_name: non-empty string (required).

    Invalid input yields a 400 response.
    """

    template_name = 'greeting.html'

    def get(self, request):
        gender = request.GET.get('gender', None)
        last_name = request.GET.get('last_name', None)

        # Validate before building the context so errors short-circuit.
        if gender not in ('m', 'f'):
            return HttpResponseBadRequest('Gender is either m or f')
        # Truthiness covers both None and '' (was `last_name == None or
        # last_name == ''` -- `== None` is a known anti-idiom).
        if not last_name:
            return HttpResponseBadRequest('Last name should be specified')

        context = {'last_name': last_name, 'male_greeting': gender == 'm'}
        return render(request, self.template_name, context)
# Skulpt regression test for negative-step range().
# In Python 2, range(5, 0, -3) == [5, 2] (a real list).
print str(range(5,0,-3))[:5]  # first 5 chars of the list's repr
print len(range(5,0,-3))      # element count
print range(5,0,-3)[0]        # first element
print range(5,0,-3)[1]        # second element
print range(5,0,-3)[-1]       # last element via negative index
| ArcherSys/ArcherSys | skulpt/test/run/t152.py | Python | mit | 124 |
# # # # #
# MOVE THE NEWLY DOWNLOADED TAS / PR CMIP5 data from work desktop to /Shared
# # # # #
def move_new_dir( fn, output_dir ):
    """Copy a CMIP5 NetCDF file into <output_dir>/<model>/<scenario>/<variable>/.

    The destination is derived from the underscore-delimited filename:
    variable_cmortable_model_scenario_experiment_years.nc

    Args:
        fn (str): path to the source .nc file.
        output_dir (str): root of the reorganized output tree.
    Returns:
        str: path of the copied file (shutil.copy's return value).
    """
    basename = os.path.basename( fn )
    variable, cmor_table, model, scenario, experiment, years = basename.split('.')[0].split( '_' )
    new_dir = os.path.join( output_dir, model, scenario, variable )
    try:
        os.makedirs( new_dir )
    except OSError:
        # Directory already exists (possibly created by a concurrent call).
        # BUG FIX: was a bare `except:` which hid every other failure too.
        pass
    return shutil.copy( fn, new_dir )
if __name__ == '__main__':
    import os, glob, shutil

    path = '/srv/synda/sdt/data'
    output_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/raw_cmip5_tas_pr'

    # Gather every NetCDF file beneath the synda download tree.
    filelist = []
    for root, subs, files in os.walk( path ):
        filelist.extend( os.path.join( root, name ) for name in files if name.endswith( '.nc' ) )

    # Copy each file into its model/scenario/variable slot.
    out = [ move_new_dir( fn, output_dir ) for fn in filelist ]
# # # # # # # #
# # CHECK FOR DUPLICATES and remove by hand. this is tedious.
# GFDL - OK
# CCSM4 - FIXED OK
# GISS-E2-R - OK
# IPSL - OK
# MRI - OK | ua-snap/downscale | snap_scripts/epscor_sc/move_raw_cmip5_tas_pr.py | Python | mit | 1,039 |