| repo_name (stringlengths: 5-100) | path (stringlengths: 4-231) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64: 6-947k) | score (float64: 0-0.34) | prefix (stringlengths: 0-8.16k) | middle (stringlengths: 3-512) | suffix (stringlengths: 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
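Read column by column, each row below is one Python source file: the repository and path identify it, and the prefix, middle, and suffix columns appear to hold a fill-in-the-middle style split of its text. Under that assumption (the field names below simply mirror the header, and the values are illustrative), a record can be stitched back together like so:

# Hedged sketch: reassemble one record's file text from its three text columns.
row = {
    "repo_name": "DependencyWatcher/crawler",
    "path": "dependencywatcher/crawler/rubygems.py",
    "prefix": "...text before the masked span...",
    "middle": "...the masked span itself...",
    "suffix": "...text after the masked span...",
}
full_text = row["prefix"] + row["middle"] + row["suffix"]
print("%s: %d characters" % (row["path"], len(full_text)))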
| DependencyWatcher/crawler | dependencywatcher/crawler/rubygems.py | Python | apache-2.0 | 1,359 | 0.003679 |
from dependencywatcher.crawler.detectors import Detector
import urllib2, json, logging, os
logger = logging.getLogger(__name__)
class RubyGemsDetector(Detector):
""" rubygems.org API based information detector """
url = "https://rubygems.org/api/v1/gems/%s.json"
auth = "af93e383246a774566bcf661f9c9f591"
def __init__(self, manifest):
self.json = None
super(RubyGemsDetector, self).__init__(manifest)
def get(self, library_name):
url = RubyGemsDetector.url % library_name
logger.debug("Opening URL: %s" % url)
request = urllib2.Request(url)
request.add_header("Authorization", RubyGemsDetector.auth)
return json.load(urllib2.urlopen(request))
def detect(self, what, options, result):
if self.json is None:
self.json = self.get(self.manifest["name"])
try:
if what == "url":
result[what] = self.normalize(what, self.json["homepage_uri"])
elif what == "license":
result[what] = self.normalize(what, ", ".join(self.json["licenses"]))
elif what in ["version"]:
result[what] = self.normalize(what, self.json[what])
if what == "description":
result[what] = self.normalize(what, self.json["info"])
except KeyError:
pass
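The sample above targets Python 2 (urllib2). Purely for orientation, a rough Python 3 equivalent of the same request, with a placeholder gem name and API key that are not taken from this file, would be:

import json
import urllib.request

url = "https://rubygems.org/api/v1/gems/%s.json" % "rails"  # gem name is illustrative
request = urllib.request.Request(url)
request.add_header("Authorization", "<your rubygems API key>")  # placeholder
with urllib.request.urlopen(request) as response:
    data = json.load(response)
print(data.get("homepage_uri"), data.get("licenses"))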
| jnadro/pybgfx | pybgfx/bgfx.py | Python | bsd-2-clause | 45,223 | 0.010548 |
'''
Python bindings for bgfx.
'''
__author__ = "Jason Nadro"
__copyright__ = "Copyright 2016, Jason Nadro"
__credits__ = ["Jason Nadro"]
__license__ = "BSD 2-clause"
__version__ = "0.0.1"
__maintainer__ = "Jason Nadro"
__email__ = ""
__status__ = "Development"
import ctypes
from ctypes import Structure, POINTER, cast, byref, CFUNCTYPE
from ctypes import c_bool, c_int, c_int8, c_int16, c_int32, c_int64, c_uint8, c_uint16, c_uint32, c_uint64, c_float, c_char_p, c_void_p, c_size_t, c_char
import os
bgfx_dll_path = os.path.dirname(__file__) + "\\bgfx-shared-libRelease"
_bgfx = ctypes.CDLL(bgfx_dll_path)
enum_type = c_int
# bgfx_renderer_type
bgfx_renderer_type = enum_type
(
BGFX_RENDERER_TYPE_NOOP,
BGFX_RENDERER_TYPE_DIRECT3D9,
BGFX_RENDERER_TYPE_DIRECT3D11,
BGFX_RENDERER_TYPE_DIRECT3D12,
BGFX_RENDERER_TYPE_GNM,
BGFX_RENDERER_TYPE_METAL,
BGFX_RENDERER_TYPE_OPENGLES,
BGFX_RENDERER_TYPE_OPENGL,
BGFX_RENDERER_TYPE_VULKAN,
BGFX_RENDERER_TYPE_COUNT
) = map(bgfx_renderer_type,range(10))
# bgfx_access
bgfx_access = enum_type
(
BGFX_ACCESS_READ,
BGFX_ACCESS_WRITE,
BGFX_ACCESS_READWRITE,
BGFX_ACCESS_COUNT
) = map(bgfx_access, range(4))
# bgfx_attrib
bgfx_attrib = enum_type
(
BGFX_ATTRIB_POSITION,
BGFX_ATTRIB_NORMAL,
BGFX_ATTRIB_TANGENT,
BGFX_ATTRIB_BITANGENT,
BGFX_ATTRIB_COLOR0,
BGFX_ATTRIB_COLOR1,
BGFX_ATTRIB_COLOR2,
BGFX_ATTRIB_COLOR3,
BGFX_ATTRIB_INDICES,
BGFX_ATTRIB_WEIGHT,
BGFX_ATTRIB_TEXCOORD0,
BGFX_ATTRIB_TEXCOORD1,
BGFX_ATTRIB_TEXCOORD2,
BGFX_ATTRIB_TEXCOORD3,
BGFX_ATTRIB_TEXCOORD4,
BGFX_ATTRIB_TEXCOORD5,
BGFX_ATTRIB_TEXCOORD6,
BGFX_ATTRIB_TEXCOORD7,
BGFX_ATTRIB_COUNT
) = map(bgfx_attrib, range(19))
# bgfx_attrib_type
bgfx_attrib_type = enum_type
(
BGFX_ATTRIB_TYPE_UINT8,
BGFX_ATTRIB_TYPE_UINT10,
BGFX_ATTRIB_TYPE_INT16,
BGFX_ATTRIB_TYPE_HALF,
BGFX_ATTRIB_TYPE_FLOAT,
BGFX_ATTRIB_TYPE_COUNT
) = map(bgfx_attrib_type, range(6))
# bgfx_texture_format
bgfx_texture_format = enum_type
(
BGFX_TEXTURE_FORMAT_BC1,
BGFX_TEXTURE_FORMAT_BC2,
BGFX_TEXTURE_FORMAT_BC3,
BGFX_TEXTURE_FORMAT_BC4,
BGFX_TEXTURE_FORMAT_BC5,
BGFX_TEXTURE_FORMAT_BC6H,
BGFX_TEXTURE_FORMAT_BC7,
BGFX_TEXTURE_FORMAT_ETC1,
BGFX_TEXTURE_FORMAT_ETC2,
BGFX_TEXTURE_FORMAT_ETC2A,
BGFX_TEXTURE_FORMAT_ETC2A1,
BGFX_TEXTURE_FORMAT_PTC12,
BGFX_TEXTURE_FORMAT_PTC14,
BGFX_TEXTURE_FORMAT_PTC12A,
BGFX_TEXTURE_FORMAT_PTC14A,
BGFX_TEXTURE_FORMAT_PTC22,
BGFX_TEXTURE_FORMAT_PTC24,
BGFX_TEXTURE_FORMAT_ATC,
BGFX_TEXTURE_FORMAT_ATCE,
BGFX_TEXTURE_FORMAT_ATCI,
BGFX_TEXTURE_FORMAT_ASTC4x4,
BGFX_TEXTURE_FORMAT_ASTC5x5,
BGFX_TEXTURE_FORMAT_ASTC6x6,
BGFX_TEXTURE_FORMAT_ASTC8x5,
BGFX_TEXTURE_FORMAT_ASTC8x6,
BGFX_TEXTURE_FORMAT_ASTC10x5,
BGFX_TEXTURE_FORMAT_UNKNOWN,
BGFX_TEXTURE_FORMAT_R1,
BGFX_TEXTURE_FORMAT_A8,
BGFX_TEXTURE_FORMAT_R8,
BGFX_TEXTURE_FORMAT_R8I,
BGFX_TEXTURE_FORMAT_R8U,
BGFX_TEXTURE_FORMAT_R8S,
BGFX_TEXTURE_FORMAT_R16,
BGFX_TEXTURE_FORMAT_R16I,
BGFX_TEXTURE_FORMAT_R16U,
BGFX_TEXTURE_FORMAT_R16F,
BGFX_TEXTURE_FORMAT_R16S,
BGFX_TEXTURE_FORMAT_R32I,
BGFX_TEXTURE_FORMAT_R32U,
BGFX_TEXTURE_FORMAT_R32F,
BGFX_TEXTURE_FORMAT_RG8,
BGFX_TEXTURE_FORMAT_RG8I,
BGFX_TEXTURE_FORMAT_RG8U,
BGFX_TEXTURE_FORMAT_RG8S,
BGFX_TEXTURE_FORMAT_RG16,
BGFX_TEXTURE_FORMAT_RG16I,
BGFX_TEXTURE_FORMAT_RG16U,
BGFX_TEXTURE_FORMAT_RG16F,
BGFX_TEXTURE_FORMAT_RG16S,
BGFX_TEXTURE_FORMAT_RG32I,
BGFX_TEXTURE_FORMAT_RG32U,
BGFX_TEXTURE_FORMAT_RG32F,
BGFX_TEXTURE_FORMAT_RGB8,
BGFX_TEXTURE_FORMAT_RGB8I,
BGFX_TEXTURE_FORMAT_RGB8U,
BGFX_TEXTURE_FORMAT_RGB8S,
BGFX_TEXTURE_FORMAT_RGB9E5F,
BGFX_TEXTURE_FORMAT_BGRA8,
BGFX_TEXTURE_FORMAT_RGBA8,
BGFX_TEXTURE_FORMAT_RGBA8I,
BGFX_TEXTURE_FORMAT_RGBA8U,
BGFX_TEXTURE_FORMAT_RGBA8S,
BGFX_TEXTURE_FORMAT_RGBA16,
BGFX_TEXTURE_FORMAT_RGBA16I,
BGFX_TEXTURE_FORMAT_RGBA16U,
BGFX_TEXTURE_FORMAT_RGBA16F,
BGFX_TEXTURE_FORMAT_RGBA16S,
BGFX_TEXTURE_FORMAT_RGBA32I,
BGFX_TEXTURE_FORMAT_RGBA32U,
BGFX_TEXTURE_FORMAT_RGBA32F,
BGFX_TEXTURE_FORMAT_R5G6B5,
BGFX_TEXTURE_FORMAT_RGBA4,
BGFX_TEXTURE_FORMAT_RGB5A1,
BGFX_TEXTURE_FORMAT_RGB10A2,
BGFX_TEXTURE_FORMAT_RG11B10F,
BGFX_TEXTURE_FORMAT_UNKNOWN_DEPTH,
BGFX_TEXTURE_FORMAT_D16,
BGFX_TEXTURE_FORMAT_D24,
BGFX_TEXTURE_FORMAT_D24S8,
BGFX_TEXTURE_FORMAT_D32,
BGFX_TEXTURE_FORMAT_D16F,
BGFX_TEXTURE_FORMAT_D24F,
BGFX_TEXTURE_FORMAT_D32F,
BGFX_TEXTURE_FORMAT_D0S8,
BGFX_TEXTURE_FORMAT_COUNT
) = map(bgfx_texture_format, range(86))
# bgfx_uniform_type
bgfx_uniform_type = enum_type
(
BGFX_UNIFORM_TYPE_SAMPLER,
BGFX_UNIFORM_TYPE_END,
BGFX_UNIFORM_TYPE_VEC4,
BGFX_UNIFORM_TYPE_MAT3,
BGFX_UNIFORM_TYPE_MAT4,
BGFX_UNIFORM_TYPE_COUNT
) = map(bgfx_uniform_type, range(6))
# backbuffer_ratio
backbuffer_ratio = enum_type
(
BGFX_BACKBUFFER_RATIO_EQUAL,
BGFX_BACKBUFFER_RATIO_HALF,
BGFX_BACKBUFFER_RATIO_QUARTER,
BGFX_BACKBUFFER_RATIO_EIGHTH,
BGFX_BACKBUFFER_RATIO_SIXTEENTH,
BGFX_BACKBUFFER_RATIO_DOUBLE,
BGFX_BACKBUFFER_RATIO_COUNT
) = map(backbuffer_ratio, range(7))
# occlusion_query_result
occlusion_query_result = enum_type
(
BGFX_OCCLUSION_QUERY_RESULT_INVISIBLE,
BGFX_OCCLUSION_QUERY_RESULT_VISIBLE,
BGFX_OCCLUSION_QUERY_RESULT_NORESULT,
BGFX_OCCLUSION_QUERY_RESULT_COUNT
) = map(occlusion_query_result, range(4))
# topology
topology = enum_type
(
BGFX_TOPOLOGY_TRI_LIST,
BGFX_TOPOLOGY_TRI_STRIP,
BGFX_TOPOLOGY_LINE_LIST,
BGFX_TOPOLOGY_LINE_STRIP,
BGFX_TOPOLOGY_POINT_LIST,
BGFX_TOPOLOGY_COUNT
) = map(topology, range(6))
# topology_convert
topology_convert = enum_type
(
BGFX_TOPOLOGY_CONVERT_TRI_LIST_FLIP_WINDING,
BGFX_TOPOLOGY_CONVERT_TRI_STRIP_FLIP_WINDING,
BGFX_TOPOLOGY_CONVERT_TRI_LIST_TO_LINE_LIST,
BGFX_TOPOLOGY_CONVERT_TRI_STRIP_TO_TRI_LIST,
BGFX_TOPOLOGY_CONVERT_LINE_STRIP_TO_LINE_LIST,
BGFX_TOPOLOGY_CONVERT_COUNT
) = map(topology_convert, range(6))
# topology_sort
topology_sort = enum_type
(
BGFX_TOPOLOGY_SORT_DIRECTION_FRONT_TO_BACK_MIN,
BGFX_TOPOLOGY_SORT_DIRECTION_FRONT_TO_BACK_AVG,
BGFX_TOPOLOGY_SORT_DIRECTION_FRONT_TO_BACK_MAX,
BGFX_TOPOLOGY_SORT_DIRECTION_BACK_TO_FRONT_MIN,
BGFX_TOPOLOGY_SORT_DIRECTION_BACK_TO_FRONT_AVG,
BGFX_TOPOLOGY_SORT_DIRECTION_BACK_TO_FRONT_MAX,
BGFX_TOPOLOGY_SORT_DISTANCE_FRONT_TO_BACK_MIN,
BGFX_TOPOLOGY_SORT_DISTANCE_FRONT_TO_BACK_AVG,
BGFX_TOPOLOGY_SORT_DISTANCE_FRONT_TO_BACK_MAX,
BGFX_TOPOLOGY_SORT_DISTANCE_BACK_TO_FRONT_MIN,
BGFX_TOPOLOGY_SORT_DISTANCE_BACK_TO_FRONT_AVG,
BGFX_TOPOLOGY_SORT_DISTANCE_BACK_TO_FRONT_MAX,
BGFX_TOPOLOGY_SORT_COUNT
) = map(topology_sort, range(13))
# view_mode
view_mode = enum_type
(
BGFX_VIEW_MODE_DEFAULT,
BGFX_VIEW_MODE_SEQUENTIAL,
BGFX_VIEW_MODE_DEPTH_ASCENDING,
BGFX_VIEW_MODE_DEPTH_DESCENDING,
BGFX_VIEW_MODE_CCOUNT
) = map(view_mode, range(5))
BGFX_PCI_ID_NONE = 0x0000
BGFX_PCI_ID_SOFTWARE_RASTERIZER = 0x0001
BGFX_PCI_ID_AMD = 0x1002
BGFX_PCI_ID_INTEL = 0x8086
BGFX_PCI_ID_NVIDIA = 0x10de
BGFX_RESET_NONE = 0x00000000 # //!< No reset flags.
BGFX_RESET_FULLSCREEN = 0x00000001 # //!< Not supported yet.
BGFX_RESET_FULLSCREEN_SHIFT = 0 # //!< Fullscreen bit shift.
BGFX_RESET_FULLSCREEN_MASK = 0x00000001 # //!< Fullscreen bit mask.
BGFX_RESET_MSAA_X2 = 0x00000010 # //!< Enable 2x MSAA.
BGFX_RESET_MSAA_X4 = 0x00000020 # //!< Enable 4x MSAA.
BGFX_RESET_MSAA_X8 = 0x00000030 # //!< Enable 8x MSAA.
BGFX_RESET_MSAA_X16 = 0x00000040 # //!< Enable 16x MSAA.
BGFX_RESET_MSAA_SHIFT = 4 # //!< MSAA mode bit shift.
BGFX_RESET_MSAA_MASK = 0x00000070 # //!< MSAA mode bit mask.
BGFX_RESET_VSYNC = 0x00000080 # //!< Enable V-Sync.
BGFX_RESET_MAXANISOTROPY = 0x00000100 # //!< Turn on/off max anisotropy.
BGFX_RESET_CAPTURE = 0x00000200 # //!< Begin screen capture.
BGFX_RESET_HMD = 0x00000400 # //!< HMD stereo
| lord63/flask_toolbox | flask_toolbox/views/package.py | Python | mit | 2,151 | 0.002789 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask import Blueprint, render_template, Markup, url_for
from flask_toolbox.models import Package
package_page = Blueprint('package_page', __name__,
template_folder='templates')
@package_page.route('/packages')
def index():
packages = Package.query.order_by(Package.name).filter(Package.category_id != None).all()
sidebar_title = "All the packages"
package_list = [package.name for package in packages]
print(len(package_list))
return render_template(
'packages.html', packages=packages,
sidebar_title=sidebar_title, package_list=package_list)
@package_page.route('/packages/<package>')
def show(package):
the_package = Package.query.filter_by(name=package).first_or_404()
category = the_package.category
related_packages = [item.name for item in category.packages.order_by(Package.score.desc()).all()
if item.name != package]
sidebar_title = (
Markup("Other related packages in the <a href='{0}'>{1}</a> category".format(
url_for('category_page.show', category=category.name),
category.name
))
)
return render_template(
'package.html', package=the_package,
related_packages=related_packages, sidebar_title=sidebar_title)
@package_page.route('/packages/<package>/score')
def score(package):
flask = Package.query.filter_by(name="Flask").first()
the_package = Package.query.filter_by(name=package).first_or_404()
category = the_package.category
related_packages = [item.name for item in category.packages.order_by(Package.score.desc()).all()
if item.name != package]
sidebar_title = (
Markup("Other related packages in the <a href='{0}'>{1}</a> category".format(
url_for('category_page.index', category=category.name),
category.name
))
)
return render_template(
'score.html', package=the_package, flask=flask,
related_packages=related_packages, sidebar_title=sidebar_title)
| nwjs/chromium.src | third_party/blink/web_tests/external/wpt/webdriver/tests/support/fixtures.py | Python | bsd-3-clause | 7,196 | 0.001112 |
import copy
import json
import os
import asyncio
import pytest
import webdriver
from urllib.parse import urlunsplit
from tests.support import defaults
from tests.support.helpers import cleanup_session, deep_update
from tests.support.inline import build_inline
from tests.support.http_request import HTTPRequest
# The webdriver session can outlive a pytest session
_current_session = None
# The event loop needs to outlive the webdriver session
_event_loop = None
_custom_session = False
def pytest_configure(config):
# register the capabilities marker
config.addinivalue_line(
"markers",
"capabilities: mark test to use capabilities"
)
@pytest.fixture
def capabilities():
"""Default capabilities to use for a new WebDriver session."""
return {}
def pytest_generate_tests(metafunc):
if "capabilities" in metafunc.fixturenames:
marker = metafunc.definition.get_closest_marker(name="capabilities")
if marker:
metafunc.parametrize("capabilities", marker.args, ids=None)
@pytest.fixture(scope="session")
def event_loop():
"""Change event_loop fixture to global."""
global _event_loop
if _event_loop is None:
_event_loop = asyncio.get_event_loop_policy().new_event_loop()
return _event_loop
@pytest.fixture
def http(configuration):
return HTTPRequest(configuration["host"], configuration["port"])
@pytest.fixture
def server_config():
with open(os.environ.get("WD_SERVER_CONFIG_FILE"), "r") as f:
return json.load(f)
@pytest.fixture(scope="session")
def configuration():
host = os.environ.get("WD_HOST", defaults.DRIVER_HOST)
port = int(os.environ.get("WD_PORT", str(defaults.DRIVER_PORT)))
capabilities = json.loads(os.environ.get("WD_CAPABILITIES", "{}"))
return {
"host": host,
"port": port,
"capabilities": capabilities
}
async def reset_current_session_if_necessary(caps):
global _current_session
# If there is a session with different requested capabilities active than
# the one we would like to create, end it now.
if _current_session is not None:
if not _current_session.match(caps):
is_bidi = isinstance(_current_session, webdriver.BidiSession)
if is_bidi:
await _current_session.end()
else:
_current_session.end()
_current_session = None
@pytest.fixture(scope="function")
async def session(capabilities, configuration):
"""Create and start a session for a test that does not itself test session creation.
By default the session will stay open after each test, but we always try to start a
new one and assume that if that fails there is already a valid session. This makes it
possible to recover from some errors that might leave the session in a bad state, but
does not demand that we start a new session per test.
"""
global _current_session
# Update configuration capabilities with custom ones from the
# capabilities fixture, which can be set by tests
caps = copy.deepcopy(configuration["capabilities"])
deep_update(caps, capabilities)
caps = {"alwaysMatch": caps}
await reset_current_session_if_necessary(caps)
if _current_session is None:
_current_session = webdriver.Session(
configuration["host"],
configuration["port"],
capabilities=caps)
_current_session.start()
# Enforce a fixed default window size and position
if _current_session.capabilities.get("setWindowRect"):
_current_session.window.size = defaults.WINDOW_SIZE
_current_session.window.position = defaults.WINDOW_POSITION
yield _current_session
cleanup_session(_current_session)
@pytest.fixture(scope="function")
async def bidi_session(capabilities, configuration):
"""Create and start a bidi session.
Can be used for a test that does not itself test bidi session creation.
By default the session will stay open after each test, but we always try to start a
new one and assume that if that fails there is already a valid session. This makes it
possible to recover from some errors that might leave the session in a bad state, but
does not demand that we start a new session per test.
"""
global _current_session
# Update configuration capabilities with custom ones from the
# capabilities fixture, which can be set by tests
caps = copy.deepcopy(configuration["capabilities"])
caps.update({"webSocketUrl": True})
deep_update(caps, capabilities)
caps = {"alwaysMatch": caps}
await reset_current_session_if_necessary(caps)
if _current_session is None:
_current_session = webdriver.Session(
configuration["host"],
configuration["port"],
capabilities=caps,
enable_bidi=True)
_current_session.start()
await _current_session.bidi_session.start()
# Enforce a fixed default window size and position
if _current_session.capabilities.get("setWindowRect"):
_current_session.window.size = defaults.WINDOW_SIZE
_current_session.window.position = defaults.WINDOW_POSITION
yield _current_session.bidi_session
await _current_session.bidi_session.end()
cleanup_session(_current_session)
@pytest.fixture(scope="function")
def current_session():
return _current_session
@pytest.fixture
def url(server_config):
def url(path, protocol="http", domain="", subdomain="", query="", fragment=""):
domain = server_config["domains"][domain][subdomain]
port = server_config["ports"][protocol][0]
host = "{0}:{1}".format(domain, port)
return urlunsplit((protocol, host, path, query, fragment))
return url
@pytest.fixture
def inline(url):
"""Take a source extract and produces well-formed documents.
Based on the desired document type, the extract is embedded with
predefined boilerplate in order to produce well-formed documents.
The media type and character set may also be individually configured.
This helper function originally used data URLs, but since these
are not universally supported (or indeed standardised!) across
browsers, it now delegates the serving of the document to wptserve.
This file also acts as a wptserve handler (see the main function
below) which configures the HTTP response using query parameters.
This function returns a URL to the wptserve handler, which in turn
will serve an HTTP response with the requested source extract
inlined in a well-formed document, and the Content-Type header
optionally configured using the desired media type and character set.
Any additional keyword arguments are passed on to the build_url
function, which comes from the url fixture.
"""
def inline(src, **kwargs):
return build_inline(url, src, **kwargs)
return inline
@pytest.fixture
def iframe(inline):
"""Inline document extract as the source document of an <iframe>."""
def iframe(src, **kwargs):
return "<iframe src='{}'></iframe>".format(inline(src, **kwargs))
return iframe
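For orientation only, a hypothetical test consuming these fixtures (pytest injects them by argument name, and the exact session API is assumed here) might look like:

# Hypothetical usage of the session/inline fixtures defined above.
def test_page_title(session, inline):
    session.url = inline("<title>hello</title><p>body</p>")
    assert session.title == "hello"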
| Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/tensor/tests/test_blas.py | Python | agpl-3.0 | 87,605 | 0.00145 |
from __future__ import absolute_import, print_function, division
from copy import copy
from itertools import product as itertools_product
from unittest import TestCase
import numpy
from numpy import (arange, array, common_type, complex64, complex128, float32,
float64, newaxis, shape, transpose, zeros)
from numpy.testing import assert_array_almost_equal
from six.moves import xrange
import theano
import theano.tensor as T
from theano import tensor, In, shared, config
from theano.compat import exc_message
from theano.printing import pp
from theano.tensor.blas import (_dot22, _dot22scalar, res_is_a, _as_scalar,
_is_real_matrix, _gemm_canonicalize,
_factor_canonicalized, Gemm, Gemv,
gemm_inplace, gemm_no_inplace,
InconsistencyError, Ger, ger, ger_destructive)
from theano.tests import unittest_tools
from .test_basic import (as_tensor_variable, inplace_func,
compile, inplace)
import theano.tensor.blas_scipy
from theano.tests.unittest_tools import attr
if config.mode == 'FAST_COMPILE':
mode_not_fast_compile = 'FAST_RUN'
else:
mode_not_fast_compile = config.mode
mode_blas_opt = theano.compile.get_default_mode().including(
'BlasOpt', 'specialize', 'InplaceBlasOpt')
mode_blas_opt = mode_blas_opt.excluding('c_blas')
def test_dot_eq():
assert T.Dot() == T.Dot()
def sharedX(x, name):
return theano.shared(numpy.asarray(x, config.floatX), name=name)
class t_gemm(TestCase):
"""This test suite is supposed to establish that gemm works as it
is supposed to.
"""
def setUp(self):
unittest_tools.seed_rng()
Gemm.debug = False
@staticmethod
def _gemm(z, a, x, y, b):
assert a.shape == ()
assert b.shape == ()
return b * z + a * numpy.dot(x, y)
@staticmethod
def rand(*args):
return numpy.random.rand(*args)
def cmp(self, z_, a_, x_, y_, b_):
for dtype in ['float32', 'float64', 'complex64', 'complex128']:
z = numpy.asarray(z_, dtype=dtype)
a = numpy.asarray(a_, dtype=dtype)
x = numpy.asarray(x_, dtype=dtype)
y = numpy.asarray(y_, dtype=dtype)
b = numpy.asarray(b_, dtype=dtype)
def cmp_linker(z, a, x, y, b, l):
z, a, x, y, b = [numpy.asarray(p) for p in (z, a, x, y, b)]
z_orig = z.copy()
tz, ta, tx, ty, tb = [as_tensor_variable(p).type()
for p in (z, a, x, y, b)]
f = inplace_func([tz, ta, tx, ty, tb],
gemm_inplace(tz, ta, tx, ty, tb),
mode=compile.Mode(optimizer=None, linker=l))
new_z = f(z, a, x, y, b)
z_after = self._gemm(z_orig, a, x, y, b)
# print z_orig, z_after, z, type(z_orig), type(z_after), type(z)
unittest_tools.assert_allclose(z_after, z)
if a == 0.0 and b == 1.0:
return
elif z_orig.size == 0:
self.assertTrue(z.size == 0)
else:
self.assertFalse(numpy.all(z_orig == z))
cmp_linker(copy(z), a, x, y, b, 'c|py')
cmp_linker(copy(z), a, x, y, b, 'py')
if (not dtype.startswith("complex")
and theano.config.cxx):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
cmp_linker(copy(z), a, x, y, b, 'c')
def test0a(self):
Gemm.debug = True
try:
g = gemm_inplace([1.], 1., [1.], [1.], 1.)
except TypeError as e:
if exc_message(e) is Gemm.E_rank:
return
self.fail()
def test0(self):
try:
self.cmp(1., 0., 1.0, 1.0, 1.0)
except TypeError as e:
if exc_message(e) is Gemm.E_rank:
return
self.fail()
def test2(self):
try:
self.cmp(2., 1.0, [3, 2, 1.], [[1], [2], [3.]], 1.0)
except TypeError as e:
self.assertTrue(exc_message(e) == Gemm.E_rank)
return
self.fail()
def test4(self):
self.cmp(self.rand(3, 4), 1.0, self.rand(3, 5), self.rand(5, 4), 0.0)
def test5(self):
self.cmp(self.rand(3, 4), 1.0,
self.rand(3, 5), self.rand(5, 4), 1.0)
def test6(self):
self.cmp(self.rand(3, 4), 1.0,
self.rand(3, 5), self.rand(5, 4), -1.0)
def test7(self):
self.cmp(self.rand(3, 4), 0.0,
self.rand(3, 5), self.rand(5, 4), 0.0)
def test8(self):
self.cmp(self.rand(3, 4), 0.0,
self.rand(3, 5), self.rand(5, 4), 0.6)
def test9(self):
self.cmp(self.rand(3, 4), 0.0,
self.rand(3, 5), self.rand(5, 4), -1.0)
def test10(self):
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 5), self.rand(5, 4), 0.0)
def test11(self):
self.cmp(self.rand(3, 4), -1.0,
self.rand(3, 5), self.rand(5, 4), 1.0)
def test12(self):
self.cmp(self.rand(3, 4), -1.0,
self.rand(3, 5), self.rand(5, 4), -1.0)
def test_shape_0(self):
self.cmp(self.rand(0, 4), -1.0, self.rand(0, 5), self.rand(5, 4), -1.0)
self.cmp(self.rand(3, 0), -1.0, self.rand(3, 5), self.rand(5, 0), -1.0)
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 0), self.rand(0, 4), -1.0)
self.cmp(self.rand(0, 0), -1.0, self.rand(0, 5), self.rand(5, 0), -1.0)
self.cmp(self.rand(0, 0), -1.0, self.rand(0, 0), self.rand(0, 0), -1.0)
def test_factorised_scalar(self):
a = T.matrix()
b = T.matrix()
c = T.matrix()
s = theano.shared(numpy.zeros((5, 5)).astype(config.floatX))
lr1 = T.constant(0.01).astype(config.floatX)
lr2 = T.constant(2).astype(config.floatX)
l2_reg = T.constant(0.0001).astype(config.floatX)
# test constant merge with gemm
f = theano.function([a, b], updates=[(s, lr1 * T.dot(a, b) +
l2_reg * lr2 * s)],
mode=mode_not_fast_compile).maker.fgraph.toposort()
#[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# 2e-06)]
assert len(f) == 1
assert f[0].op == gemm_inplace
# test factored scalar with merge
f = theano.function([a, b], updates=[(s, lr1 * (T.dot(a, b) -
l2_reg * s))],
mode=mode_not_fast_compile).maker.fgraph.toposort()
#[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# -2e-06)]
assert len(f) == 1
assert f[0].op == gemm_inplace
# test factored scalar with merge and neg
f = theano.function([a, b],
updates=[(s, s - lr1 * (s * .0002 + T.dot(a, b)))],
mode=mode_not_fast_compile).maker.fgraph.toposort()
#[Gemm{inplace}(<TensorType(float64, matrix)>, -0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# 0.999998)]
assert len(f) == 1
assert f[0].op == gemm_inplace
def test_destroy_map0(self):
"""test that only first input can be overwritten"""
Z = as_tensor_variable(self.rand(2, 2))
try:
gemm_inplace(Z, 1.0, Z, Z, 1.0)
except InconsistencyError as e:
if exc_message(e) == Gemm.E_z_uniq:
return
self.fail()
def test_destroy_map1(self):
"""test that only first input can be overwritten"""
Z = as_tensor_variable(self.rand(2, 2))
A = as_tensor_variable(self.rand(2, 2))
try:
gemm_inplace(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0)
| mattclark/osf.io | conftest.py | Python | apache-2.0 | 3,405 | 0.000881 |
from __future__ import print_function
import logging
import mock
import pytest
from faker import Factory
from website import settings as website_settings
from framework.celery_tasks import app as celery_app
logger = logging.getLogger(__name__)
# Silence some 3rd-party logging and some "loud" internal loggers
SILENT_LOGGERS = [
'api.caching.tasks',
'factory.generate',
'factory.containers',
'framework.analytics',
'framework.auth.core',
'website.app',
'website.archiver.tasks',
'website.mails',
'website.notifications.listeners',
'website.search.elastic_search',
'website.search_migration.migrate',
'website.util.paths',
'requests_oauthlib.oauth2_session',
'raven.base.Client',
'raven.contrib.django.client.DjangoClient',
'transitions.core',
'MARKDOWN',
'elasticsearch',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
@pytest.fixture(autouse=True)
def override_settings():
"""Override settings for the test environment.
"""
# Make tasks run synchronously, and make sure exceptions get propagated
celery_app.conf.update({
'task_always_eager': True,
'task_eager_propagates': True,
})
website_settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
# TODO: Remove if this is unused?
website_settings.BCRYPT_LOG_ROUNDS = 1
# Make sure we don't accidentally send any emails
website_settings.SENDGRID_API_KEY = None
# Set this here instead of in SILENT_LOGGERS, in case developers
# call setLevel in local.py
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
@pytest.fixture()
def fake():
return Factory.create()
_MOCKS = {
'osf.models.user.new_bookmark_collection': {
'mark': 'enable_bookmark_creation',
'replacement': lambda *args, **kwargs: None,
},
'osf.models.user._create_quickfiles_project': {
'mark': 'enable_quickfiles_creation',
'replacement': lambda *args, **kwargs: None,
},
'framework.celery_tasks.handlers._enqueue_task': {
'mark': 'enable_enqueue_task',
'replacement': lambda *args, **kwargs: None,
},
'osf.models.base.BaseModel.full_clean': {
'mark': 'enable_implicit_clean',
'replacement': lambda *args, **kwargs: None,
},
'osf.models.base._check_blacklist': {
'mark': 'enable_blacklist_check',
'replacement': lambda *args, **kwargs: False,
},
'website.search.search.search_engine': {
'mark': 'enable_search',
'replacement': mock.MagicMock()
},
'website.search.elastic_search': {
'mark': 'enable_search',
'replacement': mock.MagicMock()
}
}
@pytest.fixture(autouse=True, scope='session')
def _test_speedups():
mocks = {}
for target, config in _MOCKS.items():
mocks[target] = mock.patch(target, config['replacement'])
mocks[target].start()
yield mocks
for patcher in mocks.values():
patcher.stop()
@pytest.fixture(autouse=True)
def _test_speedups_disable(request, settings, _test_speedups):
patchers = []
for target, config in _MOCKS.items():
if not request.node.get_marker(config['mark']):
continue
patchers.append(_test_speedups[target])
patchers[-1].stop()
yield
for patcher in patchers:
patcher.start()
| Octoberr/swmcdh | cong/zijixieyige.py | Python | apache-2.0 | 496 | 0.014113 |
import urllib.request
import re
def getHtml(url):
page = urllib.request.urlopen(url)
html = page.read().decode('utf-8')
return html
def getImg(html):
reg = r'src="(.+?\.jpg)" pic_ext'
imgre = re.compile(reg)
imglist = re.findall(imgre,html)
x = 0
for imgurl in imglist:
urllib.request.urlretrieve(imgurl,'pic/%s.jpg' % x)
x+=1
return imglist
html = getHtml("http://tieba.baidu.com/p/2460150866")
list=getImg(html)
for i in list:
print(i)
| csparpa/robograph | robograph/datamodel/tests/test_buffers.py | Python | apache-2.0 | 703 | 0 |
import time
from robograph.datamodel.nodes.lib import buffers
def test_buffer():
instance = buffers.Buffer()
assert instance.requirements == []
expected = dict(a=1, b=2, c=3)
instance.input(expected)
instance.set_output_label('any')
assert instance.output() == expected
def test_detlayed_buffer():
delay = 2.5
instance = buffers.DelayedBuffer(seconds=delay)
assert instance.requirements == ['seconds']
expected = dict(a=1, b=2, c=3)
instance.input(expected)
instance.set_output_label('any')
start_time = time.time()
result = instance.output()
end_time = time.time()
assert result == expected
assert end_time - start_time >= delay
| Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_operations.py | Python | mit | 21,247 | 0.001459 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class PersonGroupOperations(object):
"""PersonGroupOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create(
self, person_group_id, name, user_data=None, recognition_model="recognition_01", custom_headers=None, raw=False, **operation_config):
"""Create a new person group with specified personGroupId, name,
user-provided userData and recognitionModel.
<br /> A person group is the container of the uploaded person data,
including face recognition features.
<br /> After creation, use [PersonGroup Person -
Create](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/create)
to add persons into the group, and then call [PersonGroup -
Train](https://docs.microsoft.com/rest/api/faceapi/persongroup/train)
to get this group ready for [Face -
Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify).
<br /> No image will be stored. Only the person's extracted face
features and userData will be stored on server until [PersonGroup
Person -
Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/delete)
or [PersonGroup -
Delete](https://docs.microsoft.com/rest/api/faceapi/persongroup/delete)
is called.
<br/>'recognitionModel' should be specified to associate with this
person group. The default value for 'recognitionModel' is
'recognition_01', if the latest model needed, please explicitly specify
the model you need in this parameter. New faces that are added to an
existing person group will use the recognition model that's already
associated with the collection. Existing face features in a person
group can't be updated to features extracted by another version of
recognition model.
Person group quota:
* Free-tier subscription quota: 1,000 person groups. Each holds up to
1,000 persons.
* S0-tier subscription quota: 1,000,000 person groups. Each holds up to
10,000 persons.
* to handle larger scale face identification problem, please consider
using
[LargePersonGroup](https://docs.microsoft.com/rest/api/faceapi/largepersongroup).
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param name: User defined name, maximum length is 128.
:type name: str
:param user_data: User specified data. Length should not exceed 16KB.
:type user_data: str
:param recognition_model: Possible values include: 'recognition_01',
'recognition_02', 'recognition_03', 'recognition_04'
:type recognition_model: str or
~azure.cognitiveservices.vision.face.models.RecognitionModel
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
body = models.MetaDataContract(name=name, user_data=user_data, recognition_model=recognition_model)
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'MetaDataContract')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
create.metadata = {'url': '/persongroups/{personGroupId}'}
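# --- Editor's note: hedged usage sketch, not part of the generated source. ---
# Callers normally reach this operation through the generated FaceClient, where
# this class is exposed as the `person_group` attribute. Roughly (endpoint and
# key values are placeholders):
#
#   from azure.cognitiveservices.vision.face import FaceClient
#   from msrest.authentication import CognitiveServicesCredentials
#   client = FaceClient("https://<resource>.cognitiveservices.azure.com",
#                       CognitiveServicesCredentials("<subscription-key>"))
#   client.person_group.create(person_group_id="my-group", name="My Group")
#   client.person_group.train(person_group_id="my-group")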
def delete(
self, person_group_id, custom_headers=None, raw=False, **operation_config):
"""Delete an existing person group. Persisted face features of all people
in the person group will also be deleted.
:param person_group_id: Id referencing a particular person group.
:type person_group_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'personGroupId': self._serialize.url("person_group_id", person_group_id, 'str', max_length=64, pattern=r'^[a-z0-9-_]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/persongroups/{personGroupId}'}
def get(
self, person_group_id, return_recognition_model=False, custom_headers=None, raw=False, **operation_config):
"""Retrieve person group name, userData and recognitionModel. To get
person information under this personGroup, use [PersonGroup Person -
L
| chrismeyersfsu/ansible-modules-core | files/lineinfile.py | Python | gpl-3.0 | 15,603 | 0.001795 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Ahti Kitsik <ak@ahtik.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: lineinfile
author:
- "Daniel Hokka Zakrissoni (@dhozac)"
- "Ahti Kitsik (@ahtik)"
extends_documentation_fragment:
- files
- validate
short_description: Ensure a particular line is in a file, or replace an
existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in
a file only. See the M(replace) module if you want to change
multiple, similar lines or check M(blockinfile) if you want to insert/update/remove a block of lines in a file.
For other cases, see the M(copy) or M(template) modules.
version_added: "0.7"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: false
version_added: 1.7
description:
- The regular expression to look for in every line of the file. For
C(state=present), the pattern to replace if found; only the last line
found will be replaced. For C(state=absent), the pattern of the line
to remove. Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
state:
required: false
choices: [ present, absent ]
default: "present"
aliases: []
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the
file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.1"
description:
- Used with C(state=present). If set, line can contain backreferences
(both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly;
C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
If the C(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted
after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
If specified regular expression has no matches, EOF will be used instead.
May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
version_added: "1.1"
description:
- Used with C(state=present). If specified, the line will be inserted
before the last match of specified regular expression. A value is
available; C(BOF) for inserting the line at the beginning of the file.
If specified regular expression has no matches, the line will be
inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. By default it will fail if the file
is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- lineinfile:
dest: /etc/selinux/config
regexp: '^SELINUX='
line: 'SELINUX=enforcing'
- lineinfile:
dest: /etc/sudoers
state: absent
regexp: '^%wheel'
- lineinfile:
dest: /etc/hosts
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
owner: root
group: root
mode: 0644
- lineinfile:
dest: /etc/httpd/conf/httpd.conf
regexp: '^Listen '
insertafter: '^#Listen '
line: 'Listen 8080'
- lineinfile:
dest: /etc/services
regexp: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
# Add a line to a file if it does not exist, without passing regexp
- lineinfile:
dest: /tmp/testfile
line: '192.168.1.99 foo.lab.net foo'
# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
- lineinfile: "
dest: /etc/sudoers
state: present
regexp: '^%wheel'
line: '%wheel ALL=(ALL) NOPASSWD: ALL'
- lineinfile:
dest: /opt/jboss-as/bin/standalone.conf
regexp: '^(.*)Xms(\d+)m(.*)$'
line: '\1Xms${xms}m\3'
backrefs: yes
# Validate the sudoers file before saving
- lineinfile:
dest: /etc/sudoers
state: present
regexp: '^%ADMIN ALL='
line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
"""
import re
import os
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b
from ansible.module_utils._text import to_bytes, to_native
def write_changes(module, b_lines, dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.writelines(b_lines)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile,
to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message, diff):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False, diff=diff):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def present(module, dest, regexp, line, insertafter, insertbefore, create,
backup, backrefs):
diff = {'before': '',
'after': '',
'before_header': '%s (content)' % dest,
'after_header': '%s (content)' % dest}
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
b_destpath = os.path.dirname(b_dest)
if not os.path.exists(b_destpath) and not module.check_mode:
os.makedirs(b_destpath)
b_lines = []
else:
f = open(b_dest, 'rb')
b_lines = f.readlines()
| waldenilson/TerraLegal | project/tramitacao/restrito/relatorio.py | Python | gpl-2.0 | 111,754 | 0.016833 |
# -*- coding: UTF-8 -*-
from django.template.context import RequestContext
from project.tramitacao.models import Tbpecastecnicas, \
Tbprocessorural,Tbchecklistprocessobase, Tbprocessosanexos, Tbprocessobase,Tbprocessourbano, Tbcaixa, AuthUser, Tbmunicipio, Tbprocessoclausula, Tbpendencia, Tbetapa, Tbtransicao
from project.geoinformacao.models import TbparcelaGeo
from project.tramitacao.relatorio_base import relatorio_ods_base_header,\
relatorio_ods_base
from django.db.models import Q
from django.contrib.auth.decorators import permission_required
from django.http.response import HttpResponse
from odslib import ODS
from django.shortcuts import render_to_response
from django.db.models import Q
from project.livro.models import Tbtituloprocesso
import datetime
import urllib2
import json
def lista(request):
return render_to_response('sicop/relatorio/lista.html',{}, context_instance = RequestContext(request))
#PROCESSOS QUE TEM PECA TECNICA
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def processo_peca(request):
if request.method == "POST":
p_rural = []
#CONSULTA ORDENADA E/OU BASEADA EM FILTROS DE PESQUISA
consulta = Tbprocessorural.objects.filter( tbprocessobase__tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
p_rural_com_peca = []
p_rural = consulta.order_by( request.POST['ordenacao'] )
for r in p_rural:
if Tbpecastecnicas.objects.filter( nrcpfrequerente = r.nrcpfrequerente.replace('.','').replace('-','') ):
p_rural_com_peca.append( r )
#GERACAO
nome_relatorio = "relatorio-processos-com-peca"
titulo_relatorio = "RELATORIO DOS PROCESSOS COM PECAS TECNICAS"
planilha_relatorio = "Processos com peca"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(p_rural_com_peca), ods)
# TITULOS DAS COLUNAS
sheet.getCell(0, 6).setAlignHorizontal('center').stringValue( 'Processo' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(1, 6).setAlignHorizontal('center').stringValue( 'Requerente' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(2, 6).setAlignHorizontal('center').stringValue( 'Contato' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(3, 6).setAlignHorizontal('center').stringValue( 'Endereco' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(4, 6).setAlignHorizontal('center').stringValue( 'Conjuge' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(5, 6).setAlignHorizontal('center').stringValue( 'CPF' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(6, 6).setAlignHorizontal('center').stringValue( 'Caixa' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(7, 6).setAlignHorizontal('center').stringValue( 'Municipio' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(8, 6).setAlignHorizontal('center').stringValue( 'Gleba' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(9, 6).setAlignHorizontal('center').stringValue( 'Qtd. Pendencias' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(10, 6).setAlignHorizontal('center').stringValue( 'Pendentes' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getCell(11, 6).setAlignHorizontal('center').stringValue( 'Notificadas' ).setFontSize('14pt').setBold(True).setCellColor("#ccff99")
sheet.getRow(1).setHeight('20pt')
sheet.getRow(2).setHeight('20pt')
sheet.getRow(6).setHeight('20pt')
sheet.getColumn(0).setWidth("2in")
sheet.getColumn(1).setWidth("5in")
sheet.getColumn(2).setWidth("2.5in")
sheet.getColumn(3).setWidth("5in")
sheet.getColumn(4).setWidth("5in")
sheet.getColumn(5).setWidth("2in")
sheet.getColumn(6).setWidth("2.5in")
sheet.getColumn(7).setWidth("2.5in")
sheet.getColumn(8).setWidth("2.5in")
sheet.getColumn(9).setWidth("2in")
sheet.getColumn(9).setWidth("2in")
sheet.getColumn(10).setWidth("2in")
sheet.getColumn(11).setWidth("2in")
#DADOS DA CONSULTA
x = 5
for obj in p_rural_com_peca:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nrprocesso)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.nmrequerente)
sheet.getCell(2, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmcontato)
sheet.getCell(3, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.nmendereco)
sheet.getCell(4, x+2).setAlignHorizontal('center').stringValue(obj.nmconjuge)
sheet.getCell(5, x+2).setAlignHorizontal('center').stringValue(obj.nrcpfrequerente)
sheet.getCell(6, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbcaixa.nmlocalarquivo)
sheet.getCell(7, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbmunicipio.nome_mun)
sheet.getCell(8, x+2).setAlignHorizontal('center').stringValue(obj.tbprocessobase.tbgleba.nmgleba)
# buscar todas as pendencias do processo, que nao estao sanadas
pendencias_pendente = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 2)
)
pendencias_notificado = Tbpendencia.objects.filter(
Q(tbprocessobase__id = obj.tbprocessobase.id, tbstatuspendencia__id = 3)
)
sheet.getCell(9, x+2).setAlignHorizontal('center').stringValue( len(pendencias_pendente) + len(pendencias_notificado) )
# buscando as descricoes das pendencias pendentes
desc_pendencias = ''
for pend in pendencias_pendente:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(10, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
# buscando as descricoes das pendencias notificadas
desc_pendencias = ''
for pend in pendencias_notificado:
desc_pendencias += pend.tbtipopendencia.dspendencia + ' : ' + pend.dsdescricao + ' | '
sheet.getCell(11, x+2).setAlignHorizontal('center').stringValue( desc_pendencias )
x += 1
#GERACAO DO DOCUMENTO
relatorio_ods_base(ods, planilha_relatorio)
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
return render_to_response('sicop/relatorio/processo_peca.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.relatorio_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def peca_processo(request):
if request.method == "POST":
pecas = []
#CONSULTA ORDENADA E/OU BASEADA EM FILTROS DE PESQUISA
consulta = Tbpecastecnicas.objects.filter( tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
pecas_com_proc = []
pecas = consulta.order_by( request.POST['ordenacao'] )
for p in pecas:
if len(Tbprocessorural.objects.filter( nrcpfrequerente = p.nrcpfrequerente )) > 0:
pecas_com_proc.append(p)
#GERACAO
nome_relatorio = "relatorio-pecas-com-processo"
titulo_relatorio = "RELATORIO DAS PECAS TECNICAS COM PROCESSO"
planilha_relatorio = "Pecas com processo"
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, len(pecas_com_proc), ods)
# TITU
| baroquebobcat/pants | src/python/pants/goal/pantsd_stats.py | Python | apache-2.0 | 1,009 | 0.006938 |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
class PantsDaemonStats(object):
"""Tr
|
acks various stats about the daemon."""
def __init__(self):
self.target_root_size = 0
self.affected_targets_size = 0
self.affected_targets_file_count = 0
self.scheduler_metrics = {}
def set_scheduler_metrics(self, scheduler_metrics):
self.scheduler_metrics = scheduler_metrics
def set_target_root_size(self, size):
self.target_root_size = size
def set_affected_targets_size(self, size):
self.affected_targets_size = size
def get_all(self):
res = dict(self.scheduler_metrics)
res.update({
'target_root_size': self.target_root_size,
'affected_targets_size': self.affected_targets_size,
})
return res
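A minimal, hypothetical use of this tracker, shown only to illustrate the accessors above (the metric name is arbitrary):

stats = PantsDaemonStats()
stats.set_scheduler_metrics({'scheduler_runs': 1})  # arbitrary metric for illustration
stats.set_target_root_size(3)
stats.set_affected_targets_size(42)
print(stats.get_all())  # {'scheduler_runs': 1, 'target_root_size': 3, 'affected_targets_size': 42}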
| tensorflow/benchmarks | scripts/tf_cnn_benchmarks/mlperf_test.py | Python | apache-2.0 | 7,794 | 0.003977 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains tests related to MLPerf.
Note this test only passes if the MLPerf compliance library is installed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import logging
import re
import six
import tensorflow.compat.v1 as tf
import benchmark_cnn
import datasets
import mlperf
import test_util
from models import model
from mlperf_compliance import mlperf_log
class _MlPerfTestModel(model.CNNModel):
"""A model to test the MLPerf compliance logging on."""
def __init__(self):
super(_MlPerfTestModel, self).__init__(
'mlperf_test_model', image_size=224, batch_size=2, learning_rate=1)
def add_inference(self, cnn):
assert cnn.top_layer.shape[1:] == (3, 224, 224)
cnn.conv(1, 1, 1, 1, 1, use_batch_norm=True)
cnn.mpool(1, 1, 1, 1, num_channels_in=1)
cnn.reshape([-1, 224 * 224])
cnn.affine(1, activation=None)
# Assert that the batch norm variables are filtered out for L2 loss.
variables = tf.global_variables() + tf.local_variables()
assert len(variables) > len(self.filter_l2_loss_vars(variables))
class MlPerfComplianceTest(tf.test.TestCase):
"""Tests the MLPerf compliance logs.
This serves as a quick check that we probably didn't break the compliance
logging. It is not meant to be as comprehensive as the official MLPerf
compliance checker will be.
"""
def setUp(self):
super(MlPerfComplianceTest, self).setUp()
benchmark_cnn.setup(benchmark_cnn.make_params())
# Map between regex and the number of times we expect to see that regex in the
# logs. Entry commented out with the comment FIXME indicate that
# tf_cnn_benchmarks currently fails compliance in that regard, and needs to be
# fixed to be MLPerf compliant.
EXPECTED_LOG_REGEXES = {
# Preprocessing tags
mlperf.tags.INPUT_ORDER: 2, # 1 for training, 1 for eval
# We pass --tf_random_seed=9876 in the test.
r'%s: 9876' % mlperf.tags.RUN_SET_RANDOM_SEED: 2,
# The Numpy random seed is hardcoded to 4321.
r'%s: 4321' % mlperf.tags.RUN_SET_RANDOM_SEED: 2,
r'%s: %d' % (mlperf.tags.PREPROC_NUM_TRAIN_EXAMPLES,
datasets.IMAGENET_NUM_TRAIN_IMAGES): 1,
r'%s: %d' % (mlperf.tags.PREPROC_NUM_EVAL_EXAMPLES,
datasets.IMAGENET_NUM_VAL_IMAGES): 1,
mlperf.tags.PREPROC_NUM_EVAL_EXAMPLES + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_MIN_OBJ_COV + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_RATIO_RANGE + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_AREA_RANGE + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_MAX_ATTEMPTS + '.*': 1,
mlperf.tags.INPUT_RANDOM_FLIP + '.*': 1,
r'%s: \[224, 224\].*' % mlperf.tags.INPUT_CENTRAL_CROP: 1,
r'%s: \[123.68, 116.78, 103.94\].*' % mlperf.tags.INPUT_MEAN_SUBTRACTION:
2,
r'%s: {"min": 256}.*' % mlperf.tags.INPUT_RESIZE_ASPECT_PRESERVING: 1,
# 1 for training, 1 for eval
r'%s: \[224, 224\].*' % mlperf.tags.INPUT_RESIZE: 2,
# Resnet model tags
mlperf.tags.MODEL_HP_BATCH_NORM + '.*': 2,
# 2 for training, 2 for eval. Although there's only 1 conv2d, each conv2d
# produces 2 logs.
mlperf.tags.MODEL_HP_CONV2D_FIXED_PADDING + '.*': 4,
mlperf.tags.MODEL_HP_RELU + '.*': 2,
mlperf.tags.MODEL_HP_INITIAL_MAX_POOL + '.*': 2,
mlperf.tags.MODEL_HP_DENSE + '.*': 4,
mlperf.tags.MODEL_HP_DENSE + '.*': 4,
# Note that tags our test model does not emit, like MODEL_HP_SHORTCUT_ADD,
# are omitted here.
r'%s: "categorical_cross_entropy".*' % mlperf.tags.MODEL_HP_LOSS_FN: 1,
# 1 for training, 2 because the _MlPerfTestModel calls this when building
# the model for both training and eval
r'%s: true' % mlperf.tags.MODEL_EXCLUDE_BN_FROM_L2: 3,
r'%s: 0.5.*' % mlperf.tags.MODEL_L2_REGULARIZATION: 1,
# Note we do not handle OPT_LR, since that is printed to stderr using
# tf.Print, which we cannot easily intercept.
# Other tags
'%s: "%s"' % (mlperf.tags.OPT_NAME, mlperf.tags.SGD_WITH_MOMENTUM): 1,
'%s: 0.5' % mlperf.tags.OPT_MOMENTUM: 1,
mlperf.tags.RUN_START: 1,
'%s: 2' % mlperf.tags.INPUT_BATCH_SIZE: 1,
mlperf.tags.TRAIN_LOOP: 1,
mlperf.tags.TRAIN_EPOCH + '.*': 1,
'%s: 2' % mlperf.tags.INPUT_SIZE: 2,
mlperf.tags.EVAL_START: 2,
mlperf.tags.EVAL_STOP: 2,
'%s: 6' % mlperf.tags.EVAL_SIZE: 2,
mlperf.tags.EVAL_ACCURACY + '.*': 2,
'%s: 2.0' % mlperf.tags.EVAL_TARGET: 2,
mlperf.tags.RUN_STOP + '.*': 1,
mlperf.tags.RUN_FINAL: 1
}
EXPECTED_LOG_REGEXES = Counter({re.compile(k): v for
k, v in EXPECTED_LOG_REGEXES.items()})
def testMlPerfCompliance(self):
string_io = six.StringIO()
handler = logging.StreamHandler(string_io)
data_dir = test_util.create_black_and_white_images()
try:
mlperf_log.LOGGER.addHandler(handler)
params = benchmark_cnn.make_params(data_dir=data_dir,
data_name='imagenet',
batch_size=2,
num_warmup_batches=0,
num_batches=2,
num_eval_batches=3,
eval_during_training_every_n_steps=1,
distortions=False,
weight_decay=0.5,
optimizer='momentum',
momentum=0.5,
stop_at_top_1_accuracy=2.0,
tf_random_seed=9876,
ml_perf=True)
with mlperf.mlperf_logger(use_mlperf_logger=True, model='resnet50_v1.5'):
bench_cnn = benchmark_cnn.BenchmarkCNN(params, model=_MlPerfTestModel())
bench_cnn.run()
logs = string_io.getvalue().splitlines()
log_regexes = Counter()
for log in logs:
for regex in self.EXPECTED_LOG_REGEXES:
if regex.search(log):
log_regexes[regex] += 1
if log_regexes != self.EXPECTED_LOG_REGEXES:
diff_counter = Counter(log_regexes)
diff_counter.subtract(self.EXPECTED_LOG_REGEXES)
differences = []
for regex in (k for k in diff_counter.keys() if diff_counter[k]):
found_count = log_regexes[regex]
expected_count = self.EXPECTED_LOG_REGEXES[regex]
differences.append(' For regex %s: Found %d lines matching but '
'expected to find %d' %
(regex.pattern, found_count, expected_count))
raise AssertionError('Logs did not match expected logs. Differences:\n'
'%s' % '\n'.join(differences))
finally:
mlperf_log.LOGGER.removeHandler(handler)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
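# A minimal standalone sketch of the log-matching strategy used in
# testMlPerfCompliance above: count how many log lines each compiled regex
# matches, then compare against the expected Counter. The helper name and the
# sample inputs are illustrative only, not part of tf_cnn_benchmarks.
def _count_regex_matches(expected_counts, logs):
  found = Counter()
  for log in logs:
    for regex in expected_counts:
      if regex.search(log):
        found[regex] += 1
  return found == expected_counts
# Example: _count_regex_matches(Counter({re.compile('RUN_START'): 1}),
#                               ['RUN_START at step 0']) returns True.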
|
turbofish/mcverify
|
config.py
|
Python
|
bsd-2-clause
| 772
| 0.009067
|
# vim: set fileencoding=utf-8 ts=4 sw=4 expandtab fdm=marker:
"""
Small wrapper around the python ConfigParser module.
"""
import ConfigParser
import logging
LOG = logging.getLogger(__name__)
CONFIG = ConfigParser.ConfigParser()
DEFAULTS = {
'patterns': {
'path' : '(?P<artist>\w+) - (?P<year>\d+) - (?P<album>\w+)'
}
}
def get_param(section, name):
try:
param = CONFIG.get(section, name)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
param = None
if not param:
# Do a default lookup
try:
param = DEFAULTS[section][name]
except KeyError:
# Parameter is not in defaults
LOG.error("Error: Parameter [%s][%s] does not exist", section, name)
param = ""
return param
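# Minimal usage sketch (the config file name and lookup keys below are
# illustrative only):
if __name__ == '__main__':
    CONFIG.read('mcverify.cfg')
    # Falls back to DEFAULTS['patterns']['path'] if the option is missing.
    print(get_param('patterns', 'path'))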
|
aldmbmtl/FloatingTools
|
tools/utilities.py
|
Python
|
mit
| 10,347
| 0.002996
|
"""
Validate the dependencies are installed.
"""
from __future__ import print_function
__all__ = [
'installRequiredToolbox',
'downloadDependency',
'addExtensionPath',
'loadExtensions',
'installPackage',
'tokenRefresher',
'validateToken',
'checkInstall',
'updateToken',
'TokenError',
'userToken',
]
# python imports
import re
import os
import sys
import imp
import json
import urllib
import shutil
import inspect
import zipfile
import threading
import traceback
import subprocess
# Tools imports
import tools
from tools import HFX_PATH
TOKEN = None
PIP_CHECKED = False
INSTALL_CHECKED = False
INSTALLED = []
class TokenError(Exception):
def __init__(self, *args):
tools.TOOLS_LOGGER.error(args[0])
# install handling
def downloadDependency(url, saveAs=None):
"""
Download a file required for FT.
:param url:
:param saveAs:
"""
localPath = os.path.join(tools.PACKAGES, os.path.basename(url) if not saveAs else saveAs)
return urllib.urlretrieve(url, localPath)[0]
def installPackage(package, pipName=None, test=True):
"""
Install packages into FT from pip.
:param package: the name to import the package
:param pipName: in-case the pip install name is different from the module name.
"""
global PIP_CHECKED
# check if pip is installed. This is installed at the Python installs site-packages. Everything else is installed in
# the FloatingTools/packages directory.
executable = None
args = []
if tools.activeWrapper():
if tools.activeWrapper().ARGS:
args = tools.activeWrapper().ARGS
if tools.activeWrapper().EXECUTABLE:
executable = tools.activeWrapper().EXECUTABLE
if not executable:
# determine executable from the application wrapper
executable = os.path.abspath(sys.executable)
prefix = os.listdir(sys.exec_prefix)
prefixLower = [f.lower() for f in prefix]
for possible in ['python', 'python.exe']:
if possible in prefixLower:
executable = os.path.abspath(os.path.join(sys.exec_prefix, prefix[prefixLower.index(possible)]))
break
try:
import pip
except ImportError:
tools.TOOLS_LOGGER.info("Python executable (+args) for pip install: " + executable)
# install pip
downloadPath = downloadDependency("https://raw.githubusercontent.com/aldmbmtl/tools/master/get-pip.py")
with open(downloadPath, 'r') as pipDL:
code = pipDL.read()
code = code.replace('sys.exit(pip.main(["install", "--upgrade"] + args))',
'sys.exit(pip.main(["install", "pip", "-t", "%s"]))' % tools.PACKAGES)
with open(downloadPath, 'w') as pipDL:
pipDL.write(code)
command = [os.path.abspath(executable)] + args + [downloadPath]
# execute the python pip install call
subprocess.call(command)
# delete get-pip.py
os.unlink(downloadPath)
try:
import pip
except ImportError:
        raise Exception('Pip is required for install: %s' %
                        os.path.abspath(tools.__file__ + '/../../'))
# Verify the the target package exists
try:
__import__(package)
except ImportError:
if not pipName:
pipName = package
command = [os.path.abspath(executable), os.path.dirname(pip.__file__), 'install', pipName, '-t', tools.PACKAGES, '--no-cache-dir']
tools.TOOLS_LOGGER.info('Installing: ' + pipName)
tools.TOOLS_LOGGER.info(command)
subprocess.call(command)
if test:
# verify install
__import__(package)
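# Minimal usage sketch (the package names below are illustrative, not FT
# requirements): install PyYAML into tools.PACKAGES and verify it imports
# as 'yaml'.
def _exampleInstallYaml():
    installPackage('yaml', pipName='PyYAML')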
# handle token system
def updateToken(token):
"""
For internal use.
"""
    with open(tools.HFX_TOKEN, 'w') as tokenFile:
tokenFile.write(token)
validateToken()
# relaunch the initialize process with the new token
tools.initialize()
def validateToken():
"""
Checks the token created.
"""
global TOKEN
import requests
response = requests.get(tools.buildCall('shed'), headers={'Authorization': str(TOKEN)}, verify=False)
if not TOKEN or response.status_code == 401:
if not os.path.exists(tools.HFX_TOKEN):
with open(tools.HFX_TOKEN, 'w') as tokenFile:
tokenFile.write('')
with open(tools.HFX_TOKEN, 'r') as tokenFile:
refreshToken = tokenFile.read()
tools.TOOLS_LOGGER.info('Validating Access...')
data = 'grant_type=refresh_token&client_id=2okiehdqupvt6icqil6nl255pg&refresh_token=' + refreshToken
refreshResponse = requests.post(
'https://dev-floating-tools.auth.us-west-2.amazoncognito.com/oauth2/token',
data=data,
headers={'Content-Type': 'application/x-www-form-urlencoded'}
)
if 'id_token' in refreshResponse.json():
tools.TOOLS_LOGGER.info('Access Granted...')
TOKEN = refreshResponse.json()['id_token']
return
raise TokenError('%s' % response.json()['message'])
def tokenRefresher():
try:
validateToken()
except TokenError:
if tools.activeWrapper():
tools.activeWrapper().updateToken()
else:
tools.Wrapper.updateToken()
def userToken():
"""
Grab the users token.
"""
global TOKEN
# check the token saved in memory
if not TOKEN:
tokenRefresher()
return TOKEN
# extension handling
def addExtensionPath(path):
"""
Add a custom extensions path for your scripts and modifications to FloatingTools.
:param path: str to a place on disk.
"""
if not os.path.exists(path):
tools.TOOLS_LOGGER.warning('Extension path passed does not exist: ' + path)
return
for f in os.listdir(path):
if f == 'hfx_init.py':
try:
imp.load_source('hfx_init', os.path.join(path, f))
except ImportError:
traceback.print_exc()
def loadExtensions():
if 'HFX_PATH' in os.environ:
path = os.environ['HFX_PATH']
addExtensionPath(path)
# pipeline installers
def checkInstall():
"""
Updates the existing install of the HFX pipeline.
"""
global INSTALL_CHECKED
    if INSTALL_CHECKED or 'HFX_DEV' in os.environ:
return
tools.TOOLS_LOGGER.info('Running version check...')
targetVersion = os.environ['HFX_INSTALL_VERSION']
if targetVersion == 'latest':
targetVersion = max(eval(urllib.urlopen('https://api.hatfieldfx.com/ft/releases').read()))
INSTALL_CHECKED = True
currentVersion = 'unknown'
if os.path.exists(os.path.expanduser('~/.hfx/version')):
with open(os.path.expanduser('~/.hfx/version'), 'r') as versionFile:
currentVersion = versionFile.read()
tools.TOOLS_LOGGER.info('Installed version: ' + currentVersion)
if targetVersion != currentVersion:
tools.TOOLS_LOGGER.info('Updating install: %s => %s' % (currentVersion, targetVersion))
os.environ['HFX_INSTALL_VERSION'] = targetVersion
os.environ['HFX_UPDATE'] = '1'
exec urllib.urlopen('https://raw.githubusercontent.com/aldmbmtl/tools/master/installer.py').read()
# force reload FT
imp.reload(tools)
tools.TOOLS_LOGGER.info('Upgraded to: %s' % targetVersion)
INSTALL_CHECKED = True
def installRequiredToolbox(uid, service='Local_Path', **kwargs):
"""
Install a toolbox programmatically.
:param uid: name of the toolbox or uid number. MUST BE UNIQUE!!
:param service: name of the service
:param kwargs: fields required to install the toolbox
"""
global INSTALLED
service = str(service)
envVar = 'HFX-ENV-' + str(uid)
devMode = False
serviceName = 'Local_Path'
box = None
try:
toolboxConstruct = dict(service=service, **kwargs)
if toolboxConstruct in INSTALLED:
return
INSTALLED.append(toolboxConstruct)
if envVar in os.environ:
envValue = os.environ[envVar]
|
MAndelkovic/pybinding
|
pybinding/support/alias.py
|
Python
|
bsd-2-clause
| 5,869
| 0.001704
|
import numpy as np
from scipy.sparse import csr_matrix
class AliasArray(np.ndarray):
"""An ndarray with a mapping of values to user-friendly names -- see example
This ndarray subclass enables comparing sub_id and hop_id arrays directly with
their friendly string identifiers. The mapping parameter translates sublattice
or hopping names into their number IDs.
Only the `==` and `!=` operators are overloaded to handle the aliases.
Examples
--------
>>> a = AliasArray([0, 1, 0], mapping={"A": 0, "B": 1})
>>> list(a == 0)
[True, False, True]
>>> list(a == "A")
[True, False, True]
>>> list(a != "A")
[False, True, False]
>>> a = AliasArray([0, 1, 0, 2], mapping={"A|1": 0, "B": 1, "A|2": 2})
>>> list(a == "A")
[True, False, True, True]
>>> list(a != "A")
[False, True, False, False]
"""
def __new__(cls, array, mapping):
obj = np.asarray(array).view(cls)
obj.mapping = {SplitName(k): v for k, v in mapping.items()}
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.mapping = getattr(obj, "mapping", None)
def _mapped_eq(self, other):
if other in self.mapping:
return super().__eq__(self.mapping[other])
else:
result = np.zeros(len(self), dtype=np.bool)
for k, v in self.mapping.items():
if k == other:
result = np.logical_or(result, super().__eq__(v))
return result
def __eq__(self, other):
if isinstance(other, str):
return self._mapped_eq(other)
else:
return super().__eq__(other)
def __ne__(self, other):
if isinstance(other, str):
return np.logical_not(self._mapped_eq(other))
else:
return super().__ne__(other)
# noinspection PyAbstractClass
class AliasCSRMatrix(csr_matrix):
"""Same as :class:`AliasArray` but for a CSR matrix
Examples
--------
>>> from scipy.sparse import spdiags
>>> m = AliasCSRMatrix(spdiags([1, 2, 1], [0], 3, 3), mapping={'A': 1, 'B': 2})
>>> list(m.data == 'A')
[True, False, True]
>>> list(m.tocoo().data == 'A')
[True, False, True]
>>> list(m[:2].data == 'A')
[True, False]
"""
def __init__(self, *args, **kwargs):
mapping = kwargs.pop('mapping', {})
if not mapping:
mapping = getattr(args[0], 'mapping', {})
super().__init__(*args, **kwargs)
self.data = AliasArray(self.data, mapping)
@property
def format(self):
return 'csr'
@format.setter
def format(self, _):
pass
@property
def mapping(self):
return self.data.mapping
def tocoo(self, *args, **kwargs):
coo = super().tocoo(*args, **kwargs)
coo.data = AliasArray(coo.data, mapping=self.mapping)
return coo
def __getitem__(self, item):
result = super().__getitem__(item)
if getattr(result, 'format', '') == 'csr':
return AliasCSRMatrix(result, mapping=self.mapping)
else:
return result
class AliasIndex:
"""An all-or-nothing array index based on equality with a specific value
The `==` and `!=` operators are overloaded to return a lazy array which is either
all `True` or all `False`. See the examples below. This is useful for modifiers
    where each call gets arrays with the same sub_id/hop_id for all elements.
    Instead of passing an `AliasArray` with `.size` identical elements, `AliasIndex`
does the same all-or-nothing indexing.
Examples
--------
>>> l = np.array([1, 2, 3])
>>> ai = AliasIndex("A", len(l))
>>> list(l[ai == "A"])
[1, 2, 3]
>>> list(l[ai == "B"])
[]
>>> list(l[ai != "A"])
[]
>>> list(l[ai != "B"])
[1, 2, 3]
>>> np.logical_and([True, False, True], ai == "A")
array([ True, False, True], dtype=bool)
>>> np.logical_and([True, False, True], ai != "A")
array([False, False, False], dtype=bool)
>>> bool(ai == "A")
True
>>> bool(ai != "A")
False
>>> str(ai)
'A'
>>> hash(ai) == hash("A")
True
>>> int(ai.eye)
1
>>> np.allclose(AliasIndex("A", 1, (2, 2)).eye, np.eye(2))
True
"""
class LazyArray:
def __init__(self, value, shape):
self.value = value
self.shape = shape
def __bool__(self):
return bool(self.value)
def __array__(self):
return np.full(self.shape, self.value)
def __init__(self, name, shape, orbs=(1, 1)):
self.name = name
self.shape = shape
self.orbs = orbs
def __str__(self):
return self.name
def __eq__(self, other):
return self.LazyArray(self.name == other, self.shape)
def __ne__(self, other):
return self.LazyArray(self.name != other, self.shape)
def __hash__(self):
return hash(self.name)
@property
def eye(self):
return np.eye(*self.orbs)
class SplitName(str):
"""String subclass with special support for strings of the form "first|second"
    Operators `==` and `!=` are overloaded to return `True` even if only the first
    part matches.
Examples
--------
>>> s = SplitName("first|second")
>>> s == "first|second"
True
>>> s != "first|second"
False
>>> s == "first"
True
>>> s != "first"
False
>>> s == "second"
False
>>> s != "second"
True
"""
@property
def first(self):
return self.split("|")[0]
def __eq__(self, other):
return super().__eq__(other) or self.first == other
def __ne__(self, other):
return super().__ne__(other) and self.first != other
def __hash__(self):
return super().__hash__()
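if __name__ == "__main__":
    # Small usage sketch (the sublattice names are made up): select values from a
    # plain data array using friendly names via AliasArray's overloaded `==`.
    sub_id = AliasArray([0, 1, 0, 1], mapping={"A": 0, "B": 1})
    data = np.array([1.0, 2.0, 3.0, 4.0])
    print(data[sub_id == "A"])  # -> [1. 3.]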
|
polyaxon/polyaxon-api
|
polyaxon_lib/datasets/converters/base.py
|
Python
|
mit
| 2,397
| 0.001252
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import tensorflow as tf
class BaseConverter(object):
@staticmethod
def to_int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
a TF-Feature.
"""
if not isinstance(values, list):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
@staticmethod
def to_bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
a TF-Feature.
"""
if not isinstance(values, list):
values = [values]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
@staticmethod
def to_float_feature(values):
"""Returns a TF-Feature of floats.
Args:
values: A string.
Returns:
a TF-Feature.
"""
if not isinstance(values, list):
values = [values]
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
@classmethod
def to_feature(cls, value, value_type):
        if value_type == 'int':
return cls.to_int64_feature(value)
if value_type == 'float':
            return cls.to_float_feature(value)
if value_type == 'bytes':
return cls.to_bytes_feature(value)
raise TypeError("value type: `{}` is not supported.".format(value_type))
@classmethod
def to_sequence_feature(cls, sequence, sequence_type):
"""Returns a FeatureList based on a list fo features of type sequence_type
Args:
sequence: list of values
sequence_type: type of the sequence.
Returns:
list of TF-FeatureList
"""
if sequence_type == 'int':
feature_list = [cls.to_int64_feature(i) for i in sequence]
elif sequence_type == 'float':
feature_list = [cls.to_float_feature(i) for i in sequence]
elif sequence_type == 'bytes':
feature_list = [cls.to_bytes_feature(i) for i in sequence]
else:
raise TypeError("sequence type: `{}` is not supported.".format(sequence_type))
return tf.train.FeatureList(feature=feature_list)
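if __name__ == '__main__':
    # Usage sketch (the feature names and values are made up): assemble a
    # tf.train.Example from typed values with the converters above.
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': BaseConverter.to_feature(1, 'int'),
        'image_raw': BaseConverter.to_feature([b'raw-bytes'], 'bytes'),
    }))
    print(example)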
|
scheib/chromium
|
tools/grit/grit/tool/postprocess_interface.py
|
Python
|
bsd-3-clause
| 1,031
| 0.00388
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Base class for postprocessing of RC files.
'''
from __future__ import print_function
class PostProcessor(object):
''' Base class for postprocessing of the RC file data before being
output through the RC2GRD tool. You should implement this class if
you want GRIT to do specific things to the RC files after it has
converted the data into GRD format, i.e. change the content of the
RC file, and put it into a P4 changelist, etc.'''
def Process(self, rctext, rcpath, grdnode):
''' Processes the data in rctext and grdnode.
Ar
|
gs:
rctext: string containing the contents of the RC file being processed.
rcpath: the path used to access the file.
g
|
rdtext: the root node of the grd xml data generated by
the rc2grd tool.
Return:
The root node of the processed GRD tree.
'''
raise NotImplementedError()
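# A minimal sketch of a concrete postprocessor (the class below is hypothetical,
# not part of GRIT): it simply returns the GRD tree unchanged.
class NoOpPostProcessor(PostProcessor):
  ''' Example subclass that leaves the GRD tree untouched. '''
  def Process(self, rctext, rcpath, grdnode):
    return grdnode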
|
mindriot101/bokeh
|
examples/plotting/file/pie.py
|
Python
|
bsd-3-clause
| 1,053
| 0.004748
|
from math import pi
import pandas as pd
from bokeh.io import output_file, show
from bokeh.palettes import Category20c
from bokeh.plotting import figure
from bokeh.transform import cumsum
output_file("pie.html")
x = {
'United States': 157,
'United Kingdom': 93,
'Japan': 89,
'China': 63,
'Germany': 44,
'India': 42,
'Italy': 40,
'Australia': 35,
'Brazil': 32,
'France': 31,
'Taiwan': 31,
'Spain': 29
}
data = pd.Series(x).reset_index(name='value').rename(columns={'index':'country'})
data['angle'] = data['value']/data['value'].sum() * 2*pi
data['color'] = Category20c[len(x)]
p = figure(plot_height=350, title="Pie Chart", toolbar_location=None,
tools="hover", tooltips="@country: @value", x_range=(-0.5, 1.0))
p.wedge(x=0, y=1, radius=0.4,
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend='country', source=data)
p.axis.axis_label=None
p.axis.visible=False
p.grid.grid_line_color = None
show(p)
|
piquadrat/django
|
tests/postgres_tests/test_array.py
|
Python
|
bsd-3-clause
| 34,363
| 0.001659
|
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import checks, exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, modify_settings, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLTestCase, PostgreSQLWidgetTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel, Tag,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import (
SimpleArrayField, SplitArrayField, SplitArrayWidget,
)
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field('field')
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
NullableIntegerArrayModel.objects.create(field=[1]),
NullableIntegerArrayModel.objects.create(field=[2]),
NullableIntegerArrayModel.objects.create(field=[2, 3]),
NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
NullableIntegerArrayModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=['text'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=['text']),
[instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
[instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
            self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
def test_in_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__in=IntegerArrayModel.objects.all().values_list('field', flat=True)
),
self.objs[2:3]
)
@unittest.expectedFailure
def test_in_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F('id')]]),
self.objs[:2]
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F('field')]),
self.objs[:4]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
@unittest.expectedFailure
def test_contained_by_including_F_object(self):
# This test asserts that Array objects passed to filters can be
# constructed to contain F objects. This currently doesn't work as the
# psycopg2 mogrify method that generates the ARRAY() syntax is
# expecting literals, not column references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[models.F('id'), 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=['Fo
|
weso/CWR-DataApi
|
tests/parser/dictionary/encoder/record/test_component.py
|
Python
|
mit
| 2,680
| 0.000373
|
# -*- coding: utf-8 -*-
import unittest
import datetime
from cwr.parser.encoder.dictionary import ComponentDictionaryEncoder
from cwr.work import ComponentRecord
"""
ComponentRecord to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestComponentRecordDictionaryEncoding(unittest.TestCase):
def setUp(self):
self._encoder = ComponentDictionaryEncoder()
def test_encoded(self):
data = ComponentRecord(record_type='COM',
transaction_sequence_n=3,
record_sequence_n=15,
                               title='TITLE',
writer_1_last_name='LAST NAME 1',
submitter_work_n='ABCD123',
writer_1_first_name='FIRST NAME 1',
writer_2_first_name='FIRST NAME 2',
writer_2_last_name='LAST NAME 2',
writer_1_ipi_base_n='I-000000229-7',
writer_1_ipi_name_n=14107338,
writer_2_ipi_base_n='I-000000339-7',
writer_2_ipi_name_n=14107400,
iswc='T0123456789',
duration=datetime.datetime.strptime('011200',
'%H%M%S').time())
encoded = self._encoder.encode(data)
self.assertEqual('COM', encoded['record_type'])
self.assertEqual(3, encoded['transaction_sequence_n'])
self.assertEqual(15, encoded['record_sequence_n'])
self.assertEqual('TITLE', encoded['title'])
self.assertEqual('LAST NAME 1', encoded['writer_1_last_name'])
self.assertEqual('ABCD123', encoded['submitter_work_n'])
self.assertEqual('FIRST NAME 1', encoded['writer_1_first_name'])
self.assertEqual('FIRST NAME 2', encoded['writer_2_first_name'])
self.assertEqual('LAST NAME 2', encoded['writer_2_last_name'])
self.assertEqual(14107338, encoded['writer_1_ipi_name_n'])
self.assertEqual(14107400, encoded['writer_2_ipi_name_n'])
self.assertEqual(datetime.datetime.strptime('011200', '%H%M%S').time(),
encoded['duration'])
self.assertEqual('I-000000229-7', encoded['writer_1_ipi_base_n'])
self.assertEqual('I-000000339-7', encoded['writer_2_ipi_base_n'])
self.assertEqual('T0123456789', encoded['iswc'])
|
missionpinball/mpf
|
mpf/platforms/opp/opp.py
|
Python
|
mit
| 53,276
| 0.003942
|
# pylint: disable-msg=too-many-lines
"""OPP Hardware interface.
Contains the hardware interface and drivers for the Open Pinball Project
platform hardware, including the solenoid, input, incandescent, and neopixel
boards.
"""
import asyncio
from collections import defaultdict
from typing import Dict, List, Set, Union, Tuple, Optional # pylint: disable-msg=cyclic-import,unused-import
from mpf.core.platform_batch_light_system import PlatformBatchLightSystem
from mpf.core.utility_functions import Util
from mpf.platforms.base_serial_communicator import HEX_FORMAT
from mpf.platforms.interfaces.driver_platform_interface import PulseSettings, HoldSettings
from mpf.platforms.opp.opp_coil import OPPSolenoidCard
from mpf.platforms.opp.opp_incand import OPPIncandCard
from mpf.platforms.opp.opp_modern_lights import OPPModernLightChannel, OPPNeopixelCard, OPPModernMatrixLightsCard
from mpf.platforms.opp.opp_serial_communicator import OPPSerialCommunicator, BAD_FW_VERSION
from mpf.platforms.opp.opp_switch import OPPInputCard
from mpf.platforms.opp.opp_switch import OPPMatrixCard
from mpf.platforms.opp.opp_rs232_intf import OppRs232Intf
from mpf.core.platform import SwitchPlatform, DriverPlatform, LightsPlatform, SwitchSettings, DriverSettings, \
DriverConfig, SwitchConfig, RepulseSettings
MYPY = False
if MYPY: # pragma: no cover
from mpf.platforms.opp.opp_coil import OPPSolenoid # pylint: disable-msg=cyclic-import,unused-import
from mpf.platforms.opp.opp_incand import OPPIncand # pylint: disable-msg=cyclic-import,unused-import
from mpf.platforms.opp.opp_switch import OPPSwitch # pylint: disable-msg=cyclic-import,unused-import
# pylint: disable-msg=too-many-instance-attributes
class OppHardwarePlatform(LightsPlatform, SwitchPlatform, DriverPlatform):
"""Platform class for the OPP hardware.
Args:
----
machine: The main ``MachineController`` instance.
"""
__slots__ = ["opp_connection", "serial_connections", "opp_incands", "opp_solenoid", "sol_dict",
"opp_inputs", "inp_dict", "inp_addr_dict", "matrix_inp_addr_dict", "read_input_msg",
"neo_card_dict", "num_gen2_brd", "gen2_addr_arr", "bad_crc", "min_version", "_poll_task",
"config", "_poll_response_received", "machine_type", "opp_commands", "_incand_task", "_light_system",
"matrix_light_cards"]
def __init__(self, machine) -> None:
"""Initialise OPP platform."""
super().__init__(machine)
self.opp_connection = {} # type: Dict[str, OPPSerialCommunicator]
self.serial_connections = set() # type: Set[OPPSerialCommunicator]
self.opp_incands = dict() # type: Dict[str, OPPIncandCard]
self.opp_solenoid = [] # type: List[OPPSolenoidCard]
self.sol_dict = dict() # type: Dict[str, OPPSolenoid]
self.opp_inputs = [] # type: List[Union[OPPInputCard, OPPMatrixCard]]
self.inp_dict = dict() # type: Dict[str, OPPSwitch]
self.inp_addr_dict = dict() # type: Dict[str, OPPInputCard]
self.matrix_inp_addr_dict = dict() # type: Dict[str, OPPMatrixCard]
self.read_input_msg = {} # type: Dict[str, bytes]
self.neo_card_dict = dict() # type: Dict[str, OPPNeopixelCard]
self.matrix_light_cards = dict() # type: Dict[str, OPPModernMatrixLightsCard]
self.num_gen2_brd = 0
self.gen2_addr_arr = {} # type: Dict[str, Dict[int, Optional[int]]]
self.bad_crc = defaultdict(lambda: 0)
self.min_version = defaultdict(lambda: 0xffffffff) # type: Dict[str, int]
self._poll_task = {} # type: Dict[str, asyncio.Task]
self._incand_task = None # type: Optional[asyncio.Task]
self._light_system = None # type: Optional[PlatformBatchLightSystem]
self.features['tickless'] = True
self.config = self.machine.config_validator.validate_config("opp", self.machine.config.get('opp', {}))
self._configure_device_logging_and_debug("OPP", self.config)
self._poll_response_received = {} # type: Dict[str, asyncio.Event]
assert self.log is not None
if self.config['driverboards']:
self.machine_type = self.config['driverboards']
else:
self.machine_type = self.machine.config['hardware']['driverboards'].lower()
if self.machine_type == 'gen1':
raise AssertionError("Original OPP boards not currently supported.")
if self.machine_type == 'gen2':
self.debug_log("Configuring the OPP Gen2 boards")
else:
self.raise_config_error('Invalid driverboards type: {}'.format(self.machine_type), 15)
# Only including responses that should be received
self.opp_commands = {
ord(OppRs232Intf.INV_CMD): self.inv_resp,
ord(OppRs232Intf.EOM_CMD): self.eom_resp,
            ord(OppRs232Intf.GET_GEN2_CFG): self.get_gen2_cfg_resp,
ord(OppRs232Intf.READ_GEN2_INP_CMD): self.read_gen2_inp_resp_initial,
ord(OppRs232Intf.GET_VERS_CMD): self.vers_resp,
ord(OppRs232Intf.READ_MATRIX_INP): self.read_matrix_inp_resp_initial,
}
async def initialize(self):
"""Initialis
|
e connections to OPP hardware."""
await self._connect_to_hardware()
self.opp_commands[ord(OppRs232Intf.READ_GEN2_INP_CMD)] = self.read_gen2_inp_resp
self.opp_commands[ord(OppRs232Intf.READ_MATRIX_INP)] = self.read_matrix_inp_resp
self._light_system = PlatformBatchLightSystem(self.machine.clock, self._send_multiple_light_update,
self.machine.config['mpf']['default_light_hw_update_hz'],
128)
async def _send_multiple_light_update(self, sequential_brightness_list: List[Tuple[OPPModernLightChannel,
float, int]]):
first_light, _, common_fade_ms = sequential_brightness_list[0]
number_leds = len(sequential_brightness_list)
msg = bytearray()
msg.append(int(ord(OppRs232Intf.CARD_ID_GEN2_CARD) + first_light.addr))
msg.append(OppRs232Intf.SERIAL_LED_CMD_FADE)
msg.append(int(first_light.pixel_num / 256))
msg.append(int(first_light.pixel_num % 256))
msg.append(int(number_leds / 256))
msg.append(int(number_leds % 256))
msg.append(int(common_fade_ms / 256))
msg.append(int(common_fade_ms % 256))
for _, brightness, _ in sequential_brightness_list:
msg.append(int(brightness * 255))
msg.extend(OppRs232Intf.calc_crc8_whole_msg(msg))
cmd = bytes(msg)
if self.debug:
self.debug_log("Set color on %s: %s", first_light.chain_serial, "".join(HEX_FORMAT % b for b in cmd))
self.send_to_processor(first_light.chain_serial, cmd)
async def start(self):
"""Start polling and listening for commands."""
# start polling
for chain_serial in self.read_input_msg:
self._poll_task[chain_serial] = self.machine.clock.loop.create_task(self._poll_sender(chain_serial))
self._poll_task[chain_serial].add_done_callback(Util.raise_exceptions)
# start listening for commands
for connection in self.serial_connections:
await connection.start_read_loop()
if [version for version in self.min_version.values() if version < 0x02010000]:
# if we run any CPUs with firmware prior to 2.1.0 start incands updater
self._incand_task = self.machine.clock.schedule_interval(self.update_incand,
1 / self.config['incand_update_hz'])
self._light_system.start()
def stop(self):
"""Stop hardware and close connections."""
if self._light_system:
self._light_system.stop()
for task in self._poll_task.values():
task.
|
PhenoImageShare/PhenoImageShare
|
VFB_import/src/VFB2PhisXML.py
|
Python
|
apache-2.0
| 15,991
| 0.00863
|
#!/usr/bin/env python
import sys
sys.path.append("../build/")
import phisSchema
import pyxb
import warnings
# Strategy:
# Perhaps cleanest would be to build a separate interface for data that may vary from VFB.
# This also allows separation of Jython code
# OTOH - this gives another layer of mappings to maintain.
# Sketch of interface:
# minimal vars to set (for now):
# image_id, image URL, source links; expressed feature (+ its type - gene or transgene); classification of struc & overlapped region
# Generator functions live outside the classes. They generate objects that must then be bound.
def gen_OntologyTerm(id_name, ID):
"""Takes id_name lookup dict for ontology terms and an ID
Returns a phisSchema.OntologyTerm object"""
ot = phisSchema.OntologyTerm()
ot.termId = ID
ot.termLabel = id_name[ID]
return ot
def gen_Link(display_name, url):
"""Takes display_name and URI as args and returns a phisSchema.Link object"""
gen_Link = phisSchema.Link()
gen_Link.display_name = display_name
gen_Link.url = url
return gen_Link
def gen_Annotation(ot, text, mode):
"""Generate a phisSchema.Annotation object based on specified:
ot: ontology term
text: free text
mode: Manual/Automated"""
annotation = phisSchema.Annotation()
annotation.annotation_freetext = text
annotation.ontology_term = ot
annotation.annotationMode = mode
return annotation
def gen_roi_Coordinates(x, y, z):
"""Generate a phisSchema.Coordinates object for an roi
Each arg specifies a range in the form of a list or tuple
with 2 elements
"""
try:
assert len(x) == 2
assert len(y) == 2
assert len(z) == 2
except:
warnings.warn("Percent arrays should have only 2 members - specifying a range.")
coord = phisSchema.Coordinates()
coord.x_coordinates = _gen_PercentArray(*x)
coord.y_coordinates = _gen_PercentArray(*y)
coord.z_coordinates = _gen_PercentArray(*z)
return coord
def _gen_PercentArray(a, b):
AB = (a, b)
pa = phisSchema.PercentArray()
pa.extend(AB)
return pa
def gen_GenotypeComponent(gf_symbol=False, gf_id=False, gene_symbol=False, gene_id=False, gf_ensembl_id=False):
## How to specify channel. Use defaults? ###
"""Generate a phisSchema.GenotypeComponent object.
All args are strings. Please specify each arg with a keyword
"""
gc = phisSchema.GenotypeComponent()
if gene_id:
gc.gene_id = gene_id
if gene_symbol:
gc.gene_symbol = gene_symbol
if gf_symbol:
gc.genetic_feature_symbol = gf_symbol
if gf_id:
gc.genetic_feature_id = gf_id
if gf_ensembl_id:
gc.genetic_feature_ensembl_id = gf_ensembl_id
return gc
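def _example_annotation(ont_dict):
    """Illustrative only (the term ID and free text are made-up values): build an
    Annotation from an OntologyTerm produced by the helpers above."""
    ot = gen_OntologyTerm(ont_dict, "FBbt_00003624")
    return gen_Annotation(ot, "expression in example neuropil", "Manual")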
class imageDataSet():
"""Class to use for generating sets of images from a common source.
Assumes all datasets have common source name and URL.
And that they share a background channel marker and visualization methods
for background and signal channels. All of these are set by methods rather than KWARGS.
"""
# May not be worth bothering with a class here
def __init__(self, ont_dict):
### Do we have a way to distinguish general source from specific source links?
self.doc = phisSchema.Doc()
self.source = ''
self.background_channel_marker = ''
self.signal_channel_visualisation_methods = []
self.background_channel_visualisation_methods = []
self.ont_dict = ont_dict
def set_source(self, source_name, source_url):
"""source_name and source_url are strings"""
self.source = gen_Link(source_name, source_url)
def set_background_channel_marker(self, genotype_component):
"""Takes a phisSchema.genotypeComponent object as an arg"""
self.background_channel_marker = genotype_component
def add_signal_channel_visualisation_method(self, sfid):
"""sfid is the shortFormId o
|
f and FBbi visualisation method"""
self.signal_channel_visualisation_methods.append(gen_OntologyTerm(self.ont_dict, sfid))
def add_background_channel_visualisation_method(self, sfid):
"""sfid is the shortFormId of and FBbi visualisation method"""
self.background_channel_visualisation_methods.append(gen_OntologyTerm(self.ont_dict, sfid))
class VfbImage():
"""Interface class for loading VFB data.
Assumes 3D confocal image with 2 channels -
a background stain channel and a signal channel
depicting some interesting expression/anatomy"""
# Define constants here: Or should this just jump straight to populating model?
host = gen_Link("Virtual Fly Brain", "http://www.virtualflybrain.org") # for image_description.host
def __init__(self, ont, image_dataset):
"""ont: an ID:name dict of ontology terms used in XML to be produced
d: A image_dataset object
"""
self.ont = ont
self._initialise_image()
self._unpack_image_dataset(image_dataset)
self.image.image_description.host = self.host
def _unpack_image_dataset(self, image_dataset):
self.set_source(image_dataset.source)
# self.set_signal_channel_visualisation_method(image_dataset.) # Needs extend rather than append?
# self.set_background_channel_visualisation_method(image_dataset.) # Needs extend rather than append?
self.set_expressed_feature_for_background_channel(image_dataset.background_channel_marker)
def set_organism(self, stage, sex):
"""stage must be a phisSchema.ontologyTerm object; sex must be the string 'Male' or 'Female'"""
organism = phisSchema.Organism()
organism.taxon = "Drosophila melanogaster"
organism.sex = sex
organism.ncbi_taxon_id = "NCBItaxon_7227"
organism.stage=stage
self.image.organism = organism
def _initialise_image(self):
"""Assume 2 channels each with an associated ROI at 100%.
All objects generated by multiple iterations appended to common doc.
Generate IDs for two channels and corresponding ROIs according to the scheme:
image_id-a/b roi_id-a/b; channel_id-a/b - where id = self.vfb_image_id.
channel1/roi1 = background. channel2/roi2 = signal."""
# Generate Root objects
self.image = phisSchema.Image()
self.channel1 = phisSchema.Channel()
self.channel2 = phisSchema.Channel()
self.roi1 = phisSchema.Roi()
self.roi2 = phisSchema.Roi()
# bind root objects to doc
# Which pattern??
# This doesn't work for multiple images rois: self.doc.append(image)
# Need to work on checking the more obvious self.doc.image.append(self.image)
self.doc.image.append(self.image)
self.doc.channel.append(self.channel1)
self.doc.channel.append(self.channel2)
self.doc.roi.append(self.roi1)
self.doc.roi.append(self.roi2)
# Populate IDs
self.image.id = "image_" + self.vfb_image_id
self.channel1.id = "channel_" + self.vfb_image_id + "-a"
self.channel2.id = "channel_" + self.vfb_image_id + "-b"
self.roi1.id = "roi_" + self.vfb_image_id + "-a"
self.roi2.id = "roi_" + self.vfb_image_id + "-b"
self.image.associated_roi = pyxb.BIND() # Special magic
self.image.associated_roi.el.append(self.roi1.id) # Is this correct, or should I be populating a string array and appending that?
self.image.associated_roi.el.append(self.roi2.id)
self.image.associated_channel = pyxb.BIND()
self.image.associated_channel.el.append(self.channel1.id)
self.image.associated_channel.el.append(self.channel2.id)
self.channel1.associated_image = self.image.id
self.channel2.associated_image = self.image.id
self.roi1.associated_image = self.image.id
self.roi2.associated_image = self.image.id
self.roi1.associated_channel = pyxb.BIND()
self.roi1.associated_channel.el.append(self.channel1.id)
|
t3dev/odoo
|
addons/test_mass_mailing/tests/test_mail_auto_blacklist.py
|
Python
|
gpl-3.0
| 4,008
| 0.002994
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common
import datetime
class TestAutoBlacklist(common.TransactionCase):
def test_mail_bounced_auto_blacklist(self):
        mass_mailing_contacts = self.env['mail.mass_mailing.contact']
mass_mailing = self.env['mail.mass_mailing']
mail_blacklist = self.env['mail.blacklist']
mail_statistics = self.env['mail.mail.statistics']
mail_thread = self.env['mail.thread']
# create mailing contact record
self.mailing_contact_1 = mass_mailing_contacts.create({'name': 'test email 1', 'email': 'Test1@email.com'})
# create bounced history
mail_statistics.create({
'model': 'mail.mass_mailing.contact',
'res_id': self.mailing_contact_1.id,
'bounced': datetime.datetime.now() - datetime.timedelta(weeks=2),
'email': self.mailing_contact_1.email
})
self.mailing_contact_1.message_receive_bounce(self.mailing_contact_1.email, self.mailing_contact_1)
mail_statistics.create({
'model': 'mail.mass_mailing.contact',
'res_id': self.mailing_contact_1.id,
'bounced': datetime.datetime.now() - datetime.timedelta(weeks=3),
'email': self.mailing_contact_1.email
})
self.mailing_contact_1.message_receive_bounce(self.mailing_contact_1.email, self.mailing_contact_1)
mail_statistics.create({
'model': 'mail.mass_mailing.contact',
'res_id': self.mailing_contact_1.id,
'bounced': datetime.datetime.now() - datetime.timedelta(weeks=4),
'email': self.mailing_contact_1.email
})
self.mailing_contact_1.message_receive_bounce(self.mailing_contact_1.email, self.mailing_contact_1)
mail_statistics.create({
'model': 'mail.mass_mailing.contact',
'res_id': self.mailing_contact_1.id,
'bounced': datetime.datetime.now() - datetime.timedelta(weeks=5),
'email': self.mailing_contact_1.email
})
self.mailing_contact_1.message_receive_bounce(self.mailing_contact_1.email, self.mailing_contact_1)
# create mass mailing record
self.mass_mailing = mass_mailing.create({
'name': 'test',
'subject': 'Booooounce!',
'mailing_domain': [('id', 'in',
[self.mailing_contact_1.id])],
'body_html': 'This is a bounced mail for auto blacklist demo'})
self.mass_mailing.put_in_queue()
res_ids = self.mass_mailing.get_remaining_recipients()
composer_values = {
'body': self.mass_mailing.convert_links()[self.mass_mailing.id],
'subject': self.mass_mailing.name,
'model': self.mass_mailing.mailing_model_real,
'email_from': self.mass_mailing.email_from,
'composition_mode': 'mass_mail',
'mass_mailing_id': self.mass_mailing.id,
'mailing_list_ids': [(4, l.id) for l in self.mass_mailing.contact_list_ids],
}
composer = self.env['mail.compose.message'].with_context(
active_ids=res_ids,
mass_mailing_seen_list=self.mass_mailing._get_seen_list()
).create(composer_values)
composer.send_mail()
mail_statistics.create({
'model': 'mail.mass_mailing.contact',
'res_id': self.mailing_contact_1.id,
'bounced': datetime.datetime.now(),
'email': self.mailing_contact_1.email
})
# call bounced
self.mailing_contact_1.message_receive_bounce(self.mailing_contact_1.email, self.mailing_contact_1)
# check blacklist
blacklist_record = mail_blacklist.search([('email', '=', self.mailing_contact_1.email)])
self.assertEqual(len(blacklist_record), 1,
'The email %s must be blacklisted' % self.mailing_contact_1.email)
|
wikimedia/pywikibot-wikibase
|
pywikibase/wbtime.py
|
Python
|
mit
| 4,755
| 0
|
# -*- coding: utf-8 -*-
"""
A Wikibase time representation (WbTime).
"""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
import re
import json
try:
long
except NameError:
long = int
class WbTime(object):
"""A Wikibase time representation."""
PRECISION = {'1000000000': 0,
'100000000': 1,
'10000000': 2,
'1000000': 3,
'100000': 4,
'10000': 5,
'millenia': 6,
'century': 7,
'decade': 8,
'year': 9,
'month': 10,
'day': 11,
'hour': 12,
'minute': 13,
'second': 14
}
FORMATSTR = '{0:+04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
def __init__(self, year=None, month=None, day=None,
hour=None, minute=None, second=None,
precision=None, before=0, after=0,
timezone=0, calendarmodel=None):
"""
Create a new WbTime object.
The precision can be set by the Wikibase int value (0-14) or by a human
readable string, e.g., 'hour'. If no precision is given, it is set
according to the given time units.
"""
if year is None:
raise ValueError('no year given')
self.precision = self.PRECISION['second']
if second is None:
self.precision = self.PRECISION['minute']
second = 0
if minute is None:
self.precision = self.PRECISION['hour']
minute = 0
if hour is None:
self.precision = self.PRECISION['day']
hour = 0
if day is None:
self.precision = self.PRECISION['month']
day = 1
if month is None:
self.precision = self.PRECISION['year']
month = 1
self.year = long(year)
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.after = after
self.before = before
self.timezone = timezone
        self.calendarmodel = calendarmodel
# if precision is given it overwrites the autodetection above
if precision is not None:
if (isinstance(precision, int) and
precision in self.PRECISION.values()):
self.precision = precision
elif precision in self.PRECISION:
self.precision = self.PRECISION[precision]
else:
raise ValueError('Invalid precision: "%s"' % precision)
@classmethod
def fromTimestr(cls, datetimestr, precision=14, before=0, after=0,
timezone=0, calendarmodel=None):
match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z',
datetimestr)
if not match:
raise ValueError(u"Invalid format: '%s'" % datetimestr)
t = match.groups()
return cls(long(t[0]), int(t[1]), int(t[2]),
int(t[3]), int(t[4]), int(t[5]),
precision, before, after, timezone, calendarmodel)
def toTimestr(self):
"""
Convert the data to a UTC date/time string.
@return: str
"""
return self.FORMATSTR.format(self.year, self.month, self.day,
self.hour, self.minute, self.second)
def toWikibase(self):
"""
Convert the data to a JSON object for the Wikibase API.
@return: dict
"""
json = {'time': self.toTimestr(),
'precision': self.precision,
'after': self.after,
'before': self.before,
'timezone': self.timezone,
'calendarmodel': self.calendarmodel
}
return json
@classmethod
def fromWikibase(cls, ts):
return cls.fromTimestr(ts[u'time'], ts[u'precision'],
ts[u'before'], ts[u'after'],
ts[u'timezone'], ts[u'calendarmodel'])
def __str__(self):
return json.dumps(self.toWikibase(), indent=4, sort_keys=True,
separators=(',', ': '))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return u"WbTime(year=%(year)d, month=%(month)d, day=%(day)d, " \
u"hour=%(hour)d, minute=%(minute)d, second=%(second)d, " \
u"precision=%(precision)d, before=%(before)d, after=%(after)d, " \
u"timezone=%(timezone)d, calendarmodel='%(calendarmodel)s')" \
% self.__dict__
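if __name__ == '__main__':
    # Usage sketch (the calendar model URI is shown only as an example value):
    # precision is auto-detected from the most specific unit given.
    t = WbTime(year=2016, month=1, day=1,
               calendarmodel='http://www.wikidata.org/entity/Q1985727')
    print(t.toTimestr())   # +2016-01-01T00:00:00Z
    print(t.precision)     # 11 (day)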
|
YiqunPeng/Leetcode-pyq
|
solutions/229MajorityElementII.py
|
Python
|
gpl-3.0
| 671
| 0.007452
|
class Solution:
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
num1, cnt1 = 0, 0
num2, cnt2 = 1, 0
for num in nums:
if num == num1:
cnt1 += 1
elif num == num2:
cnt2 += 1
else:
if cnt1 == 0:
                    num1, cnt1 = num, 1
elif cnt2 == 0:
num2, cnt2 = num, 1
else:
cnt1, cnt2 = cnt1 - 1, cnt2 - 1
return [num for num in (num1, num2) if nums.count(num) > len(nums) // 3]
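# Quick sanity check of the extended Boyer-Moore voting above (illustrative
# inputs; candidate order in the result follows the two counter slots):
if __name__ == '__main__':
    print(Solution().majorityElement([3, 2, 3]))                  # [3]
    print(Solution().majorityElement([1, 1, 1, 3, 3, 2, 2, 2]))   # [2, 1]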
|
OpusVL/odoo
|
addons/share/wizard/share_wizard.py
|
Python
|
agpl-3.0
| 50,754
| 0.006147
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
import time
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import api
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
UID_ROOT = 1
# Pseudo-domain to represent an empty filter, constructed using
# osv.expression's DUMMY_LEAF
DOMAIN_ALL = [(1, '=', 1)]
# A good selection of easy to read password characters (e.g. no '0' vs 'O', etc.)
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
def generate_random_pass():
return ''.join(random.sample(RANDOM_PASS_CHARACTERS,10))
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, [uid], ['groups_id'], context=context)[0]['groups_id']
def has_share(self, cr, uid, unused_param, context=None):
return self.has_group(cr, uid, module='base', group_xml_id='group_no_one', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
@api.cr_uid_ids_context
def share_url_template(self, cr, uid, _ids, context=None):
# NOTE: take _ids in parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
def _generate_embedded_code(self, wizard, options=None):
cr, uid, context = wizard.env.args
if options is None:
options = {}
js_options = {}
title = options['title'] if 'title' in options else wizard.embed_option_title
search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
if not title:
js_options['display_title'] = False
if search:
js_options['search_view'] = True
js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
user = wizard.result_line_ids[0]
return """
<script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
<script type="text/javascript">
new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
</script> """ % {
'init': simplejson.dumps(openerp.conf.server_wide_modules),
'base_url': base_url or '',
'server': simplejson.dumps(base_url),
'dbname': simplejson.dumps(cr.dbname),
'login': simplejson.dumps(user.login),
'password': simplejson.dumps(user.password),
'action': user.user_id.action_id.id,
'options': js_options_str,
}
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to s
|
hare."),
'view_type': fields.char('Current View Type',
required=True),
'domain': fields.char('Domain', help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
|
nwjs/chromium.src
|
content/test/gpu/flake_suppressor/data_types_unittest.py
|
Python
|
bsd-3-clause
| 2,947
| 0.004411
|
#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from flake_suppressor import data_types
class ExpectationUnittest(unittest.TestCase):
def testAppliesToResultNonResult(self):
"""Tests that AppliesToResult properly fails when given a non-Result."""
e = data_types.Expectation('test', ['win', 'nvidia'], ['Failure'])
with self.assertRaises(AssertionError):
e.AppliesToResult(None)
def testAppliesToResultApplies(self):
"""Tests that AppliesToResult properly returns True on expected Results."""
# Exact match.
e = data_types.Expectation('test', ['win', 'nvidia'], ['Failure'])
r = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
self.assertTrue(e.AppliesToResult(r))
# Tag subset
r = data_types.Result('suite', 'test', ('win', 'nvidia', 'release'), 'id')
self.assertTrue(e.AppliesToResult(r))
# Glob match
e = data_types.Expectation('t*', ['win', 'nvidia'], ['Failure'])
self.assertTrue(e.AppliesToResult(r))
def testAppliesToResultDoesNotApply(self):
"""Tests that AppliesToResult properly returns False on expected Results."""
# Name mismatch
e = data_types.Expectation('test', ['win', 'nvidia'], ['Failure'])
r = data_types.Result('suite', 'notatest', ('win', 'nvidia'), 'id')
self.assertFalse(e.AppliesToResult(r))
# Tag superset
r = data_types.Result('suite', 'test', tuple(['win']), 'id')
self.assertFalse(e.AppliesToResult(r))
class ResultUnittest(unittest.TestCase):
def testTupleEnforced(self):
"""Tests that tags must be in a tuple."""
with self.assertRaises(AssertionError):
_ = data_types.Result('suite', 'test', ['win', 'nvidia'], 'id')
def testWildcardsDisallowed(self):
with self.assertRaises(AssertionE
|
rror):
_ = data_types.Result('suite', 't*', ('win', 'nvidia'), 'id')
def testHashability(self):
"""Tests that Result objects are hashable."""
r = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
_ = set([r])
def testEquality(self):
"""Tests that
|
equality is properly calculated."""
r = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
other = data_types.Result('suite', 'test', ('win', 'nvidia'), 'id')
self.assertEqual(r, other)
other = data_types.Result('notsuite', 'test', ('win', 'nvidia'), 'id')
self.assertNotEqual(r, other)
other = data_types.Result('suite', 'nottest', ('win', 'nvidia'), 'id')
self.assertNotEqual(r, other)
other = data_types.Result('suite', 'test', tuple(['win']), 'id')
self.assertNotEqual(r, other)
other = data_types.Result('suite', 'test', ('win', 'nvidia'), 'notid')
self.assertNotEqual(r, other)
other = None
self.assertNotEqual(r, other)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
krischer/LASIF
|
lasif/window_selection.py
|
Python
|
gpl-3.0
| 36,635
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Window selection algorithm.
This module aims to provide a window selection algorithm suitable for
calculating phase misfits between two seismic waveforms.
The main function is the select_windows() function. The selection process is a
multi-stage process. Initially all time steps are considered to be valid in
the sense of being suitable for window selection. Then a number of selectors
are applied, progressively excluding more and more time steps.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
import itertools
import math
import numpy as np
from obspy import geodetics
import obspy.signal.filter
from scipy.signal import argrelextrema
def flatnotmasked_contiguous(time_windows):
"""
    Helper function that allows looping over empty time windows.
"""
fc = np.ma.flatnotmasked_contiguous(time_windows)
# If nothing could be found, set the mask to true (which should already
# be the case).
if fc is None:
return []
else:
return fc
def find_local_extrema(data):
"""
Function finding local extrema. It can also deal with flat extrema,
e.g. a flat top or bottom. In that case the first index of all flat
values will be returned.
Returns a tuple of maxima and minima indices.
"""
length = len(data) - 1
diff = np.diff(data)
flats = np.argwhere(diff == 0)
# Discard neighbouring flat points.
new_flats = list(flats[0:1])
for i, j in zip(flats[:-1], flats[1:]):
if j - i == 1:
continue
new_flats.append(j)
flats = new_flats
maxima = []
minima = []
    # Go over each flat position and check if it is a maximum/minimum.
for idx in flats:
l_type = "left"
r_type = "right"
for i in itertools.count():
this_idx = idx - i - 1
if diff[this_idx] < 0:
l_type = "minima"
break
elif diff[this_idx] > 0:
l_type = "maxima"
break
for i in itertools.count():
this_idx = idx + i + 1
if this_idx >= len(diff):
break
if diff[this_idx] < 0:
r_type = "maxima"
break
elif diff[this_idx] > 0:
r_type = "minima"
break
if r_type != l_type:
continue
if r_type == "maxima":
maxima.append(int(idx))
else:
minima.append(int(idx))
maxs = set(list(argrelextrema(data, np.greater)[0]))
mins = set(list(argrelextrema(data, np.less)[0]))
peaks, troughs = (
sorted(list(maxs.union(set(maxima)))),
sorted(list(mins.union(set(minima)))))
# Special case handling for missing one or the other.
if not peaks and not troughs:
return np.array([], dtype=np.int32), np.array([], dtype=np.int32)
elif not peaks:
if 0 not in troughs:
peaks.insert(0, 0)
if length not in troughs:
peaks.append(length)
return (np.array(peaks, dtype=np.int32),
np.array(troughs, dtype=np.int32))
elif not troughs:
if 0 not in peaks:
troughs.insert(0, 0)
if length not in peaks:
troughs.append(length)
return (np.array(peaks, dtype=np.int32),
np.array(troughs, dtype=np.int32))
# Mark the first and last values as well to facilitate the peak and
# trough marching algorithm
if 0 not in peaks and 0 not in troughs:
if peaks[0] < troughs[0]:
troughs.insert(0, 0)
else:
peaks.insert(0, 0)
if length not in peaks and length not in troughs:
if peaks[-1] < troughs[-1]:
peaks.append(length)
else:
troughs.append(length)
return (np.array(peaks, dtype=np.int32),
np.array(troughs, dtype=np.int32))
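# Worked example (illustrative only, not part of the original module): for
# data = np.array([0., 1., 0., 1., 0.]) this returns peaks [1, 3] and
# troughs [0, 2, 4]; the first and last samples are marked as troughs so that
# the peak/trough marching always starts and ends on an extremum.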
def find_closest(ref_array, target):
"""
For every value in target, find the index of ref_array to which
the value is closest.
from http://stackoverflow.com/a/8929827/1657047
:param ref_array: The reference array. Must be sorted!
:type ref_array: :class:`numpy.ndarray`
:param target: The target array.
:type target: :class:`numpy.ndarray`
>>> ref_array = np.arange(0, 20.)
>>> target = np.array([-2, 100., 2., 2.4, 2.5, 2.6])
>>> find_closest(ref_array, target)
array([ 0, 19, 2, 2, 3, 3])
"""
    # ref_array must be sorted
idx = ref_array.searchsorted(target)
idx = np.clip(idx, 1, len(ref_array) - 1)
left = ref_array[idx - 1]
right = ref_array[idx]
idx -= target - left < right - target
return idx
def _plot_mask(new_mask, old_mask, name=None):
"""
Helper function plotting the remaining time segments after an elimination
stage.
Useful to figure out which stage is responsible for a certain window
being picked/rejected.
:param new_mask: The mask after the elimination stage.
:param old_mask: The mask before the elimination stage.
:param name: The name of the elimination stage.
:return:
"""
# Lazy imports as not needed by default.
import matplotlib.pylab as plt # NOQA
import matplotlib.patheffects as PathEffects # NOQA
old_mask = old_mask.copy()
new_mask = new_mask.copy()
new_mask.mask = np.bitwise_xor(old_mask.mask, new_mask.mask)
old_mask.mask = np.invert(old_mask.mask)
for i in flatnotmasked_contiguous(old_mask):
plt.fill_between((i.start, i.stop), (-1.0, -1.0), (2.0, 2.0),
color="gray", alpha=0.3, lw=0)
new_mask.mask = np.invert(new_mask.mask)
for i in flatnotmasked_contiguous(new_mask):
plt.fill_between((i.start, i.stop), (-1.0, -1.0), (2.0, 2.0),
color="#fb9a99", lw=0)
if name:
plt.text(len(new_mask) - 1 - 20, 0.5, name, verticalalignment="center",
horizontalalignment="right",
path_effects=[
PathEffects.withStroke(linewidth=3, foreground="white")],
fontweight=500)
plt.xlim(0, len(new_mask) - 1)
plt.ylim(0, 1)
plt.yticks([])
plt.gca().xaxis.set_ticklabels([])
def _window_generator(data_length, window_width):
"""
Simple generator yielding start and stop indices for sliding windows.
:param data_length: The complete length of the data series over which to
slide the window.
:param window_width: The desired window width.
"""
window_start = 0
while True:
window_end = window_start + window_width
if window_end > data_length:
break
yield (window_start, window_end, window_start + window_width // 2)
window_start += 1
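# Example (illustrative only): _window_generator(6, 4) yields the
# (start, stop, midpoint) triples (0, 4, 2), (1, 5, 3) and (2, 6, 4).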
def _log_window_selection(tr_id, msg):
"""
Helper function for consistent output during the window selection.
:param tr_id: The id of the current trace.
:param msg: The message to
|
be printed.
"""
print "[Window selection for %s] %s" % (tr_id, msg)
# Dictionary to cache the TauPyModel so there is no need to reinitialize it
# each time which is a fairly expensive operation.
TAUPY_MODEL_CACHE = {}
def select_windows(data_trace, synthetic_trace, event_latitude,
|
event_longitude, event_depth_in_km,
station_latitude, station_longitude, minimum_period,
maximum_period,
min_cc=0.10, max_noise=0.10, max_noise_window=0.4,
min_velocity=2.4, threshold_shift=0.30,
threshold_correlation=0.75, min_length_period=1.5,
min_peaks_troughs=2, max_energy_ratio=10.0,
min_envelope_similarity=0.2,
verbose=False, plot=False):
"""
Window selection algorithm for picking windows suitable for misfit
calculation based on phase differences.
Returns a list of windows which might be empty due to various reasons.
This function is really long and a lot of thi
|
belokop/indico_bare
|
indico/modules/rb/forms/reservations.py
|
Python
|
gpl-3.0
| 8,391
| 0.003814
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, date
from flask import session
from wtforms.ext.dateutil.fields import DateTimeField, DateField
from wtforms.fields.core import SelectMultipleField, StringField, BooleanField, RadioField, IntegerField
from wtforms.validators import DataRequired, InputRequired, NumberRange, ValidationError
from wtforms_components import TimeField
from wtforms.widgets.core import HiddenInput
from wtforms.fields.simple import TextAreaField, SubmitField
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import IndicoQuerySelectMultipleCheckboxField, PrincipalField
from indico.web.forms.validators import IndicoEmail, UsedIf
from indico.modules.rb.models.reservations import RepeatMapping, RepeatFrequency
from indico.util.i18n import _
class BookingSearchForm(IndicoForm):
room_ids = SelectMultipleField('Rooms', [DataRequired()], coerce=int)
start_date = DateField('Start Date', [InputRequired()], parse_kwargs={'dayfirst': True})
start_time = TimeField('Start Time', [InputRequired()])
end_date = DateField('End Date', [InputRequired()], parse_kwargs={'dayfirst': True})
end_time = TimeField('End Time', [InputRequired()])
booked_for_name = StringField('Booked For Name')
reason = StringField('Reason')
is_only_mine = BooleanField('Only Mine')
is_only_my_rooms = BooleanField('Only My Rooms')
is_only_confirmed_bookings = BooleanField('Only Confirmed Bookings')
is_only_pending_bookings = BooleanField('Only Prebookings')
is_rejected = BooleanField('Is Rejected')
is_cancelled = BooleanField('Is Cancelled')
is_archived = BooleanField('Is Archived')
uses_vc = BooleanField(_('Uses Videoconference'))
needs_vc_assistance = BooleanField(_('Videoconference Setup Assistance'))
needs_assistance = BooleanField('General Assistance')
@generated_data
def start_dt(self):
return datetime.combine(self.start_date.data, self.start_time.data)
@generated_data
def end_dt(self):
return datetime.combine(self.end_date.data, self.end_time.data)
class NewBookingFormBase(IndicoForm):
start_dt = DateTimeField('Start date', validators=[InputRequired()], parse_kwargs={'dayfirst': True},
display_format='%d/%m/%Y %H:%M')
end_dt = DateTimeField('End date', validators=[InputRequired()], parse_kwargs={'dayfirst': True},
display_format='%d/%m/%Y %H:%M')
repeat_frequency = RadioField('Repeat frequency', coerce=int, default=0, validators=[InputRequired()],
choices=[(0, _(u'Once')), (1, _(u'Daily')), (2, _(u'Weekly')), (3, _(u'Monthly'))])
repeat_interval = IntegerField('Repeat interval', validators=[NumberRange(0, 3)], default=0)
def validate_repeat_interval(self, field):
if (self.repeat_frequency.data, self.repeat_interval.data) not in RepeatMapping.mapping:
raise ValidationError('Invalid repeat step')
def validate_start_dt(self, field):
if field.data != field.object_data and field.data.date() < date.today() and not session.user.is_admin:
raise ValidationError(_(u'The start time cannot be in the past.'))
def validate_end_dt(self, field):
start_dt = self.start_dt.data
end_dt = self.end_dt.data
if start_dt.time() >= end_dt.time():
raise ValidationError('Invalid times')
if self.repeat_frequency.data == RepeatFrequency.NEVER:
field.data = datetime.combine(start_dt.date(), field.data.time())
elif start_dt.date() >= end_dt.date():
raise ValidationError('Invalid period')
class NewBookingCriteriaForm(NewBookingFormBase):
room_ids = SelectMultipleField('Rooms', [DataRequired()], coerce=int)
flexible_dates_range = RadioField('Flexible days', coerce=int, default=0,
choices=[(0, _(u'Exact')),
(1, '±{}'.format(_(u'1 day'))),
(2, '±{}'.format(_(u'2 days'))),
(3, '±{}'.format(_(u'3 days')))])
def validate_flexible_dates_range(self, field):
if self.repeat_frequency.data == RepeatFrequency.DAY:
field.data = 0
class NewBookingPeriodForm(NewBookingFormBase):
room_id = IntegerField('Room', [DataRequired()], widget=HiddenInput())
class NewBookingConfirmForm(NewBookingPeriodForm):
booked_for_user = PrincipalField(_(u'User'), [DataRequired()], allow_external=True)
contact_email = StringField(_(u'Email'), [InputRequired(), IndicoEmail(multi=True)])
contact_phone = StringField(_(u'Telephone'))
booking_reason = TextAreaField(_(u'Reason'), [DataRequired()])
uses_vc = BooleanField(_(u'I will use videoconference equipment'))
used_equipment = IndicoQuerySelectMultipleCheckboxField(_(u'VC equipment'), get_label=lambda x: x.name)
needs_vc_assistance = BooleanField(_(u'Request assistance for the startup of the videoconference session. '
u'This support is usually performed remotely.'))
needs_assistance = BooleanField(_(u'Request personal assistance for meeting startup'))
submit_book = SubmitField(_(u'Create booking'))
submit_prebook = SubmitField(_(u'Create pre-booking'))
def validate_used_equipment(self, field):
if field.data and not self.uses_vc.data:
raise ValidationError(_(u'Videoconference equipment is not used.'))
elif not field.data and self.uses_vc.data:
raise ValidationError(_(u'You need to select some Videoconference equipment'))
def validate_needs_vc_assistance(self, field):
if field.data and not self.uses_vc.data:
raise ValidationError(_(u'Videoconference equipment is not used.'))
class NewBookingSimpleForm(NewBookingConfirmForm):
submit_check = SubmitField(_(u'Check conflicts'))
booking_reason = TextAreaField(_(u'Reason'), [UsedIf(lambda form, field: not form.submit_check.data),
DataRequired()])
class ModifyBookingForm(NewBookingSimpleForm):
submit_update = SubmitField(_(u'Update booking'))
def __init__(self, *args, **kwargs):
self._old_start_dt = kwargs.pop('old_start_dt')
self._old_end_dt = kwargs.pop('old_end_
|
dt')
|
super(ModifyBookingForm, self).__init__(*args, **kwargs)
del self.room_id
del self.submit_book
del self.submit_prebook
def validate_start_dt(self, field):
super(NewBookingSimpleForm, self).validate_start_dt(field)
new_start_dt = field.data
now = datetime.now()
if self._old_start_dt < now and new_start_dt != self._old_start_dt and not session.user.is_admin:
raise ValidationError(_(u"The start time is in the past and cannot be modified."))
if self._old_start_dt >= now and new_start_dt < now and not session.user.is_admin:
raise ValidationError(_(u'The start time cannot be moved into the past.'))
def validate_end_dt(self, field):
super(NewBookingSimpleForm, self).validate_end_dt(field)
new_end_dt = field.data
now = datetime.now()
if self._old_end_dt < now and new_end_dt != self._old_end_dt and not session.user.is_admin:
raise ValidationError(_(u"The end time is
|
rizen1892/SmartHomeSolutions-Web
|
app/recording.py
|
Python
|
gpl-2.0
| 3,094
| 0.001616
|
class RecordingException(Exception):
pass
class Recording(object):
def __init__(self, stream, lenght):
import command
self.name = None
self.time = None
self.lenght = lenght
self.path = None
self._temp_path = None
self._processing_list = []
if isinstance(stream, str):
self._stream = stream
else:
raise RecordingException("Wrong stream type: " + str(type(stream)))
self._set_attrs()
self._processing_list.append(command.Command(self._process))
def cut(self, start, stop):
import command
if start > stop:
raise RecordingException("Invalid start and stop args: " + str(start) + " " + str(stop))
self._processing_list.append(command.Command(self._cut, (start, stop)))
def remove(self):
import os
if os.path.isfile(self
|
.path):
os.unlink(self.path)
def save(self, path):
import os
if not os.path.isdir(path):
raise RecordingException("Input path does not exist or is not a folder: " + path)
self.path = os.path.join(path, self.name)
for command in self._processing_list:
command.execute()
self._processing_list = []
def _set_attrs(self):
|
import tempfile
import datetime
import timezone
import os
self.time = datetime.datetime.utcnow()
self.time = self.time.replace(tzinfo=timezone.utc)
self.name = self.time.strftime("%Y%m%d%H%M%S%f.mp4")
name_h264 = self.time.strftime("%Y%m%d%H%M%S%f.h264")
self.path = os.path.join(tempfile.gettempdir(), self.name)
self._temp_path = os.path.join(tempfile.gettempdir(), name_h264)
def _process(self):
import os
try:
with open(self._temp_path, 'wb') as out:
out.write(self._stream)
except IOError as error:
raise RecordingException(str(error))
try:
self._convert(self._temp_path, self.path)
except RecordingException as error:
raise
finally:
os.unlink(self._temp_path)
def _convert(self, src, dst):
import subprocess
cmd = "MP4Box -fps 30 -add " + src + " " + dst
try:
ret = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as error:
raise RecordingException(str(error))
if ret != 0:
raise RecordingException("Convertion to mp4 failed on " + src)
def _cut(self, start, stop):
import subprocess
cmd = "MP4Box -splitx " + str(start) + ":" + str(stop) + " " + self.path + " -out " + self.path
try:
ret = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as error:
raise RecordingException(str(error))
if ret != 0:
raise RecordingException("Cannot cut recording: " + self.path)
self.lenght = stop - start
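# Usage sketch (hypothetical file name and duration; MP4Box must be installed):
#     rec = Recording(open("capture.h264", "rb").read(), 10)
#     rec.cut(0, 5)      # queued; executed together with the conversion on save()
#     rec.save("/tmp")   # converts to mp4 and writes /tmp/<timestamp>.mp4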
|
colaftc/webtool
|
top/api/rest/TradePostageUpdateRequest.py
|
Python
|
mit
| 327
| 0.030581
|
'''
Created by au
|
to_sdk on 2015.11.10
'''
from top.api.base import RestApi
class TradePostageUpdateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.post_fee = None
self.tid = None
def getapiname(self):
return 'taobao.trade.p
|
ostage.update'
|
yishayv/lyacorr
|
physics_functions/delta_f_snr_bins.py
|
Python
|
mit
| 1,610
| 0.003106
|
"""
A helper class for working with 2D bins of goodness-of-fit as a function of log(SNR).
"""
import numpy as np
class DeltaFSNRBins(object):
NUM_SNR_BINS = 50
NUM_DELTA_F_BINS = 50
LOG_SNR_RANGE = 6.
LOG_SNR_OFFSET = 2.
DELTA_F_RANGE = 1.
DELTA_F_
|
OFFSET = 0.
def __init__(self):
pass
def snr_to_bin(self, snr):
if snr <= 0:
return 0
return self.log_snr_to_bi
|
n(np.log(snr))
def log_snr_to_bin(self, log_snr):
# type: (np.ndarray) -> np.ndarray
return (np.clip((log_snr + self.LOG_SNR_OFFSET) * self.NUM_SNR_BINS / self.LOG_SNR_RANGE,
0, self.NUM_SNR_BINS - 1)).astype(np.int)
def bin_to_log_snr(self, bin_num):
# type: (np.ndarray) -> np.ndarray
return bin_num * self.LOG_SNR_RANGE / self.NUM_SNR_BINS - self.LOG_SNR_OFFSET
def delta_f_to_bin(self, delta_f):
# type: (np.ndarray) -> np.ndarray
return (np.clip((delta_f + self.DELTA_F_OFFSET) * self.NUM_DELTA_F_BINS / self.DELTA_F_RANGE,
0, self.NUM_DELTA_F_BINS - 1)).astype(np.int)
def bin_to_delta_f(self, bin_num):
# type: (np.ndarray) -> np.ndarray
return bin_num * self.DELTA_F_RANGE / self.NUM_DELTA_F_BINS - self.DELTA_F_OFFSET
def get_empty_histogram_array(self):
return np.zeros(shape=(3, self.NUM_SNR_BINS, self.NUM_DELTA_F_BINS))
def get_log_snr_axis(self):
return self.bin_to_log_snr(np.arange(self.NUM_SNR_BINS))
def get_delta_f_axis(self):
return self.bin_to_delta_f(np.arange(self.NUM_DELTA_F_BINS))
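# Worked example (illustrative only): DeltaFSNRBins().snr_to_bin(1.0) maps
# log(1.0) = 0 to bin int((0 + 2) * 50 / 6) = 16, and bin_to_log_snr(16)
# returns the lower edge of that bin, 16 * 6 / 50 - 2 = -0.08.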
|
mdhaman/superdesk-core
|
tests/vocabularies_tests.py
|
Python
|
agpl-3.0
| 4,647
| 0.000215
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import json
from unittest.mock import patch
from apps.prepopulate.app_populate import AppPopulateCommand
from superdesk.tests import TestCase
from superdesk import get_resource_service
from superdesk.vocabularies import VocabulariesService
from superdesk.errors import SuperdeskApiError
class VocabulariesPopulateTest(TestCase):
def setUp(self):
self.filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'vocabularies.json')
self.json_data = [
{'_id': 'categories',
'unique_field': 'qcode',
'items': [
{'name': 'National', 'qcode': 'A', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'T', 'is_active': False}
]},
{'_id': 'newsvalue',
'items': [
{'name': '1', 'value': '1', 'is_active': True},
{'name': '2', 'value': '2', 'is_active': True},
{'name': '3', 'value': '3', 'is_active': False}
]}
]
with open(self.filename, 'w+') as file:
json.dump(self.json_data, file)
def test_populate_vocabularies(self):
cmd = AppPopulateCommand()
cmd.run(self.filename)
service = get_resource_service('vocabularies')
for item in self.json_data:
data = service.find_one(_id=item['_id'], req=None)
self.assertEqual(data['_id'], item['_id'])
self.assertListEqual(data['items'], item['items'])
def test_check_uniqueness(self):
items = [{'name': 'National', 'qcode': 'A', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'a', 'is_active': True}]
with self.assertRaises(SuperdeskApiError):
VocabulariesService()._check_uniqueness(items, 'qcode')
def test_check_uniqueness_active_only(self):
items = [{'name': 'National', 'qcode': 'A', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'A', 'is_active': False}]
VocabulariesService()._check_uniqueness(items, 'qcode')
def test_check_value_of_unique_field(self):
items = [{'name': 'National', 'is_active': True},
{'name': 'Domestic Sports', 'qcode': 'A', 'is_active': True}]
with self.assertRaises(SuperdeskApiError):
VocabulariesService()._check_uniqueness(items, 'qcode')
def test_get_rightsinfo(self):
service = get_resource_service('vocabularies')
vocab = {
'_id': 'rightsinfo',
'items': [
{
'is_active': True,
'name': 'default',
'copyrightHolder': 'default holder',
'copyrightNotice': 'default notice',
'usageTerms': 'default terms'
},
{
'is_active': True,
'name': 'foo',
|
'copyrightHolder': 'foo holder',
'copyrightNotice': 'foo notice',
'usageTerms': 'foo terms'
},
]
}
with patch.object(service, 'find_one', return_value=vocab):
info = service.get_rightsinfo({})
self.assertEqual('default holder', info['copyrightholder'])
self.assertEqual('default notice', info['copyrightnotice'])
self.assertEqual('default terms', info['usageterms'])
|
info = service.get_rightsinfo({'source': 'foo'})
self.assertEqual('foo holder', info['copyrightholder'])
self.assertEqual('foo notice', info['copyrightnotice'])
self.assertEqual('foo terms', info['usageterms'])
def test_get_locale_vocabulary(self):
items = [
{'is_active': True, 'name': 'FIXME1', 'qcode': 'f', 'subject': '',
'translations': {'name': {'fr': 'FIXME1-fr', 'es': 'FIXME1-es'}}},
{'is_active': True, 'name': 'FIXME2', 'qcode': 'f', 'subject': '',
'translations': {'name': {'fr': 'FIXME2-fr', 'es': 'FIXME2-es'}}}
]
result = VocabulariesService().get_locale_vocabulary(items, 'fr')
self.assertEqual(result[0]['name'], 'FIXME1-fr')
self.assertEqual(result[1]['name'], 'FIXME2-fr')
def tearDown(self):
os.remove(self.filename)
|
arunkgupta/gramps
|
gramps/plugins/quickview/ageondate.py
|
Python
|
gpl-2.0
| 2,780
| 0.002518
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009 Douglas S. Blank
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# $Id$
#
#
"""
Display references for any object
"""
from gramps.gen.simple import SimpleAccess, SimpleDoc
from gramps.gui.plug.quick import QuickTable
from gramps.gen.utils.alive import probably_alive
from gramps.gen.ggettext import gettext as _
from gramps.gen.datehandler import displayer
from gramps.gen.config import config
def run(database, document, date):
"""
Display people probably alive and their ages on a particular date.
"""
# setup the simple access functions
sdb = SimpleAccess(database)
sdoc = SimpleDoc(document)
stab = QuickTable(sdb)
if not date.get_valid():
sdoc.paragraph("Date is not a valid date.")
return
# display the title
if date.get_day_valid():
sdoc.title(_("People probably alive and their ages the %s") %
displayer.display(date))
else:
sdoc.title(_("People probably alive and their ages on %s") %
displayer.display(date))
stab.columns(_("Person"), _("Age")) # Actual Date makes column unicode
matches = 0
for person in sdb.all_people():
alive, birth, death, explain, relative = \
probably_alive(person, da
|
tabase, date, return_range=True)
        # Don't show people who are probably alive but whose age cannot be determined:
if alive and birth:
diff_span = (date - birth)
stab.row(person, str(d
|
iff_span))
stab.row_sort_val(1, int(diff_span))
matches += 1
document.has_data = matches > 0
sdoc.paragraph(_("\n%d matches.\n") % matches)
stab.write(sdoc)
sdoc.paragraph("")
def get_event_date_from_ref(database, ref):
date = None
if ref:
handle = ref.get_reference_handle()
if handle:
event = database.get_event_from_handle(handle)
if event:
date = event.get_date_object()
return date
|
ortutay/23andme-phenotypes-hackathon
|
my_app/my_app/migrations/0001_initial.py
|
Python
|
mit
| 829
| 0.003619
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-25 00:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
impor
|
t django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('facebook_token', models.CharFie
|
ld(max_length=1000, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
KelSolaar/sIBL_GUI
|
utilities/get_package_path.py
|
Python
|
gpl-3.0
| 2,062
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**get_package_path.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Write given package path to stdout.
**Others:**
"""
from __future__ import unicode_literals
import argparse
import sys
import foundations.decorators
import foundations.verbose
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["LOGGER", "get_package_path", "get_command_line_arguments", "main"]
LOGGER = foundations.verbose.install_logger()
foundations.verbose.get_logging_console_handler()
foundations.verbose.set_verbosity_level(3)
def get_package_path(package):
"""
Writes given package p
|
ath to stdout.
:param package: Package to retrieve the path.
:type package: unicode
:return: Definition success.
:rtype: bool
"""
package = __import__(package)
sys.stdout.write(package.__path__[0])
return True
def get_command_line_arguments():
"""
Retrieves command line arguments.
|
:return: Namespace.
:rtype: Namespace
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-h",
"--help",
action="help",
help="'Displays this help message and exit.'")
parser.add_argument("-p",
"--package",
type=unicode,
dest="package",
help="'Package to retrieve the path.'")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
@foundations.decorators.system_exit
def main():
"""
Starts the Application.
:return: Definition success.
:rtype: bool
"""
args = get_command_line_arguments()
return get_package_path(args.package)
if __name__ == "__main__":
main()
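# Example invocation (illustrative; "foundations" is a package imported above):
#     python get_package_path.py --package foundations
# writes that package's __path__[0] to stdout.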
|
plotly/python-api
|
packages/python/plotly/plotly/validators/volume/slices/z/_fill.py
|
Python
|
mit
| 517
| 0.001934
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="fill", parent_name="volume.slices.z", **kwargs):
super(FillValidator, self).__init__(
plotly_n
|
ame=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
|
**kwargs
)
|
brianhelba/pylibtiff
|
libtiff/bitarray-a1646c0/examples/compress.py
|
Python
|
bsd-3-clause
| 953
| 0
|
"""
Demonstrates how the bz2 module may be used to create a compressed object
which represents a bitarray.
"""
import bz2
from bitarray import bitarray
def compress(ba):
"""
Given a bitarray, return an object which represents all information
wi
|
    thin the bitarray in a compressed form.
The function `decompress` can be used to restore the bitarray from the
    compressed object.
"""
assert isinstance(ba, bitarray)
return ba.length(), bz2.compress(ba.tobytes()), ba.endian()
def decompress(obj):
"""
    Given an object (created by `compress`), return a copy of the
original bitarray.
"""
n, data, endian = obj
res = bitarray(endian=endian)
res.frombytes(bz2.decompress(dat
|
a))
del res[n:]
return res
if __name__ == '__main__':
a = bitarray(12345)
a.setall(0)
a[::10] = True
c = compress(a)
print(c)
b = decompress(c)
assert a == b, a.endian() == b.endian()
|
garoa/pingo
|
pingo/examples/rpi_examples/display7_anim.py
|
Python
|
mit
| 363
| 0
|
import pingo
from time import sleep
rpi = pingo.rpi.RaspberryPi()
# A B C D E F G dp
led_locations = [11, 7, 21, 24, 26, 13, 15, 19]
pins = [rpi.pins[loc] for loc in
|
led_locations[:6]]
for pin in pins:
pin.mode = pingo.OUT
pin.low()
while True:
for pin in pi
|
ns:
pin.high()
sleep(.04)
pin.low()
|
scholer/cadnano2.5
|
cadnano/extras/fasta/__init__.py
|
Python
|
mit
| 102
| 0
|
"""
This convenience module is t
|
o hard-code some example FASTA files for testing
and development.
|
"""
|
zhupengjia/beampackage
|
beampackage/signalfilter.py
|
Python
|
gpl-3.0
| 32,657
| 0.043268
|
#!/usr/bin/env python
import re,os,glob,sys,gc,ctypes,time
import numpy as np
try:import ROOT
except:print "Error!! pyroot didn't compile! please recompile your root!"
from array import array
#from pylab import plot,show,subplot
from bcmconst import *
from runinfo import getpklpath,runinfo,zload,zdump
try:from scipy.signal import butter,freqz,lfilter
except Exception as err:
print err
print "sorry no scipy module found from your computer,it is needed to filter the bpm raw data infomation, please install it first"
#low pass filter added for raw ADC signal
def lowfilter(raw,cutfreq):
n=4
fs=960.015 #sample rate,here is helicity rate
fc=2*cutfreq/fs #Normalize LPF cutoff frequency to Nyquist frequency
if fc>=1:return raw
normok=False
while not normok:
b,a=butter
|
(n,fc)
if len(b)==len(a):
normok=True
break
n-=1
if n<0:
print "filter failed!you only have %i events for bpm, that's not enough for using filter!will use raw data instead!"%len(raw)
return raw
#w,h=freqz(b,a,n)
sf=lfilter(b,a,raw)
return np.float32(sf)
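# Usage sketch (synthetic data, illustrative only): with fs = 960.015 Hz,
#     filtered = lowfilter(np.random.randn(4096).astype(np.float32), 30.0)
# designs the Butterworth filter with normalized cutoff fc = 2*30/960.015 ~ 0.0625.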
#similar as lowfilter, but use average instead
def signalave(raw,avefreq):
fs=960.015
|
#trigger rate,here is helicity rate
if 2*avefreq>=fs:return raw
aveevents=int(fs/avefreq)
Vave=avestack(aveevents)
rawlen=len(raw)
averaw=np.zeros(rawlen,dtype=np.float32)
for i in range(rawlen):
Vave.push(raw[i])
averaw[i]=Vave.ave()
del Vave
return averaw
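# Worked example (illustrative only): signalave(raw, 60.0) averages over
# aveevents = int(960.015 / 60.0) = 16 samples, i.e. a 16-sample running mean.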
#get the total ram of computer
def getmemory():
try:
for line in open("/proc/meminfo","r"):
if "MemTotal" in line:
return int(re.split("[:kB]","".join(re.split("\s",line)))[1])*1000
except:return 2054132000
#same usage as Cavestack, but implemented with numpy instead of the C class
class avestack:
def __init__(self,size):
self.size=size
        self.buf=np.zeros(size)
self.counter=0
self.point=0
def push(self,data):
self.buf[self.point]=data
self.point=(self.point+1)%self.size
self.counter+=1
def ave(self):
if self.counter<self.size:
            return np.mean(self.buf[:self.counter])
else:
            return np.mean(self.buf)
#get raw data from rootfile,save it to pkl file and return as dict type,bpm signal dealt with filter
#filter1 used to get average pos,filter2 used to get raw pos that can see slow raster signal
class decode:
def __init__(self,runpath,treename="T",firstevent=-1,lastevent=-1,forceredecode=0,buildtree=0,forcefastbus=False):
self.info=runinfo()
self.bcmconst=bcmconst()
self.runpath=os.path.abspath(runpath)
self.treename=treename
self.firstevent=firstevent
self.lastevent=lastevent
self.forceredecode=forceredecode
self.redecode=True
self.buildtree=buildtree
self.forcefastbus=forcefastbus
self.rootfilepath,self.runfilename=os.path.split(self.runpath)
self.run=int(re.split("[_.]",self.runfilename)[1])
if not self.info.ifhapavail(self.run) or self.forcefastbus:self.fastbus=True
else:self.fastbus=False
self.arm="L" if self.run<20000 else "R"
self.pp=getpklpath(self.rootfilepath)
self.pklprefix="raw"
self.pklpathn=[["rbpm","curr","hapevent","sbpm","ssbpm","fbpm","bpmavail","sbpmavail","fbpmavail","hapraster"],["raster","clock","event","fbbpm"]]
self.pkldecoden=["rbpm","curr","hapevent","hapraster","raster","clock","event","fbbpm"]
#decide if decode
#self.manualset=False
self.pklon={}
self.setpklon(False)
def setpklon(self,value):
for m in self.pklpathn:
for n in m:
self.pklon[n]=value
def getrootfilefamily(self):
self.rootfiles=glob.glob(os.path.join(self.rootfilepath,self.runfilename.replace(".root","_*.root")))
self.rootfiles.append(self.runpath)
self.rootfiles.sort()
print "rootfile family",self.rootfiles
#check if needed redecode
def checkifredecode(self):
#check if decoded file is fastbus or not
fbbpmpkl=self.pp.getpath(self.pklprefix,"fbbpm",self.run)
fbbpmpkl2=self.pp.getpath(self.pklprefix,"fbbpm",self.run,1)
if self.forcefastbus:
if not os.path.exists(fbbpmpkl):
print "set forceredecode to 1 since forcefastbus"
self.forceredecode=1
elif not self.fastbus:
if os.path.exists(fbbpmpkl):
if os.path.exists(fbbpmpkl2):
print "set forceredecode to 1 since no fastbus info"
self.forceredecode=1
try:os.remove(fbbpmpkl2)
except:raise Exception("sorry can not remove file %s, please check if you have permission in this directory"%fbbpmpkl2)
elif not os.path.exists(self.pp.getpath(self.pklprefix,"rbpm",self.run,1)):
print "set forceredecode to 1 since no bpm info"
self.forceredecode=1
#check event
eventtolerate=100
if not self.fastbus:
print "use happex, set fbbpm to False"
self.pklon["fbbpm"]=False
if not self.forceredecode:
hapeventpkl=self.pp.getpath(self.pklprefix,"hapevent",self.run)
pklonbak=self.pklon
if os.path.exists(hapeventpkl):
hapevent=zload(hapeventpkl)
print "rootfile event:%i-%i,pkl hapevent:%i-%i"%(self.firstevent,self.lastevent,hapevent.min(),hapevent.max())
if (self.firstevent<0 or hapevent.min()-self.firstevent<eventtolerate) and (self.lastevent<0 or self.lastevent-hapevent.max()<eventtolerate):
for key in self.pklpathn[0]:
pklpath=self.pp.getpath(self.pklprefix,key,self.run)
if os.path.exists(pklpath):
datas=zload(pklpath)
Ndatas=len(datas)
if Ndatas<10:Ndatas=len(datas[0])
if Ndatas!=len(hapevent):
print "not matched events, force replay"
self.forceredecode=1
self.pklon=pklonbak
del datas
break
print "file %s exists, set %s to False"%(pklpath,key)
self.pklon[key]=False
else:
print "file %s not exists, set %s to True"%(pklpath,key)
else:
print "events not enough in happex pkl files,will set all happex keys to true"
del hapevent
eventpkl=self.pp.getpath(self.pklprefix,"event",self.run)
if os.path.exists(eventpkl):
event=zload(eventpkl)
print "rootfile event:%i-%i,pkl event:%i-%i"%(self.firstevent,self.lastevent,event.min(),event.max())
if (self.firstevent<0 or event.min()-self.firstevent<eventtolerate) and (self.lastevent<0 or self.lastevent-event.max()<eventtolerate):
for key in self.pklpathn[1]:
pklpath=self.pp.getpath(self.pklprefix,key,self.run)
if os.path.exists(pklpath):
datas=zload(pklpath)
Ndatas=len(datas)
if Ndatas<10:Ndatas=len(datas[0])
if Ndatas!=len(event):
print "not matched events, force replay"
self.forceredecode=1
self.pklon=pklonbak
del datas
break
print "file %s exists, set %s to False"%(pklpath,key)
self.pklon[key]=False
else:
print "file %s not exists, set %s to True"%(pklpath,key)
else:
print "events not enough in normal daq pkl files,will set all normal daq keys to true"
self.redecode=any([self.pklon[n] for n in self.pkldecoden])
print self.pklon,self.redecode
#decode from rootfile,leaves should be in self.pklpathn
def decodefromrootfile(self):
if not any(self.pklon.values()):return True
ROOT.gROOT.SetBatch(True)
#raw
|
lizardsystem/lizard-blockbox
|
lizard_blockbox/management/commands/parse_shapes_blockbox.py
|
Python
|
gpl-3.0
| 276
| 0
|
from django.core.management.base import BaseCommand
from lizard_bl
|
ockbox import i
|
mport_helpers
class Command(BaseCommand):
help = "Parse the shapes for the blockbox data."
def handle(self, *args, **kwargs):
import_helpers.parse_shapes_blockbox(self.stdout)
|
firebase/grpc-SwiftPM
|
tools/run_tests/artifacts/artifact_targets.py
|
Python
|
apache-2.0
| 16,432
| 0.000609
|
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
docker_base_image=None,
extra_docker_args=None,
verbose_success=False):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
docker_args = []
for k, v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'
}
if docker_base_image is not None:
docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
if extra_docker_args is not None:
docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
verbose_success=verbose_success)
return jobspec
def create_jobspec(name,
cmdline,
environ={},
shell=False,
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
use_workspace=False,
cpu_cost=1.0,
verbose_success=False):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
else:
environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
jobspec = jobset.JobSpec(cmdline=cmdline,
environ=environ,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell,
cpu_cost=cpu_cost,
verbose_success=verbose_success)
return jobspec
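# Usage sketch (illustrative artifact name): a non-docker jobspec that runs the
# Python artifact build script in a fresh workspace, mirroring the final branch
# of PythonArtifact.build_jobspec below:
#     spec = create_jobspec('python_macos_x64_example',
#                           ['tools/run_tests/artifacts/build_artifact_python.sh'],
#                           environ={'PYTHON': 'python3.7'},
#                           use_workspace=True)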
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
"""Builds Python artifacts."""
def __init__(self, platform, arch, py_ve
|
rsion):
self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'python', platform, arch, py_version]
self.py_version = py_version
if 'm
|
anylinux' in platform:
self.labels.append('linux')
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
environ = {}
if self.platform == 'linux_extra':
# Raspberry Pi build
environ['PYTHON'] = '/usr/local/bin/python{}'.format(
self.py_version)
environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
# https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
# A QEMU bug causes submodule update to hang, so we copy directly
environ['RELATIVE_COPY_PATH'] = '.'
# Parallel builds are counterproductive in emulated environment
environ['GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS'] = '1'
extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60 * 5,
docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
extra_docker_args=extra_args)
elif 'manylinux' in self.platform:
if self.arch == 'x86':
environ['SETARCH_CMD'] = 'linux32'
# Inside the manylinux container, the python installations are located in
# special places...
environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
self.py_version)
environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
# Platform autodetection for the manylinux1 image breaks so we set the
# defines ourselves.
# TODO(atash) get better platform-detection support in core so we don't
# need to do this manually...
environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
return create_docker_jobspec(
self.name,
# NOTE(rbellevi): Do *not* update this without also ensuring the
# base_docker_image attribute is accurate.
'tools/dockerfile/grpc_artifact_python_%s_%s' %
(self.platform, self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60,
docker_base_image='quay.io/pypa/manylinux1_i686'
if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
elif self.platform == 'windows':
if 'Python27' in self.py_version:
environ['EXT_COMPILER'] = 'mingw32'
else:
environ['EXT_COMPILER'] = 'msvc'
# For some reason, the batch script %random% always runs with the same
# seed. We create a random temp-dir here
dir = ''.join(
random.choice(string.ascii_uppercase) for _ in range(10))
return create_jobspec(self.name, [
'tools\\run_tests\\artifacts\\build_artifact_python.bat',
self.py_version, '32' if self.arch == 'x86' else '64'
],
environ=environ,
timeout_seconds=45 * 60,
use_workspace=True)
else:
environ['PYTHON'] = self.py_version
environ['SKIP_PIP_INSTALL'] = 'TRUE'
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_python.sh'],
environ=environ,
timeout_seconds=60 * 60 * 2,
use_workspace=True)
def __str__(self):
return self.name
class RubyArtifact:
"""Builds ruby native gem."""
def __init__(self, platform, arch):
self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'ruby', platform, arc
|
ollej/piapi
|
pidaemon.py
|
Python
|
mit
| 2,568
| 0.002336
|
"""pidaemon.py
Usage:
pidaemon.py [--brightness=<b>] [--sleep=<s>] [--interval=<s>] [--wait=<s>]
pidaemon.py (-h | --help)
pidaemon.py --version
Options:
-h --help Show this screen.
--version Show version
--brightness=<b> Default brightness level 1-255 [default: 2]
--interval=<s> Default interval in seconds between each frame in jobs [default: 0.1]
--sleep=<s> Default number of seconds to pause after each job [default: 0]
--wait=<s> Time between each iteration when polling for job on an empty queue. [default: 5]
"""
import sys
import signal
import time
from docopt import docopt
from collections import defaultdict
import settings
from piqueue import piqueue
class PiDaemon():
def __init__(self, opts):
self.running = None
self.options = self.parse_options(opts)
self.session = piqueue.Session()
self.setup_signal_handlers()
def parse_options(self, opts):
options = defaultdict(lambda: None, {
'brightness': int(opts['--brightness']),
'sleep': float(opts['--sleep']),
'interval': float(opts['--interval']),
'wait': float(opts['--wait']),
})
return options
def run(self):
while True:
job = self.next_job()
if job is not None:
self.run_job(job)
if job.options['keep'] == True:
self.add_job(job)
self.delete_job(job)
else:
time.sleep(self.options['wait'])
def run_job(self, job):
self.running = job.job_instance(self.options.copy())
self.running.run()
self.running.sleep()
self.running.cleanup()
self.running = None
def queue(self):
return self.session.query(piqueue.Job).order_by(piqueue.Job.date_created)
def next_job(self):
return self.queue().first()
def add_job(self, old_job):
new_job = piqueue.Job(old_job.job_name, old_job.options)
self.session.add(new_job)
self.session.commit()
def delete_job(self, job):
self.session.delete(job)
|
self.session.commit()
def setup_signal_handlers(self):
signal.signal(signal.SIGINT, self.cleanup)
signal.signal(signal.SIGTERM, self.cleanup)
def cleanup(self, signum, frame):
if self.running
|
is not None:
self.running.cleanup()
sys.exit(-1)
if __name__ == '__main__':
opts = docopt(__doc__, version='PiDaemon v1.0')
PiDaemon(opts).run()
|
cpennington/edx-platform
|
lms/djangoapps/instructor/tests/utils.py
|
Python
|
agpl-3.0
| 3,206
| 0.000624
|
"""
Utilities for instructor unit tests
"""
import datetime
import json
import random
import six
from pytz import UTC
from util.date_utils import get_default_time_display
class FakeInfo(object):
"""Parent class for faking objects used in tests"""
FEATURES = []
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, u'expected')
def to_dict(self):
""" Returns a dict representation of the object """
return {key: getattr(self, key) for key in self.FEATURES}
class FakeContentTask(FakeInfo):
""" Fake task info needed for email content list """
FEATURES = [
'task_input',
'task_output',
'requester',
]
def __init__(self, email_id, num_sent, num_failed, sent_to):
super(FakeContentTask, self).__init__()
self.task_input = {'email_id': email_id}
self.task_input = json.dumps(self.task_input)
self.task_output = {'succeeded': num_sent, 'failed': num_failed}
self.task_output = json.dumps(self.task_output)
self.requester = 'expected'
def make_invalid_input(self):
"""Corrupt the task input field to test errors"""
self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
""" Corresponding fake email for a fake task """
FEATURES = [
'subject',
'html_message',
'id',
'created',
]
def __init__(self, email_id):
super(FakeEmail, self).__init__()
self.id = six.text_type(email_id) # pylint: disable=invalid-name
# Select a random data for create field
year = random.randint(1950, 2000)
month = random.randint(1, 12)
day = random.randint(1, 28)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
self.created = datetime.datetime(year, month, day, hour, minute, tzinfo=UTC)
self.targets = FakeTargetGroup()
clas
|
s FakeTarget(object):
""" Corresponding fake target for a fake email """
target_type = "expected"
def long_display(self):
""" Mocks out a class method """
return self.target_type
class FakeTargetGroup(object):
""" Mocks out the M2M relationship between FakeE
|
mail and FakeTarget """
def all(self):
""" Mocks out a django method """
return [FakeTarget()]
class FakeEmailInfo(FakeInfo):
""" Fake email information object """
FEATURES = [
u'created',
u'sent_to',
u'email',
u'number_sent',
u'requester',
]
EMAIL_FEATURES = [
u'subject',
u'html_message',
u'id'
]
def __init__(self, fake_email, num_sent, num_failed):
super(FakeEmailInfo, self).__init__()
self.created = get_default_time_display(fake_email.created)
number_sent = str(num_sent) + ' sent'
if num_failed > 0:
number_sent += ', ' + str(num_failed) + " failed"
self.number_sent = number_sent
fake_email_dict = fake_email.to_dict()
self.email = {feature: fake_email_dict[feature] for feature in self.EMAIL_FEATURES}
self.requester = u'expected'
self.sent_to = [u'expected']
|
iw3hxn/LibrERP
|
product_extended/models/__init__.py
|
Python
|
agpl-3.0
| 1,187
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2016 Didotech srl
|
(<http://www.didotech.com>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the L
|
icense, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_invoice
from . import product
from . import product_category
from . import product_supplierinfo
from . import purchase_order
from . import sale_order
|
lokiteitor/ikol
|
ikol/config.py
|
Python
|
gpl-2.0
| 7,992
| 0.010636
|
# -*- coding: utf-8 -*-
#Copyright (C) 2015 David Delgado Hernandez
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from ConfigParser import ConfigParser
import directory
import var
class Config(directory.Directorio):
"""Permite obtener toda clase de configuracion desde la linea
de comandos, el fichero de constantes y/o el archivo de
configuracion"""
def __init__(self):
super(Config, self).__init__(var.CONFIG_DIR)
if not os.path.exists(var.CONFIG_DIR):
os.makedirs(var.CONFIG_DIR)
if not os.path.exists(var.CACHE_DIR):
os.makedirs(var.CACHE_DIR)
        # Basic files
self.ConfDir = var.CONFIG_DIR
self.client_secret = var.CLIENT_SECRETS_FILE
self.storage_path = var.CODE_STORAGE
self.config_file = var.CONFIG_FILE
self.url_file = var.URL_FILE
        # if the user manually set a non-persistent configuration, track it
        # here
# CACHE_DIR,URL_FILE,FORMAT_DEFAULT,FINAL_DIR,Codec,kbps
self.reg = [False,False,False,False,False,False]
        # Options
self.format = var.FORMAT_DEFAULT
self.codec = var.CODEC_DEFAULT
        # check integrity before continuing
self
|
._CheckDirectory()
self.cfgfile = ConfigParser()
self.cfgfile.read(self.config_file)
        # If everything is fine, load the settings made by the
        # user in the configuration file
        # Secondary
|
 directories
        # TODO: if set manually, check that they do not overlap
self.CACHE_DIR = self.getCacheDir()
self.FINAL_DIR = self.getFinalDir()
def _CheckDirectory(self):
        # Record: (Client_secret, configuration file, URL.conf)
check = [False,False,False]
for i in self.getListFiles():
if i == self.client_secret:
check[0] = True
if i == self.config_file:
check[1] = True
if i == self.url_file:
check[2] = True
if check[0] == False:
raise AttributeError("No se encontro el archivo con la clave API")
if check[1] == False:
self.createFile(self.config_file,var.CONFIG_DEFAULT,rw="w")
if check[2] == False:
self.createFile(self.url_file,rw="w")
def getCacheDir(self):
if self.reg[0]:
            # If the user modified it, do nothing and return the user's
            # value
pass
elif self.cfgfile.has_option("DIRECTORIOS","CACHE_DIR"):
self.CACHE_DIR = self.cfgfile.get("DIRECTORIOS","CACHE_DIR")
else:
            # neither given by the user nor present in the configuration file
self.cfgfile.set("DIRECTORIOS","CACHE_DIR",var.CACHE_DIR)
return self.CACHE_DIR
def setCacheDir(self,path,flag=False):
self.reg[0] = True
if not os.path.exists(path):
os.mkdir(path)
        # whether it should be stored persistently
self.CACHE_DIR = path
if flag:
self.cfgfile.set("DIRECTORIOS","CACHE_DIR",path)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.CACHE_DIR
def getFormat(self):
if self.reg[2]:
pass
elif self.cfgfile.has_option("OPCIONES","FORMAT_DEFAULT"):
self.format = self.cfgfile.getint("OPCIONES","FORMAT_DEFAULT")
else:
self.cfgfile.set("OPCIONES","FORMAT_DEFAULT",var.FORMAT_DEFAULT)
return self.format
def setFormat(self,nformat,flag=False):
self.reg[2] = True
self.format = nformat
if flag:
self.cfgfile.set("OPCIONES","FORMAT_DEFAULT",nformat)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.format
def setFinalDir(self,path,flag=False):
self.reg[3] = True
if not os.path.exists(path):
os.mkdir(path)
        # whether it should be stored persistently
self.FINAL_DIR = path
if flag:
self.cfgfile.set("DIRECTORIOS","FINAL_DIR",path)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.FINAL_DIR
def getFinalDir(self):
if self.reg[3]:
            # If the user modified it, do nothing and return the user's
            # value
pass
elif self.cfgfile.has_option("DIRECTORIOS","FINAL_DIR"):
self.FINAL_DIR = self.cfgfile.get("DIRECTORIOS","FINAL_DIR")
else:
            # neither given by the user nor present in the configuration file
self.cfgfile.set("DIRECTORIOS","FINAL_DIR",var.FINAL_DIR)
self.FINAL_DIR = var.FINAL_DIR
return self.FINAL_DIR
def addURL(self,URL):
        # TODO: validate the URL
lst = self.getAllURL()
if URL in lst:
dup = True
else:
dup = False
with open(self.url_file,"a") as f:
if dup == False:
f.write(URL+"\n")
def getAllURL(self):
        # Return a list with the urls
urllist = []
try:
with open(self.url_file,"r") as f:
while True:
url = f.readline()
if not url:
break
url = url.replace("\n","")
if len(url) > 0:
urllist.append(url)
return urllist
except Exception, e:
            # TODO: raise a warning and log it
print e
            # create the file
self.createFile(self.url_file,rw="w")
return []
def getDelWrongList(self):
if self.cfgfile.has_option("OPCIONES","DELETE_WRONG_LIST"):
self.DELETE_WRONG_LIST = self.cfgfile.get("OPCIONES","DELETE_WRONG_LIST")
else:
            # not in the configuration file yet
self.cfgfile.set("OPCIONES","DELETE_WRONG_LIST","YES")
return self.DELETE_WRONG_LIST
def getLogFile(self):
return var.LOG_FILE
def getCodec(self):
if self.reg[4]:
pass
elif self.cfgfile.has_option("OPCIONES","CODEC_DEFAULT"):
self.codec = self.cfgfile.get("OPCIONES","CODEC_DEFAULT")
else:
self.cfgfile.set("OPCIONES","CODEC_DEFAULT",var.CODEC_DEFAULT)
self.codec = var.CODEC_DEFAULT
return self.codec
def setCodec(self,codec,flag=False):
self.reg[4] = True
self.codec = codec
if flag:
self.cfgfile.set("OPCIONES","CODEC_DEFAULT",codec)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.codec
def getKbps(self):
if self.reg[5]:
pass
elif self.cfgfile.has_option("OPCIONES","KBPS"):
self.kpbs = self.cfgfile.get("OPCIONES","KBPS")
else:
self.cfgfile.set("OPCIONES","KBPS",var.KBPS)
self.kpbs = var.KBPS
return self.kpbs
def setKbps(self,kpbs,flag=False):
self.reg[5] = True
self.kpbs = kpbs
if flag:
self.cfgfile.set("OPCIONES","KBPS",kpbs)
with open(self.config_file,"w") as f:
self.cfgfile.write(f)
return self.kpbs
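# Usage sketch (illustrative values; paths come from ikol.var and the client
# secret file must already exist, otherwise __init__ raises AttributeError):
#     cfg = Config()
#     cfg.addURL("https://www.youtube.com/watch?v=XXXXXXXXXXX")
#     cfg.getAllURL()                 # -> list of stored URLs
#     cfg.setCodec("mp3", flag=True)  # flag=True persists it to the config file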
|
lablup/sorna-manager
|
tests/manager/test_scheduler.py
|
Python
|
lgpl-3.0
| 16,173
| 0.000618
|
from __future__ import annotations
from decimal import Decimal
from typing import (
Any,
Mapping,
Sequence,
)
import uuid
from pprint import pprint
import pytest
from ai.backend.common.docker import ImageRef
from ai.backend.common.types import (
AccessKey, AgentId, KernelId,
ResourceSlot, SessionTypes,
)
from ai.backend.manager.scheduler import PendingSession, ExistingSession, AgentContext
from ai.backend.manager.scheduler.dispatcher imp
|
ort load_scheduler
from ai.backend.manager.scheduler.fifo import FIFOSlotScheduler, LIFOSlotScheduler
from ai.backend.manager.scheduler.drf import DRFScheduler
from ai.backend.manager.scheduler.mof import MOFScheduler
def test_load_intrinsic():
assert isin
|
stance(load_scheduler('fifo', {}), FIFOSlotScheduler)
assert isinstance(load_scheduler('lifo', {}), LIFOSlotScheduler)
assert isinstance(load_scheduler('drf', {}), DRFScheduler)
assert isinstance(load_scheduler('mof', {}), MOFScheduler)
example_group_id = uuid.uuid4()
example_total_capacity = ResourceSlot({'cpu': '4.0', 'mem': '4096'})
@pytest.fixture
def example_agents():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_mixed_agents():
return [
AgentContext(
agent_id=AgentId('i-gpu'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-cpu'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_first_one_assigned():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_no_valid():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
),
]
pending_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
existing_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
_common_dummy_for_pending_session: Mapping[str, Any] = dict(
image_ref=ImageRef('lablup/python:3.6-ubunt18.04'),
domain_name='default',
group_id=example_group_id,
resource_policy={},
resource_opts={},
mounts=[],
mount_map={},
environ={},
bootstrap_script=None,
startup_command=None,
internal_data=None,
preopen_ports=[],
)
_common_dummy_for_existing_session: Mapping[str, Any] = dict(
image_ref=ImageRef('lablup/python:3.6-ubunt18.04'),
domain_name='default',
group_id=example_group_id,
)
@pytest.fixture
def example_pending_sessions():
# lower indices are enqueued first.
return [
PendingSession( # rocm
kernel_id=pending_kernel_ids[0],
access_key=AccessKey('user01'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('1'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cuda
kernel_id=pending_kernel_ids[1],
access_key=AccessKey('user02'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('0.5'),
'rocm.devices': Decimal('0'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cpu-only
kernel_id=pending_kernel_ids[2],
access_key=AccessKey('user03'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requ
|
colossalbit/cssypy
|
cssypy/scanners/scanners.py
|
Python
|
bsd-3-clause
| 4,679
| 0.005984
|
from __future__ import absolute_import
from __future__ import print_function
import re
import itertools
from ..utils.py3compat import range
from .. import csstokens as tokens
class ScannerBase(object):
def __init__(self, data):
self._tokeniter = tokens.re_tokens.finditer(data)
self._lineno = 1
self._column = 1
self._eof_count = 0
self._next = [tokens.Token(tokens.START, u'', self._lineno, self._column)]
def __iter__(self):
return self
def next(self):
tok = self._next.pop(0)
if not self._next:
self._fill(10)
return tok
def _fill(self, n=1, force=False):
# n: The desired length for self._next.
ntoload = max(n - len(self._next), 0)
i = -1
try:
for i in range(ntoload):
self._next.append(self.get_next())
return len(self._next)
except StopIteration:
if not force and self._eof_count > 1:
raise
loaded = i+1
k = -1
for k in range(ntoload - loaded):
self._eof_count += 1
self._next.append(tokens.Token(tokens.EOF, u'', self._lineno, self._column))
return len(self._next)
def putback(self, *toks):
self._next[:0] = list(toks)
def peek(self, n=0):
try:
return self._next[n]
except IndexError:
pass
try:
sz = max(n+1, 10)
end = self._fill(n=sz, force=True)
assert end > 0
return self._next[min(end-1, n)]
except StopIteration:
return self._next[-1] # this will be EOF
def process_newlines(self, s):
lines = tokens.re_newline.split(s)
# returns: number_of_newlines, length_of_last_line
return len(lines) - 1, len(lines[-1])
embedded_newlines = set((tokens.STRING, tokens.COMMENT, tokens.WS,
|
tokens.URI, tokens.BADCOMMENT, tokens.BADSTRING,
tokens.BADURI, tokens.IDENT,
tokens.ATKEYWORD_OTHER, tokens.DIMENSION,
tokens.HASH, tokens.FUNCTION))
def advance_position(self, toktype, value):
if toktype in self.embedded_newlines:
nlines, nlast = self.process_newlines(value)
if nlines:
self._lineno += nlin
|
es
self._column = nlast + 1
else:
self._column += len(value)
else:
self._column += len(value)
def get_next(self):
m = self._tokeniter.next()
toktype = tokens.tokens[m.lastgroup]
value = m.group()
tok = tokens.Token(toktype, value, self._lineno, self._column)
self.advance_position(toktype, value)
##print 'Token: {0}'.format(tok.typestr)
return tok
class Scanner(ScannerBase):
ignore_tokens = (tokens.COMMENT,)
def get_next(self):
tok = super(Scanner, self).get_next()
while tok.type in self.ignore_tokens:
tok = super(Scanner, self).get_next()
return tok
#==============================================================================#
def benchmark_iter(src, tests=5): # pragma: no cover
import time
times = []
for i in range(tests):
start = time.clock()
for tok in tokens.re_tokens.finditer(src):
pass
stop = time.clock()
times.append(stop - start)
return times
def benchmark_iterlist(src, tests=5): # pragma: no cover
import time
times = []
for i in range(tests):
start = time.clock()
ilist = list(tokens.re_tokens.finditer(src))
for tok in ilist:
pass
stop = time.clock()
times.append(stop - start)
return times
def benchmark_list(src, tests=5): # pragma: no cover
import time
times = []
for i in range(tests):
start = time.clock()
for tok in tokens.re_tokens.findall(src):
pass
stop = time.clock()
times.append(stop - start)
return times
def benchmark(src, ntests=5): # pragma: no cover
times_list = benchmark_list(src, tests=ntests)
times_iterlist = benchmark_iterlist(src, tests=ntests)
times_iter = benchmark_iter(src, tests=ntests)
print('iter time: {0}'.format(min(times_iter)))
print('iterlist time: {0}'.format(min(times_iterlist)))
print('list time: {0}'.format(min(times_list)))
#==============================================================================#
|
jishnuv/Toy-Python-Virtual-Machine
|
Testcases/Functions/f2.py
|
Python
|
gpl-2.0
| 131
| 0.045802
|
def sqr(x):
return x*x
def cube(x):
return x*x*x
def quad(x):
return cube
|
(x)*x
a =
|
10
print sqr(a)
print cube(a)
print quad(a)
|
RightToResearch/OpenCon-Rating-App
|
project/rating/urls.py
|
Python
|
mit
| 365
| 0
|
"""
Applicati
|
on urlconfig
"""
from __future__ import absolute_import
from django.conf.urls import url
from . import views
|
urlpatterns = [
url(
r"^(?P<uuid>[0-9a-f-]{36})/$",
views.RateView.as_view(),
name="rate"
),
url(
r"^2/(?P<uuid>[0-9a-f-]{36})/$",
views.Rate2View.as_view(),
name="rate2"
)
]
|
RewrZ/RewrZ
|
rewrz/blog/templatetags/blog_tags.py
|
Python
|
agpl-3.0
| 698
| 0.010479
|
from ..models import Post, Category, Tag
from django.db.models.aggregates import Count
from django import template
register = template.Library()
# Recent posts
@register.simple_tag
def get_recent_posts(num=9):
return Post.obj
|
ects.all().order_by('-modified_time')[:num]
# Archive by month
@register.simple_tag
def archives():
return Post.objects.dates('created_time', 'month', order='DESC')
# Archive by category
@register
|
.simple_tag
def get_categories():
return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
# Tag cloud
@register.simple_tag
def get_tags():
return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
|
monikagrabowska/osf.io
|
website/addons/s3/tests/test_view.py
|
Python
|
apache-2.0
| 12,215
| 0.001474
|
# -*- coding: utf-8 -*-
import httplib as http
import mock
from nose.tools import * # noqa
from boto.exception import S3ResponseError
from framework.auth import Auth
from tests.base import get_default_metaschema
from tests.factories import ProjectFactory, AuthUserFactory
from website.addons.base import testing
from website.addons.s3.tests.utils import S3AddonTestCase
from website.addons.s3.utils import validate_bucket_name, validate_bucket_location
from website.util import api_url_for
class TestS3Views(S3AddonTestCase, testing.views.OAuthAddonConfigViewsTestCaseMixin):
def setUp(self):
self.mock_can_list = mock.patch('website.addons.s3.views.utils.can_list')
self.mock_can_list.return_value = True
self.mock_can_list.start()
self.mock_uid = mock.patch('website.addons.s3.views.utils.get_user_info')
self.mock_uid.return_value = {'id': '1234567890', 'display_name': 's3.user'}
self.mock_uid.start()
self.mock_exists = mock.patch('website.addons.s3.views.utils.bucket_exists')
self.mock_exists.return_value = True
self.mock_exists.start()
super(TestS3Views, self).setUp()
def tearDown(self):
self.mock_can_list.stop()
self.mock_uid.stop()
self.mock_exists.stop()
super(TestS3Views, self).tearDown()
def test_s3_settings_input_empty_keys(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': '',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_settings_input_empty_access_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': '',
'secret_key': 'Non-empty-secret-key'
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_settings_input_empty_secret_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': 'Non-empty-access-key',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_set_bucket_no_settings(self):
user = AuthUserFactory()
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_s3_set_bucket_no_auth(self):
user = AuthUserFactory()
user.add_addon('s3')
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_s3_set_bucket_registered(self):
registration = self.project.register_node(
get_default_metaschema(), Auth(self.user), '', ''
)
url = registration.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.addons.s3.views.utils.can_list', return_value=False)
def test_user_settings_cant_list(self, mock_can_list):
url = api_url_for('s3_add_user_account')
rv = self.app.post_json(url, {
'access_key': 'aldkjf',
'secret_key': 'las'
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('Unable to list buckets.', rv.body)
def test_s3_remove_node_settings_owner(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
ret = self.app.delete(url, auth=self.user.auth)
result = self.Serializer().serialize_settings(node_settings=self.node_settings, current_user=self.user)
assert_equal(result['nodeHasAuth'], False)
def test_s3_remove_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
ret = self.app.delete(url, auth=None, expect_errors=True)
assert_equal(ret.status_code, 401)
def test_s3_get_node_settings_owner(self):
self.node_settings.set_auth(self.external_account, self.user)
self.node_settings.folder_id = 'bucket'
self.node_settings.save()
url = self.node_settings.owner.api_url_for('s3_get_config')
res = self.app.get(url, auth=self.user.auth)
result = res.json['result']
assert_equal(result['nodeHasAuth'], True)
assert_equal(result['userIsOwner'], True)
assert_equal(result['folder']['path'], self.node_settings.folder_id)
def test_s3_get_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_get_config')
unauthorized = AuthUserFactory()
ret = self.app.get(url, auth=unauthorized.auth, expect_errors=True)
assert_equal(ret.status_code, 403)
## Overrides ##
@mock.patch('website.addons.s3.model.get_bucket_names')
def test_folder_list(self, mock_names):
mock_names.return_value = ['bucket1', 'bucket2']
super(TestS3Views, self).test_folder_list()
@mock.patch('website.addons.s3.model.bucket_exists')
@mock.patch('website.addons.s3.model.get_bucket_location_or_error')
def test_set_config(self, mock_location, mock_exists):
mock_exists.return_value = True
mock_location.return_value = ''
self.node_settings.set_auth(self.external_account, self.user)
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.put_json(url, {
'selected': self.folder
}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
self.project.reload()
self.node_settings.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_bucket_linked'.format(self.ADDON_SHORT_NAME)
)
assert_equal(res.json['result']['folder']['name'], self.node_settings.folder_name)
class TestCreateBucket(S3AddonTestCase):
def setUp(self):
super(TestCreateBucket, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.auth = self.user.auth
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('s3', auth=self.consolidated_auth)
self.project.creator.add_addon('s3')
self.user_settings = self.user.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.user_settings.save()
self.node_settings = self.project.get_addon('s3')
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.project.creator.get_addon('s3')
self.node_settings.save()
def test_bad_names(self):
assert_false(validate_bucket_name(''))
assert_false(validate_bucket_name('no'))
assert_false(validate_bucket_name('a' * 64))
assert_false(validate
|
_bucket_name(' leadingspace'))
assert_false(validate_bucket_name('trailingspace '))
assert_false(validate_bucket_name('bogus naMe'))
assert_false(validate_bucket_name('.cantstartwithp'))
assert_false(validate_bucket_name('or.endwith.'))
assert_fa
|
lse(validate_bucket_name('..nodoubles'))
assert_false(validate_bucket_name('no_unders_in'))
|
pyoceans/gridded
|
examples/make_test_grid.py
|
Python
|
mit
| 1,437
| 0.004871
|
import numpy as np
import matpl
|
otlib.pyplot as plt
from shapely.geometry.polygon import Polygon
from shapely.geometry import MultiPolygon
import cell_tree2d
# create a rotated Cartesian grid
xc, yc = np.mgrid[1:10:15j, 1:20:18j]
yc
|
= yc**1.2 + xc**1.5
def rot2d(x, y, ang):
'''rotate vectors by geometric angle'''
xr = x*np.cos(ang) - y*np.sin(ang)
yr = x*np.sin(ang) + y*np.cos(ang)
return xr, yr
x, y = rot2d(xc, yc, 0.2)
y /= 10.0
x -= x.mean()
y -= y.mean()
# Create nodes and faces from grid
nodes = np.ascontiguousarray(np.column_stack((x[:].reshape(-1),
y[:].reshape(-1)))).astype(np.float64)
y_size = x.shape[0]
x_size = y.shape[1]
faces = np.array([np.array([[xi, xi + 1, xi + x_size + 1, xi + x_size]
for xi in range(0, x_size - 1, 1)]) + yi * x_size for yi in range(0, y_size - 1)])
faces = np.ascontiguousarray(faces.reshape(-1, 4).astype(np.int32))
squares = [nodes[face] for face in faces]
## Convert to a bunch of shapely Polygon objects, for some unknown use.
# mesh = MultiPolygon([Polygon(p) for p in squares])
## Extra functions for plotting the grid
# for square in squares:
# x, y = square.T
# plt.fill(x, y)
#
# plt.gca().set_aspect(1.0)
# plt.show()
# Create some trial points and locate them using cell_tree
xyi = np.random.randn(10, 2)
ct = cell_tree2d.CellTree(nodes, faces)
idx = ct.locate(xyi)
|
manti-by/Churchill
|
churchill/apps/profiles/models.py
|
Python
|
bsd-3-clause
| 2,037
| 0
|
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from churchill.apps.core.models import BaseModel
from churchill.apps.currencies.services import get_default_currency_id
class StatsCalculationStrategy(models.TextChoices):
LAST_SHOT = "LAST_SHOT", _("From the last shot")
WEEKLY = "WEEKLY", _("Weekly")
MONTHLY = "MONTHLY", _("Monthly")
ALL_TIME = "ALL_TIME", _("For the all time")
class Profile(BaseModel):
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True,
related_name="profile",
)
image = models.FileField(
upload_to=settings.PROFILE_IMAGE_DIRECTORY, null=True, blank=True
)
language = models.CharField(
max_length=5,
blank=True,
default=settings.LANGUAGE_CODE,
choices=settings.LANGUAGES,
)
currency = models.ForeignKey(
"currencies.Currency",
related_name="profiles",
on_delete=models.DO_NOTHING,
blank=True,
default=get_default_currency_id,
)
next_day_offset = models.IntegerField(
blank=True,
default=settings.NEXT_DAY_OFFSET,
help_text=_("Offset in hours for the ne
|
xt day"),
)
avg_consumption = models.IntegerField(
blank=True,
default=settings.AVG
|
_ALCOHOL_CONSUMPTION,
help_text=_("Average alcohol consumption in ml per year"),
)
avg_price = models.DecimalField(
max_digits=5,
decimal_places=2,
blank=True,
default=settings.AVG_ALCOHOL_PRICE,
help_text=_("Average alcohol price for 1000 ml"),
)
stats_calculation_strategy = models.CharField(
max_length=20,
choices=StatsCalculationStrategy.choices,
default=StatsCalculationStrategy.MONTHLY,
)
verification_token = models.CharField(max_length=16, null=True, blank=True)
def __str__(self):
return self.user.email
|
pgroudas/pants
|
src/python/pants/backend/android/targets/android_resources.py
|
Python
|
apache-2.0
| 1,346
| 0.008172
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.android.targets.android_target import AndroidTarget
from pants.base.exceptions import TargetDefinitionException
class AndroidResources(AndroidTarget):
"""Processes android resources to generate R.java"""
def __init__(self,
resource_dir=None,
**kwargs):
#TODO(mateor) change resource_dir from string into list
"""
:param string resource_dir: path/to/directory containing Android resource files,
often named 'res'.
"""
super(AndroidResources, self).__init__(**kwargs)
address = kwargs['address']
try:
self.resource_dir = os.path.join(address.spec_path, resource_dir)
except AttributeError:
raise TargetDefinitionException(self, 'An android_resources target must specify a '
'\'resource_dir
|
\' that contains the target\'s '
|
'resource files.')
def globs_relative_to_buildroot(self):
return {'globs' : os.path.join(self.resource_dir, '**')}
|
technologiescollege/Blockly-rduino-communication
|
scripts_XP/Lib/site-packages/autobahn/wamp/test/test_uri_pattern.py
|
Python
|
gpl-3.0
| 24,741
| 0.001495
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
from autobahn import wamp
from autobahn.wamp.uri import Pattern, RegisterOptions, SubscribeOptions
import unittest2 as unittest
class TestUris(unittest.TestCase):
def test_invalid_uris(self):
for u in [u"",
u"com.myapp.<product:foo>.update",
u"com.myapp.<123:int>.update",
u"com.myapp.<:product>.update",
u"com.myapp.<product:>.update",
u"com.myapp.<int:>.update",
]:
self.assertRaises(Exception, Pattern, u, Pattern.URI_TARGET_ENDPOINT)
def test_valid_uris(self):
for u in [u"com.myapp.proc1",
u"123",
u"com.myapp.<product:int>.update",
u"com.myapp.<category:string>.<subcategory>.list"
u"com.myapp.something..update"
]:
p = Pattern(u, Pattern.URI_TARGET_ENDPOINT)
self.assertIsInstance(p, Pattern)
def test_parse_uris(self):
tests = [
(u"com.myapp.<product:int>.update", [
(u"com.myapp.0.update", {u'product': 0}),
(u"com.myapp.123456.update", {u'product': 123456}),
(u"com.myapp.aaa.update", None),
(u"com.myapp..update", None),
(u"com.myapp.0.delete", None),
]
),
(u"com.myapp.<product:string>.update", [
(u"com.myapp.box.update", {u'product': u'box'}),
(u"com.myapp.123456.update", {u'pr
|
oduct': u'123456'}),
(u"com.myapp..update", None),
]
),
(u"com.myapp.<product>.update", [
(u"com.myapp.0.update", {u'product': u'0'}),
(u"com.myapp.abc.update", {u
|
'product': u'abc'}),
(u"com.myapp..update", None),
]
),
(u"com.myapp.<category:string>.<subcategory:string>.list", [
(u"com.myapp.cosmetic.shampoo.list", {u'category': u'cosmetic', u'subcategory': u'shampoo'}),
(u"com.myapp...list", None),
(u"com.myapp.cosmetic..list", None),
(u"com.myapp..shampoo.list", None),
]
)
]
for test in tests:
pat = Pattern(test[0], Pattern.URI_TARGET_ENDPOINT)
for ptest in test[1]:
uri = ptest[0]
kwargs_should = ptest[1]
if kwargs_should is not None:
args_is, kwargs_is = pat.match(uri)
self.assertEqual(kwargs_is, kwargs_should)
else:
self.assertRaises(Exception, pat.match, uri)
class TestDecorators(unittest.TestCase):
def test_decorate_endpoint(self):
@wamp.register(u"com.calculator.square")
def square(_):
"""Do nothing."""
self.assertTrue(hasattr(square, '_wampuris'))
self.assertTrue(type(square._wampuris) == list)
self.assertEqual(len(square._wampuris), 1)
self.assertIsInstance(square._wampuris[0], Pattern)
self.assertTrue(square._wampuris[0].is_endpoint())
self.assertFalse(square._wampuris[0].is_handler())
self.assertFalse(square._wampuris[0].is_exception())
self.assertEqual(square._wampuris[0].uri(), u"com.calculator.square")
self.assertEqual(square._wampuris[0]._type, Pattern.URI_TYPE_EXACT)
@wamp.register(u"com.myapp.product.<product:int>.update")
def update_product(product=None, label=None):
"""Do nothing."""
self.assertTrue(hasattr(update_product, '_wampuris'))
self.assertTrue(type(update_product._wampuris) == list)
self.assertEqual(len(update_product._wampuris), 1)
self.assertIsInstance(update_product._wampuris[0], Pattern)
self.assertTrue(update_product._wampuris[0].is_endpoint())
self.assertFalse(update_product._wampuris[0].is_handler())
self.assertFalse(update_product._wampuris[0].is_exception())
self.assertEqual(update_product._wampuris[0].uri(), u"com.myapp.product.<product:int>.update")
self.assertEqual(update_product._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.register(u"com.myapp.<category:string>.<cid:int>.update")
def update(category=None, cid=None):
"""Do nothing."""
self.assertTrue(hasattr(update, '_wampuris'))
self.assertTrue(type(update._wampuris) == list)
self.assertEqual(len(update._wampuris), 1)
self.assertIsInstance(update._wampuris[0], Pattern)
self.assertTrue(update._wampuris[0].is_endpoint())
self.assertFalse(update._wampuris[0].is_handler())
self.assertFalse(update._wampuris[0].is_exception())
self.assertEqual(update._wampuris[0].uri(), u"com.myapp.<category:string>.<cid:int>.update")
self.assertEqual(update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.register(u"com.myapp.circle.<name:string>",
RegisterOptions(match=u"wildcard", details_arg="details"))
def circle(name=None, details=None):
""" Do nothing. """
self.assertTrue(hasattr(circle, '_wampuris'))
self.assertTrue(type(circle._wampuris) == list)
self.assertEqual(len(circle._wampuris), 1)
self.assertIsInstance(circle._wampuris[0], Pattern)
self.assertIsInstance(circle._wampuris[0].options, RegisterOptions)
self.assertEqual(circle._wampuris[0].options.match, u"wildcard")
self.assertEqual(circle._wampuris[0].options.details_arg, "details")
self.assertTrue(circle._wampuris[0].is_endpoint())
self.assertFalse(circle._wampuris[0].is_handler())
self.assertFalse(circle._wampuris[0].is_exception())
self.assertEqual(circle._wampuris[0].uri(), u"com.myapp.circle.<name:string>")
self.assertEqual(circle._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.register(u"com.myapp.something..update",
RegisterOptions(match=u"wildcard", details_arg="details"))
def something(dynamic=None, details=None):
""" Do nothing. """
self.assertTrue(hasattr(something, '_wampuris'))
self.assertTrue(type(something._wampuris) == list)
self.assertEqual(len(something._wampuris), 1)
self.assertIsInstance(something._wampuris[0], Pattern)
self.assertIsInstance(something._wampuris[0].options, RegisterOptions)
self.assertEqual(something._wampuris[0].options.match, u"wildcard")
self.assertEqual(something._wampuris[0].options.details_arg, "details")
self.assertTrue(something._wampuris[0].is_endpoint())
self.assertFalse(something._wampuris[0].is_handler())
self.assertFalse(something._wampuris[0].is_exception())
|
bapril/cfa_635
|
cfa635/crc16.py
|
Python
|
apache-2.0
| 4,254
| 0.019041
|
#!/usr/bin/env python
# crc16.py by Bryan G. Olson, 2005
# This module is free software and may be used and
# distributed under the same terms as Python itself.
"""
CRC-16 in Python, as standard as possible. This is
the 'reflected' version, which is usually what people
want. See Ross N. Williams' /A Painless Guide to
CRC error detection algorithms/.
Re-factored by bapril@gmail.com to pass pylint
"""
#from array import array
def crc16(string, value=0):
""" Single-function interface, like gzip module's crc32
"""
value = 0xffff
for char in string:
#value = TABLE[ord(char) ^ (value & 0xff)] ^ (value >> 8)
value = value >> 8 ^ TABLE[ ( value ^ ord(char) ) & 0xff ]
value = ~value
value = (value & 0xffff)
return value
class CRC16(object):
""" Class interface, like the Python library's cryptographic
hash functions (which CRC's are definitely not.)
"""
def __init__(self, string=''):
self.val = 0
if string:
self.update(string)
def update(self, string):
""" Append string to CRC
"""
self.val = crc16(string, self.val)
def checksum(self):
""" Returns the current CRC
"""
return chr(self.val >> 8) + chr(self.val & 0xff)
def hexchecksum(self):
""" Returns the current CRC in hex
"""
return '%04x' % self.val
def copy(self):
""" Copy the CRC object
"""
clone = CRC16()
clone.val = self.val
return clone
def get_value(self):
""" Return the raw CRC value
"""
return self.val
# CRC-16 poly: p(x) = x**16 + x**15 + x**2 + 1
# top bit implicit, reflected
TABLE = [ 0x00000, 0x01189, 0x02312, 0x0329B, 0x04624, 0x057AD, 0x06536, \
0x074BF, 0x08C48, 0x09DC1, 0x0AF5A, 0x0BED3, 0x0CA6C, 0x0DBE5, 0x0E97E, \
0x0F8F7, 0x01081, 0x00108, 0x03393, 0x0221A, 0x056A5, 0x0472C, 0x075B7, \
0x0643E, 0x09CC9, 0x08D40, 0x0BFDB, 0x0AE52, 0x0DAED, 0x0CB64, 0x0F9FF, \
0x0E876, 0x02102, 0x0308B, 0x00210, 0x01399, 0x06726, 0x076AF, 0x04434, \
0x055BD, 0x0AD4A, 0x0BCC3, 0x08E58, 0x09FD1, 0x0EB6E, 0x0FAE7, 0x0C87C, \
0x0D9F5, 0x03183, 0x0200A, 0x01291, 0x00318, 0x077A7, 0x0662E, 0x054B5, \
0x0453C, 0x0BDCB, 0x0AC42, 0x09ED9, 0x08F50, 0x0FBEF, 0x0EA66, 0x0D8FD, \
0x0C974, 0x04204, 0x0538D, 0x06116, 0x0709F, 0x00420, 0x015A9, 0x02732, \
0x036BB, 0x0CE4C, 0x0DFC5, 0x0ED5E, 0x0FCD7, 0x08868, 0x099E1, 0x0AB7A, \
|
0x0BAF3, 0x05285, 0x0430C, 0x07197, 0x0601E, 0x014A1, 0x00528, 0x037B3, \
0x0263A, 0x0DECD, 0x0CF44, 0x0FDDF, 0x0EC56, 0x098E9, 0x08960, 0x0BBFB, \
0x0AA72, 0x06306, 0x0728F, 0x04014, 0x0519D, 0x02522, 0x034AB, 0x00630, \
0x017B9, 0x0EF4E, 0x0FEC7, 0x0CC5C, 0x0DDD5, 0x0A96A, 0x0B8E3, 0x08A78, \
0x09BF1, 0x07387, 0x0620E, 0x05095, 0x0411C, 0x035A3, 0x0242A, 0x016B1, \
0x00738, 0x0FFCF, 0x0EE46, 0
|
x0DCDD, 0x0CD54, 0x0B9EB, 0x0A862, 0x09AF9, \
0x08B70, 0x08408, 0x09581, 0x0A71A, 0x0B693, 0x0C22C, 0x0D3A5, 0x0E13E, \
0x0F0B7, 0x00840, 0x019C9, 0x02B52, 0x03ADB, 0x04E64, 0x05FED, 0x06D76, \
0x07CFF, 0x09489, 0x08500, 0x0B79B, 0x0A612, 0x0D2AD, 0x0C324, 0x0F1BF, \
0x0E036, 0x018C1, 0x00948, 0x03BD3, 0x02A5A, 0x05EE5, 0x04F6C, 0x07DF7, \
0x06C7E, 0x0A50A, 0x0B483, 0x08618, 0x09791, 0x0E32E, 0x0F2A7, 0x0C03C, \
0x0D1B5, 0x02942, 0x038CB, 0x00A50, 0x01BD9, 0x06F66, 0x07EEF, 0x04C74, \
0x05DFD, 0x0B58B, 0x0A402, 0x09699, 0x08710, 0x0F3AF, 0x0E226, 0x0D0BD, \
0x0C134, 0x039C3, 0x0284A, 0x01AD1, 0x00B58, 0x07FE7, 0x06E6E, 0x05CF5, \
0x04D7C, 0x0C60C, 0x0D785, 0x0E51E, 0x0F497, 0x08028, 0x091A1, 0x0A33A, \
0x0B2B3, 0x04A44, 0x05BCD, 0x06956, 0x078DF, 0x00C60, 0x01DE9, 0x02F72, \
0x03EFB, 0x0D68D, 0x0C704, 0x0F59F, 0x0E416, 0x090A9, 0x08120, 0x0B3BB, \
0x0A232, 0x05AC5, 0x04B4C, 0x079D7, 0x0685E, 0x01CE1, 0x00D68, 0x03FF3, \
0x02E7A, 0x0E70E, 0x0F687, 0x0C41C, 0x0D595, 0x0A12A, 0x0B0A3, 0x08238, \
0x093B1, 0x06B46, 0x07ACF, 0x04854, 0x059DD, 0x02D62, 0x03CEB, 0x00E70, \
0x01FF9, 0x0F78F, 0x0E606, 0x0D49D, 0x0C514, 0x0B1AB, 0x0A022, 0x092B9, \
0x08330, 0x07BC7, 0x06A4E, 0x058D5, 0x0495C, 0x03DE3, 0x02C6A, 0x01EF1, \
0x00F78]
CRC = CRC16()
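# A minimal usage sketch (not from the original module) relying only on the
# crc16() function and CRC16 class defined above. Because crc16() resets its
# running value to 0xffff internally, both calls below start from the same
# state and should print the same checksum.
print('crc16("123456789") = %04x' % crc16("123456789"))
demo = CRC16("123456789")
print('CRC16 class checksum = %s' % demo.hexchecksum())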
|
hyperNURb/ggrc-core
|
src/tests/ggrc_workflows/generator.py
|
Python
|
apache-2.0
| 5,290
| 0.00397
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from datetime import date
from ggrc import db
from ggrc import builder
from ggrc_workflows.models import (Workflow, TaskGroup, TaskGroupTask,
TaskGroupObject, Cycle)
from tests.ggrc.generator import Generator
import random
import copy
class WorkflowsGenerator(Generator):
def generate_workflow(self, data={}):
""" create a workflow with dict data
return: wf if it was created, or response otherwise
"""
obj_name = "workflow"
data = copy.deepcopy(data)
tgs = data.pop("task_groups", [])
wf = Workflow(title="wf " + self.random_str())
obj_dict = self.obj_to_dict(wf, obj_name)
obj_dict[obj_name].update(data)
response, workflow = self.generate(Workflow, obj_name, obj_dict)
for tg in tgs:
self.generate_task_group(workflow, tg)
return response, workflow
def generate_task_group(self, workflow=None, data={}):
if not workflow:
_, workflow = self.generate_workflow()
data = copy.deepcopy(data)
tgts = data.pop("task_group_tasks", [])
tgos = data.pop("task_group_objects", [])
obj_name = "task_group"
workflow = self._session_add(workflow)
tg = TaskGroup(
title="tg " + self.random_str(),
workflow_id=workflow.id,
context_id=workflow.context.id,
contact_id=1
)
obj_dict = self.obj_to_dict(tg, obj_name)
obj_dict[obj_name].update(data)
response, task_group = self.generate(TaskGroup, obj_name, obj_dict)
for tgt in tgts:
self.generate_task_group_task(task_group, tgt)
for tgo in tgos:
self.generate_task_group_object(task_group, tgo)
return response, task_group
def generate_task_group_task(self, task_group=None, data={}):
if not task_group:
_, task_group = self.generate_task_group()
task_group = self._session_add(task_group)
default_start = self.random_date()
default_end = self.random_date(default_start, date.today())
day_range = 5 if task_group.workflow.frequency == "weekly" else 31
obj_name = "task_group_task"
tgt = TaskGroupTask(
task_group_id=task_group.id,
context_id=task_group.context.id,
title="tgt " + self.random_str(),
start_date=default_start,
end_date=default_end,
relative_start_day=random.randrange(1, day_range),
relative_start_month=random.randrange(1, 12),
relative_end_day=random.randrange(1, day_range),
relative_end_month=random.randrange(1, 12),
contact_id=1
)
obj_dict = self.obj_to_dict(tgt, obj_name)
obj_dict[obj_name].update(data)
return self.generate(TaskGroupTask, obj_name, obj_dict)
def generate_task_group_object(self, task_group=None, obj=None):
if not task_group:
_, task_group = self.generate_task_group()
task_group = self._session_add(task_group)
obj = self._session_add(obj)
obj_name = "task_group_object"
tgo = TaskGroupObject(
object_id=obj.id,
object=obj,
task_group_id=task_group.id,
context_id=task_group.context.id
)
obj_dict = self.obj_to_dict(tgo, obj_name)
return self.generate(TaskGroupObject, obj_name, obj_dict)
def generate_cycle(self, workflow=None):
if not workflow:
_, workflow = self.generate_workflow()
workflow = self._session_add(workflow) # this should be nicer
obj_name = "cycle"
obj_dict = {
obj_name: {
"workflow": {
"id": workflow.id,
"type": workflow.__class__.__name__,
"href": "/api/workflows/%d" % workflow.id
},
"context": {
"id": workflow.context.id,
"type": workflow.context.__class__.__name__,
"href": "/api/workflows/%d" % workflow.context.id
},
"autogenerate": "true"
}
}
return self.generate(Cycle, obj_name, obj_dict)
def activate_workflow(self, workflow):
workflow = self._session_add(workflow)
return self.modify_workflow(workflow, {
"status": "Active",
"recurrences": workflow.frequency != "one_time"
})
def modify_workflow(self, wf=None, data={}):
if not wf:
_, wf = self.generate_workflow()
wf = self._session_add(wf)
obj_name = "workflow"
obj_dict = builder.json.publish(wf)
builder.json.publish_representation(obj_dict)
obj_dict.update(data)
default = {obj_name: obj_dict}
response, workflow = se
|
lf.modify(wf, obj_name, default)
return response, workflow
def modify_object(self, obj, data={}):
obj = self._session_add(obj)
obj_name = obj._inflector.table_singular
obj_dict = builder.json.publish(obj)
builder.json.publish_representation(obj_dict)
obj_dict.update(data)
obj_data = {obj_name: obj_dict}
response, generated_object = self.modify(obj, obj_name, obj_data)
return response, generated_object
def _ses
|
sion_add(self, obj):
""" Sometimes tests throw conflicting state present error."""
try:
db.session.add(obj)
return obj
except:
return obj.__class__.query.get(obj.id)
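# A hedged usage sketch of the nested-dict API above, kept as comments because it
# needs a live test database and application context to run. The keys mirror the
# pop() calls in generate_workflow() and generate_task_group():
#
# gen = WorkflowsGenerator()
# _, workflow = gen.generate_workflow({
# "frequency": "weekly",
# "task_groups": [{
# "title": "tg 1",
# "task_group_tasks": [{}, {}],  # defaults are generated for each task
# "task_group_objects": [],
# }],
# })
# gen.activate_workflow(workflow)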
|
se-esss-litterbox/ess-its
|
IceCubeIocPython/OldItsPowerMeterIoc.py
|
Python
|
gpl-3.0
| 3,050
| 0.003279
|
import paho.mqtt.client as mqtt
import time
import json
import sys
import usbtmc
#should be unique for each Ioc
clientId = "itsPowerMeter01Ioc"
subscribeTopic = "itsPowerMeter01/set/#"
publishtopic = "itsPowerMeter01/get"
periodicPollPeriodSecs = 1
# Power meter initialization
usbInst = usbtmc.Instrument(2733, 27)
usbCommand = "SYST:PRES"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
time.sleep(2)
usbCommand = "*RCL 1"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
usbCommand = "INIT:ALL:CONT ON"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
# usually leave this alone
subscribeQos = 0
publishQos = 0
brokerAddress = "broker.shiftr.io"
brokerPort = 1883
brokertimeout = 60
def getDataFromDevice():
# code here to be executed in periodic poll and set to local device
usbCommand = "SENS1:AVER:RES"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
usbCommand = "FETC1?"
print "Sending " + usbCommand + " to device"
power1 = usbInst.ask(usbCommand)
print "Received " + power1 + " from device"
power1f = float(power1) + 43.9
power1 = str(power1f)
usbCommand = "SENS2:AVER:RES"
print "Sending " + usbCommand + " to device"
usbInst.write(usbCommand)
usbCommand = "FETC2?"
print "Sending " + usbCommand + " to device"
power2 = usbInst.ask(usbCommand)
print "Received " + power2 + " from device"
power2f = float(power2) + 59.5 + 7.2
power2 = str(power2f)
data = {"power1": power1, "power2": power2}
return json.dumps(data)
def handleIncomingMessage(topic, message):
# handle messages from broker
# if "/set/init" in topic:
return
userName = sys.argv[1]
userKey = sys.argv[2]
incomingMessageTopic = ""
incomingMessage = None
newIncomingMessage = True
def on_connect(client, userdata, rc):
global brokerAddress
global subscribeTopic
print("Connected to: " + brokerAddress + " with result code "+str(rc))
client.subscribe(subscribeTopic)
print("Subscribing to: " + subscribeTopic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print clientId + " received message on topic: " + msg.topic
global incomingMessageTopic
global incomingMessage
global newIncomingMessage
incomingMessageTopic = msg.topic
incomingMessage = msg.payload
newIncomingMessage = True
client = mqtt.Client(client_id=clientId, clean_session=False, userdata=None)
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(userNam
|
e, userKey)
client.connect(brokerAddress, brokerPort, brokertimeout)
client.loop_start()
while True:
time.sleep(periodicPollPeriodSecs)
dataFromDevice = getDataFromDevice()
if len
|
(dataFromDevice) > 0:
client.publish(publishtopic, dataFromDevice, publishQos, True)
if newIncomingMessage:
handleIncomingMessage(incomingMessageTopic, incomingMessage)
newIncomingMessage = False
|
shaneHowearth/Statutory_Holidays
|
Christmas_and_New_Year/Boxing_day.py
|
Python
|
gpl-2.0
| 1,049
| 0.003813
|
import datetime
import astropy
'''Boxing Day is celebrated on the 26th of December each year.
If Boxing Day falls on a weekend, its observance is moved to the following Monday or Tuesday:
Boxing Day is observed on the 28th if the 26th falls on a Saturday or Sunday.
'''
def get_holiday(year):
'''
Calculate the observed date of Boxing day for the given year
:param year: int
:return: datetime object set for the observed date
'''
if year < 1:
raise Val
|
ueError("Year must be > 1")
DECEMBER = 12
if datetime.date(year, DECEMBER, 26).weekday() in (5,6):
# the 26th falls on the weekend
return datetime.date(year, DECEMBER, 28)
else:
return datetime.date(year, DECEMBER, 26)
def get_actual(year):
'''
Boxing Day is always celebrated on the 26th of December
:param year: int
|
:return: datetime object set for the actual date
'''
if year < 1:
raise ValueError("Year must be > 1")
DECEMBER = 12
return datetime.date(year, DECEMBER, 26)
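# A minimal usage sketch (not from the original module) relying only on the two
# functions above. In 2020 the 26th of December fell on a Saturday, so the
# observed date shifts to the 28th while the actual date stays on the 26th.
print(get_holiday(2020))   # 2020-12-28
print(get_actual(2020))    # 2020-12-26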
|
jefftc/changlab
|
Betsy/Betsy/modules/plot_geneset_score_bar.py
|
Python
|
mit
| 1,784
| 0.006726
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import filelib
import os
from genomicode import jmath
in_data = antecedents
matrix = [x for x in filelib.read_cols(in_data.identifier)]
matrix = [x[1:] for x in matrix]
matrix = jmath.transpose(matrix)
sample = matrix[0][1:]
data = matrix[1:]
if not os.path.exists(outfile):
os
|
.mkdir(outfile)
for one_data in data:
value = one_data[1:]
value = [float(i
|
) for i in value]
pair = [(value[i], sample[i]) for i in range(len(value))]
pair.sort()
gene_value = [i[0] for i in pair]
label = [i[1] for i in pair]
ylabel = one_data[0]
from genomicode import mplgraph
fig = mplgraph.barplot(gene_value,
box_label=label,
xtick_rotation=90,
xlabel='sample',
ylabel=ylabel)
output = os.path.join(outfile, ylabel)
fig.savefig(output + '.png')
assert filelib.exists_nz(outfile), (
'the output file %s for plot_geneset_score_bar fails' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
filename = 'geneset_plot_' + original_file + '.png'
return filename
|
DS-100/sp17-materials
|
sp17/labs/lab06/ok_tests/q4.py
|
Python
|
gpl-3.0
| 642
| 0.003115
|
test = {
'name': 'Question 4',
'points': 1,
'suites': [
{
|
'cases': [
{
'code': r"""
>>> rows7 = {('Joey', 7), ('Henry', 7)}
>>> rows6 = {('Ian', 6), ('Joyce', 6)}
>>> q4_answer[0] == ("John", 8)
True
>>> all([tuple(row) in rows7 for row in q4_answer[1:3]])
True
>>> all([tuple(row) in rows6 for row in q4_
|
answer[3:5]])
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
showell/zulip
|
analytics/management/commands/stream_stats.py
|
Python
|
apache-2.0
| 2,358
| 0.003393
|
from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from zerver.models import Message, Realm, Recipient, Stream, Subscription, get_realm
class Command(BaseCommand):
help = "Generate statistics on the streams for a realm."
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('realms', metavar='<realm>', nargs='*',
help="realm to generate statistics for")
def handle(self, *args: Any, **options: str) -> None:
if options['realms']:
try:
realms = [get_realm(string_id) for string_id in options['realms']]
except Realm.DoesNotExist as e:
raise CommandError(e)
else:
realms = Realm.objects.all()
for realm in realms:
streams = Stream.objects.filter(realm=realm).exclude(Q(name__istartswith="tutorial-"))
# privat
|
e stream count
private_count = 0
# public stream count
public_count = 0
for stream in streams:
if stream.
|
invite_only:
private_count += 1
else:
public_count += 1
print("------------")
print(realm.string_id, end=' ')
print("{:>10} {} public streams and".format("(", public_count), end=' ')
print(f"{private_count} private streams )")
print("------------")
print("{:>25} {:>15} {:>10} {:>12}".format("stream", "subscribers", "messages", "type"))
for stream in streams:
if stream.invite_only:
stream_type = 'private'
else:
stream_type = 'public'
print(f"{stream.name:>25}", end=' ')
recipient = Recipient.objects.filter(type=Recipient.STREAM, type_id=stream.id)
print("{:10}".format(len(Subscription.objects.filter(recipient=recipient,
active=True))), end=' ')
num_messages = len(Message.objects.filter(recipient=recipient))
print(f"{num_messages:12}", end=' ')
print(f"{stream_type:>15}")
print("")
|
hatwar/buyback-erpnext
|
erpnext/accounts/doctype/account/account.py
|
Python
|
agpl-3.0
| 8,576
| 0.024254
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint
from frappe import throw, _
from frappe.model.document import Document
class RootNotEditable(frappe.ValidationError): pass
class Account(Document):
nsm_parent_field = 'parent_account'
def onload(self):
frozen_accounts_modifier = frappe.db.get_value("Accounts Settings", "Accounts Settings",
"frozen_accounts_modifier")
if not frozen_accounts_modifier or frozen_accounts_modifier in frappe.get_roles():
self.get("__onload").can_freeze_account = True
def autoname(self):
self.name = self.account_name.strip() + ' - ' + \
frappe.db.get_value("Company", self.company, "abbr")
def validate(self):
self.validate_parent()
self.validate_root_details()
self.set_root_and_report_type()
self.validate_mandatory()
self.validate_warehouse_account()
self.validate_frozen_accounts_modifier()
self.validate_balance_must_be_debit_or_credit()
self.validate_account_currency()
def validate_parent(self):
"""Fetch Parent Details and validate parent account"""
if self.parent_account:
par = frappe.db.get_value("Account", self.parent_account,
["name", "is_group", "company"], as_dict=1)
if not par:
throw(_("Account {0}: Parent account {1} does not exist").format(self.name, self.parent_account))
elif par.name == self.name:
throw(_("Account {0}: You can not assign itself as parent account").format(self.name))
elif not par.is_group:
throw(_("Account {0}: Parent account {1} can not be a ledger").format(self.name, self.parent_account))
elif par.company != self.company:
throw(_("Account {0}: Parent account {1} does not belong to company: {2}")
.format(self.name, self.parent_account, self.company))
def set_root_and_report_type(self):
if self.parent_account:
par = frappe.db.get_value("Account", self.parent_account, ["report_type", "root_type"], as_dict=1)
if par.report_type:
self.report_type = par.report_type
if par.root_type:
self.root_type = par.root_type
if self.is_group:
db_value = frappe.db.get_value("Account", self.name, ["report_type", "root_type"], as_dict=1)
if db_value:
if self.report_type != db_value.report_type:
frappe.db.sql("update `tabAccount` set report_type=%s where lft > %s and rgt < %s",
(self.report_type, self.lft, self.rgt))
if self.root_type != db_value.root_type:
frappe.db.sql("update `tabAccount` set root_type=%s where lft > %s and rgt < %s",
(self.root_type, self.lft, self.rgt))
def validate_root_details(self):
# does not exists parent
if frappe.db.exists("Account", self.name):
if not frappe.db.get_value("Account", self.name, "parent_account"):
throw(_("Root cannot be edited."), RootNotEditable)
def validate_frozen_accounts_modifier(self):
old_value = frappe.db.get_value("Account", self.name, "freeze_account")
if old_value and old_value != self.freeze_account:
frozen_accounts_modifier = frappe.db.get_value('Accounts Settings', None, 'frozen_accounts_modifier')
if not frozen_accounts_modifier or \
frozen_accounts_modifier not in frappe.get_roles():
throw(_("You are not authorized to set Frozen value"))
def validate_balance_must_be_debit_or_credit(self):
from erpnext.accounts.utils import get_balance_on
if not self.get("__islocal") and self.balance_must_be:
account_balance = get_balance_on(self.name)
if account_balance > 0 and self.balance_must_be == "Credit":
frappe.throw(_("Account balance already in Debit, you are not allowed to set 'Balance Must Be' as 'Credit'"))
elif account_balance < 0 and self.balance_must_be == "Debit":
frappe.throw(_("Account balance already in Credit, you are not allowed to set 'Balance Must Be' as 'Debit'"))
def validate_account_currency(self):
if not self.account_currency:
self.account_currency = frappe.db.get_value("Company", self.company, "default_currency")
elif self.account_currency != frappe.db.get_value("Account", self.name, "account_currency"):
if frappe.db.get_value("GL Entry", {"account": self.name}):
frappe.throw(_("Currency can not be changed after making entries using some other currency"))
def convert_group_to_ledger(self):
if self.check_if_child_exists():
throw(_("Account with child nodes cannot be converted to ledger"))
elif self.check_gle_exists():
throw(_("Account with existing transaction cannot be converted to ledger"))
else:
self.is_group = 0
self.save()
return 1
def convert_ledger_to_group(self):
if self.check_gle_exists():
throw(_("Account with existing transaction can not be converted to group."))
elif self.account_type:
throw(_("Cannot covert to Group because Account Type is selected."))
else:
self.is_group = 1
self.save()
return 1
# Check if any previous balance exists
def check_gle_exists(self):
return frappe.db.get_value("GL Entry", {"account": self.name})
def check_if_child_exists(self):
return frappe.db.sql("""select name from `tabAccount` where parent_account = %s
and docstatus != 2""", self.name)
def validate_mandatory(self):
if not self.report_type:
throw(_("Report Type is mandatory"))
if not self.root_type:
throw(_("Root Type is mandatory"))
def validate_warehouse_account(self):
if not cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
return
if self.account_type == "Warehouse":
if not self.warehouse:
throw(_("Warehouse is mandatory if account type is Warehouse"))
old_warehouse = cstr(frappe.db.get_value("Account", self.name, "warehouse"))
if old_warehouse != cstr(self.warehouse):
if old_warehouse:
self.validate_warehouse(old_warehouse)
if self.warehouse:
self.validate_warehouse(self.warehouse)
elif self.warehouse:
self.warehouse = None
def validate_warehouse(self, warehouse):
if frappe.db.get_value("Stock Ledger Entry", {"warehouse": warehouse}):
throw(_("Stock entries exist against warehouse {0}, hence you cannot re-assign or modify Warehouse").format(warehouse))
def update_nsm_model(self):
"""update lft, rgt indices for nested set model"""
import frappe
import frappe.utils.nestedset
frappe.utils.nestedset.update_nsm(self)
def on_update(self):
self.update_nsm_model()
def validate_trash(self):
"""checks gl entries and if child exists"""
if not self.parent_account:
throw(_("Root account can not be deleted"))
if self.check_gle_exists():
throw(_("Account with existing transaction can not be deleted"))
if self.check_if_child_exists():
throw(_("Chil
|
d account exists for this account. You can not delete this account."))
def on_trash(self):
self.validate_trash()
self.update_nsm_model()
def before_rename(self, old, new, merge=False):
# Add company abbr if not provided
from erpnext.setup.doctype.company.company import get_name_with_abbr
new_account = get_name_with_abbr(new, self.company)
# Validate properties before merging
if merge:
if not frappe.db.exists("Account", new):
throw(_("Acc
|
ount {0} does not exist").format(new))
val = list(frappe.db.get_value("Account", new_account,
["is_group", "root_type", "company"]))
if val != [self.is_group, self.root_type, self.company]:
throw(_("""Merging is only possible if following properties are same in both records. Is Group, Root Type, Company"""))
return new_account
def after_rename(self, old, new, merge=False):
if not merge:
frappe.db.set_value("Account", new, "account_name",
" - ".join(new.split(" - ")[:-1]))
else:
from frappe.utils.nestedset import rebuild_tree
rebuild_tree("Account", "parent_account")
def get_parent_account(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name from tabAccount
where is_group = 1 and docstatus != 2 and company = %s
and %s like %s order by name limit %s, %s""" %
("%s", searchfield, "%s", "%s", "%s"),
(filters["company"], "%%%s%%" % txt, start, page_len), as_list=1)
def get_account_currency(account):
"
|
FedoraScientific/salome-smesh
|
src/SMESH_SWIG/PAL_MESH_041_mesh.py
|
Python
|
lgpl-2.1
| 2,975
| 0.009748
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D, OPEN CASCADE
#
# Copyright (C) 2003-2007 OPEN CASCADE, EADS/CCR, LIP6, CEA/DEN,
# CEDRAT, EDF R&D, LEG, PRINCIPIA R&D, BUREAU VERITAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(salome.myStudy)
#-----------------------------GEOM----
|
------------------------------------
#----------Vertexes------------
p1 = geompy.
|
MakeVertex(20.0,30.0,40.0)
p2 = geompy.MakeVertex(90.0,80.0,0.0)
p3 = geompy.MakeVertex(30.0,80.0,200.0)
#----------Edges---------------
e1 = geompy.MakeEdge(p1,p2)
e2 = geompy.MakeEdge(p2,p3)
e3 = geompy.MakeEdge(p3,p1)
#----------Wire----------------
ListOfEdges = []
ListOfEdges.append(e3)
ListOfEdges.append(e2)
ListOfEdges.append(e1)
wire1 = geompy.MakeWire(ListOfEdges)
#----------Face----------------
WantPlanarFace = 1
face1 = geompy.MakeFace(wire1,WantPlanarFace)
Id_face1 = geompy.addToStudy(face1,"Face1")
#-----------------------------SMESH-------------------------------------------
# -- Init mesh --
plane_mesh = salome.IDToObject( Id_face1)
mesh = smesh.Mesh(plane_mesh, "Mesh_1")
print"---------------------Hypothesis and Algorithms"
#---------------- NumberOfSegments
numberOfSegment = 9
algoWireDes = mesh.Segment()
listHyp = algoWireDes.GetCompatibleHypothesis()
print algoWireDes.GetName()
algoWireDes.SetName("Ware descritisation")
hypNbSeg = algoWireDes.NumberOfSegments(numberOfSegment)
print hypNbSeg.GetName()
print hypNbSeg.GetNumberOfSegments()
smesh.SetName(hypNbSeg, "Nb. Segments")
#--------------------------Max. Element Area
maxElementArea = 200
algoMef = mesh.Triangle()
listHyp = algoMef.GetCompatibleHypothesis()
print algoMef.GetName()
algoMef.SetName("Triangle (Mefisto)")
hypArea200 = algoMef.MaxElementArea(maxElementArea)
print hypArea200.GetName()
print hypArea200.GetMaxElementArea()
smesh.SetName(hypArea200, "Max. Element Area")
print "---------------------Compute the mesh"
ret = mesh.Compute()
print ret
salome.sg.updateObjBrowser(1)
|
nurhandipa/python
|
codecademy/string_methods.py
|
Python
|
gpl-3.0
| 72
| 0
|
# co
|
decademy cour
|
se answer
parrot = "Norwegian Blue"
print len(parrot)
|
mcalmer/spacewalk
|
client/rhel/rhn-client-tools/src/actions/reboot.py
|
Python
|
gpl-2.0
| 1,358
| 0.005891
|
#!/usr/bin/python2
# Client code for Update Agent
# Copyright (c) 1999--2018 Red Hat, Inc. Distributed under GPLv2.
#
# Author: Adrian Likins <alikins@redhat.com
#
import os
__rhnexport__ = [
'reboot']
from up2date_client import up2dateLog
from up2date_client import config
cfg = config.initUp2dateConfig()
log = up2dateLog.initLog()
# action version we understand
ACTION_VERSION = 2
def reboot(test=None, cache_only=None):
if cache_only:
return (0, "no-ops for caching", {})
if cfg['noReboot']:
return (38, "Up2date is configured not to allow reboots", {})
pid = os.fork()
data = {'version': '0'}
reboot_message = 'Reboot of system "' + os.uname()[1] + '" initiated by Spacewalk reboot action.'
if not pid:
try:
|
if test:
os.execvp("/sbin/shutdown", ['/sbin/shutdown','-r','-k', '+3', reboot_message])
else:
os.execvp("/sbin/shutdown", ['/sbin/shutdown','-r', '+3', reboot_message])
except OSError:
            data['name'] = "reboot.reboot.shutdown_failed"
return (34, "Could not execute /sbin/shutdown", data)
log.log_me("Rebooting the system now")
# no point in waiting around
return (0, "Reboot sucessfully started", data)
def main():
print(reboot(test=1))
if __name__ == "__main__":
main()
|
helix84/activae
|
deployment/__init__.py
|
Python
|
bsd-3-clause
| 3,590
| 0
|
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
|
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain
__all__ = ['config']
|
erasche/argparse2tool
|
argparse2tool/cmdline2gxml/__init__.py
|
Python
|
apache-2.0
| 1,040
| 0.002885
|
import logging
import sys
from argparse2tool import load_argparse
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Arg2GxmlParser:
def __init__(self):
ap = load_argparse() # avoid circular imports
help_text = (
"argparse2tool forms Galaxy XML and CWL tools from Python scripts.\n"
"You are curren
|
tly using the Galaxy XML invocation which may have different options from the CWL invocation."
)
arg2tool_parser = ap.ArgumentParser(
prog=sys.argv[0], description=help_text,
formatter_class=ap.RawDescriptionHelpFormatter, add_help=False
)
arg2tool_parser.add_argument('--help', help='Show this help message and exit', action='help')
self.parser = arg2tool_parser
def process_arguments(self):
self.parser.add_argument('--generate_galaxy_xml', action='store_true')
self.parser.add_argument('--command', action='store', default="")
return vars(self.parser.parse_args())
|
firmlyjin/brython
|
www/tests/unittests/test/test_syslog.py
|
Python
|
bsd-3-clause
| 1,104
| 0.005435
|
from test import support
syslog = support.import_module("syslog") #skip if not supported
import unittest
# XXX(nnorwitz): This test sucks. I don't know of a platform independent way
# to verify that the messages were really logged.
# The only purpose of this test is to verify the code doesn't crash or leak.
class Test(unittest.TestCase):
def test_openlog(self):
syslog.openlog('python')
# Issue #6697.
self.assertRaises(UnicodeEncodeError, syslog.openlog, '\uD800')
def test_syslog(self):
syslog.openlog('python')
        syslog.syslog('test message from python test_syslog')
syslog.syslog(syslog.LOG_ERR, 'test error from python test_syslog')
def test_closelog(self):
syslog.openlog('python')
syslog.closelog()
def test_setlogmask(self):
syslog.setlogmask(syslog.LOG_DEBUG)
def test_log_mask(self):
syslog.LOG_MASK(syslog.LOG_INFO)
def test_log_upto(self):
syslog.LOG_UPTO(syslog.LOG_INFO)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
remitamine/youtube-dl
|
youtube_dl/extractor/vimeo.py
|
Python
|
unlicense
| 46,520
| 0.001721
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import functools
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_kwargs,
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
dict_get,
ExtractorError,
js_to_json,
int_or_none,
merge_dicts,
OnDemandPagedList,
parse_filesize,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
str_or_none,
try_get,
unified_timestamp,
unsmuggle_url,
urlencode_postdata,
urljoin,
unescapeHTML,
)
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
_LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
username, password = self._get_login_info()
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
webpage = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = {
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
}
self._set_vimeo_cookie('vuid', vuid)
try:
self._download_webpage(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': self._LOGIN_URL,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 418:
raise ExtractorError(
'Unable to log in: bad username or password',
expected=True)
raise ExtractorError('Unable to log in')
def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
'password': password,
'token': token,
})
if url.startswith('http://'):
# vimeo only supports https now, but the user can give an http url
url = url.replace('http://', 'https://')
password_request = sanitized_Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Referer', url)
self._set_vimeo_cookie('vuid', vuid)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
webpage, 'vuid', group='vuid')
return xsrft, vuid
def _extract_vimeo_config(self, webpage, video_id, *args, **kwargs):
vimeo_config = self._search_regex(
r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));',
webpage, 'vimeo config', *args, **compat_kwargs(kwargs))
if vimeo_config:
return self._parse_json(vimeo_config, video_id)
def _set_vimeo_cookie(self, name, value):
self._set_cookie('vimeo.com', name, value)
def _vimeo_sort_formats(self, formats):
# Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
# at the same time without actual units specified. This lead to wrong sorting.
self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))
def _parse_config(self, config, video_id):
video_data = config['video']
video_title = video_data['title']
live_event = video_data.get('live_event') or {}
is_live = live_event.get('status') == 'started'
formats = []
config_files = video_data.get('files') or config['request'].get('files', {})
for f in config_files.get('progressive', []):
video_url = f.get('url')
if not video_url:
continue
formats.append({
'url': video_url,
'format_id': 'http-%s' % f.get('quality'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'fps': int_or_none(f.get('fps')),
'tbr': int_or_none(f.get('bitrate')),
})
# TODO: fix handling of 308 status code returned for live archive manifest requests
for files_type in ('hls', 'dash'):
for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
manifest_url = cdn_data.get('url')
if not manifest_url:
continue
format_id = '%s-%s' % (files_type, cdn_name)
if files_type == 'hls':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4',
'm3u8' if is_live else 'm3u8_native', m3u8_id=format_id,
note='Downloading %s m3u8 information' % cdn_name,
fatal=False))
elif files_type == 'dash':
mpd_pattern = r'/%s/(?:sep/)?video/' % video_id
mpd_manifest_urls = []
if re.search(mpd_pattern, manifest_url):
for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
mpd_manifest_urls.append((format_id + suffix, re.sub(
mpd_pattern, '/%s/%s/' % (video_id, repl), manifest_url)))
else:
mpd_manifest_urls = [(format_id, manifest_url)]
for f_id, m_url in mpd_manifest_urls:
if 'json=1' in m_url:
real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url')
if real_m_url:
m_url = real_m_url
mpd_formats = self._extract_mpd_formats(
m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
'Downloading %s MPD information' % cdn_name,
fatal=False)
for f in mpd_formats:
if f.get('vcodec') == 'none':
f['preference'] = -50
elif f.get('acodec') == 'none':
f['preference'] = -40
formats.extend(mpd_formats)
live_archive = live_event.get('archive') or {}
live_archive_source_url = live_archive.get('source_url')
if live_archive_source_url and live_archive.get('status') == 'done':
formats.append({
'format_id': 'live-archive-source',
                'url': live_archive_source_url,
'preference': 1,
})
subtitles = {}
text_tracks = config['request'].get('text_tracks')
if text_tracks:
for tt in text_tracks:
subtitles[tt['lang']] = [{
'ext': 'vtt',
'url': urljoin('https://vimeo.com', tt['url']),
|
}]
thumbnails = []
if not is_live:
for key, thumb in video_data.get('thumbs', {}).items():
thumbnails.append({
|
mxOBS/deb-pkg_trusty_chromium-browser
|
native_client/PRESUBMIT.py
|
Python
|
bsd-3-clause
| 7,031
| 0.008107
|
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Documentation on PRESUBMIT.py can be found at:
# http://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
import os
import sys
# List of directories to not apply presubmit project checks, relative
# to the NaCl top directory
EXCLUDE_PROJECT_CHECKS_DIRS = [
# The following contain test data (including automatically generated),
# and do not follow our conventions.
'src/trusted/validator_ragel/testdata/32',
'src/trusted/validator_ragel/testdata/64',
'src/trusted/validator_x86/testdata/32',
'src/trusted/validator_x86/testdata/64',
'src/trusted/validator/x86/decoder/generator/testdata/32',
'src/trusted/validator/x86/decoder/generator/testdata/64',
# The following directories contains automatically generated source,
# which may not follow our conventions.
'src/trusted/validator_x86/gen',
'src/trusted/validator/x86/decoder/gen',
'src/trusted/validator/x86/decoder/generator/gen',
'src/trusted/validator/x86/ncval_seg_sfi/gen',
'src/trusted/validator_arm/gen',
'src/trusted/validator_ragel/gen',
]
NACL_TOP_DIR = os.getcwd()
while not os.path.isfile(os.path.join(NACL_TOP_DIR, 'PRESUBMIT.py')):
NACL_TOP_DIR = os.path.dirname(NACL_TOP_DIR)
assert len(NACL_TOP_DIR) >= 3, "Could not find NaClTopDir"
def _CommonChecks(input_api, output_api):
"""Checks for both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, project_name='Native Client',
excluded_paths=tuple(EXCLUDE_PROJECT_CHECKS_DIRS)))
return results
def IsFileInDirectories(f, dirs):
""" Returns true if f is in list of directories"""
for d in dirs:
    if d == os.path.commonprefix([f , d]):
return True
return False
def CheckChangeOnUpload(input_api, output_api):
"""Verifies all changes in all files.
Args:
input_api: the limited set of input modules allowed in presubmit.
output_api: the limited set of output modules allowed in presubmit.
"""
report = []
report.extend(_CommonChecks(input_api, output_api))
# The commit queue assumes PRESUBMIT.py is standalone.
  # TODO(bradnelson): Migrate code_hygiene to a common location so that
# it can be used by the commit queue.
old_sys_path = list(sys.path)
try:
    sys.path.append(os.path.join(NACL_TOP_DIR, 'tools'))
sys.path.append(os.path.join(NACL_TOP_DIR, 'build'))
import code_hygiene
finally:
sys.path = old_sys_path
del old_sys_path
affected_files = input_api.AffectedFiles(include_deletes=False)
exclude_dirs = [ NACL_TOP_DIR + '/' + x + '/'
for x in EXCLUDE_PROJECT_CHECKS_DIRS ]
for filename in affected_files:
filename = filename.AbsoluteLocalPath()
if not IsFileInDirectories(filename, exclude_dirs):
errors, warnings = code_hygiene.CheckFile(filename, False)
for e in errors:
report.append(output_api.PresubmitError(e, items=errors[e]))
for w in warnings:
report.append(output_api.PresubmitPromptWarning(w, items=warnings[w]))
return report
def CheckChangeOnCommit(input_api, output_api):
"""Verifies all changes in all files and verifies that the
tree is open and can accept a commit.
Args:
input_api: the limited set of input modules allowed in presubmit.
output_api: the limited set of output modules allowed in presubmit.
"""
report = []
report.extend(CheckChangeOnUpload(input_api, output_api))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://nativeclient-status.appspot.com/current?format=json'))
return report
# Note that this list is duplicated in the Commit Queue. If you
# change this list, you should also update the CQ's list here:
# https://chrome-internal.googlesource.com/infra/infra_internal/+/master/commit_queue/projects.py
# (see https://crbug.com/399059).
DEFAULT_TRYBOTS = [
'nacl-precise32_newlib_dbg',
'nacl-precise32_newlib_opt',
'nacl-precise32_glibc_opt',
'nacl-precise64_newlib_dbg',
'nacl-precise64_newlib_opt',
'nacl-precise64_glibc_opt',
'nacl-mac10.6_newlib_opt',
'nacl-mac10.6_glibc_opt',
'nacl-mac10.6_64_newlib_dbg',
'nacl-mac10.6_64_glibc_opt',
'nacl-mac10.7_newlib_opt',
'nacl-mac10.7_glibc_opt',
'nacl-mac10.7_64_newlib_dbg',
'nacl-mac10.7_64_glibc_opt',
'nacl-mac10.8_32_newlib_dbg',
'nacl-mac10.8_32_glibc_opt',
'nacl-mac10.8_64_newlib_dbg',
'nacl-mac10.8_64_glibc_opt',
'nacl-win32_newlib_opt',
'nacl-win32_glibc_opt',
'nacl-win64_newlib_dbg',
'nacl-win64_newlib_opt',
'nacl-win64_glibc_opt',
'nacl-win8-64_newlib_dbg',
'nacl-win8-64_newlib_opt',
'nacl-arm_opt_panda',
# arm-nacl-gcc bots
'nacl-win7_64_arm_newlib_opt',
'nacl-mac10.7_arm_newlib_opt',
'nacl-precise64_arm_newlib_opt',
# Clang bots
'nacl-precise_64-newlib-dbg-clang',
'nacl-mac10.6-newlib-dbg-clang',
# pnacl scons bots
'nacl-precise_64-newlib-arm_qemu-pnacl',
'nacl-precise_64-newlib-x86_32-pnacl',
'nacl-precise_64-newlib-x86_64-pnacl',
'nacl-mac10.8_newlib_opt_pnacl',
'nacl-win7_64_newlib_opt_pnacl',
# pnacl spec2k bots
'nacl-arm_perf_panda',
'nacl-precise_64-newlib-x86_32-pnacl-spec',
'nacl-precise_64-newlib-x86_64-pnacl-spec',
]
PNACL_TOOLCHAIN_TRYBOTS = [
'nacl-toolchain-linux-pnacl-x86_64',
'nacl-toolchain-linux-pnacl-x86_32',
'nacl-toolchain-mac-pnacl-x86_32',
'nacl-toolchain-win7-pnacl-x86_64',
]
TOOLCHAIN_BUILD_TRYBOTS = [
'nacl-toolchain-precise64-newlib-arm',
'nacl-toolchain-mac-newlib-arm',
]
def GetPreferredTryMasters(_, change):
has_pnacl = False
has_toolchain_build = False
has_others = False
for file in change.AffectedFiles(include_dirs=True):
if IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'build'),
os.path.join(NACL_TOP_DIR, 'buildbot'),
os.path.join(NACL_TOP_DIR, 'pynacl')]):
# Buildbot and infrastructure changes should trigger all the try bots.
has_pnacl = True
has_toolchain_build = True
has_others = True
break
elif IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'pnacl')]):
has_pnacl = True
elif IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'toolchain_build')]):
has_toolchain_build = True
else:
has_others = True
trybots = []
if has_pnacl:
trybots += PNACL_TOOLCHAIN_TRYBOTS
if has_toolchain_build:
trybots += TOOLCHAIN_BUILD_TRYBOTS
if has_others:
trybots += DEFAULT_TRYBOTS
return {
'tryserver.nacl': { t: set(['defaulttests']) for t in trybots },
}
|
xibosignage/xibo-pyclient
|
plugins/media/MicroblogMedia.py
|
Python
|
agpl-3.0
| 27,626
| 0.010715
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Xibo - Digital Signage - http://www.xibo.org.uk
# Copyright (C) 2010-11 Alex Harrington
#
# This file is part of Xibo.
#
# Xibo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Xibo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Xibo. If not, see <http://www.gnu.org/licenses/>.
#
from XiboMedia import XiboMedia
from threading import Thread, Semaphore
import sys, os, time, codecs
import simplejson
import urllib2
import urllib
import cPickle
import inspect
# Define constants to represent each service
TWITTER = 0
IDENTICA = 1
class MicroblogMedia(XiboMedia):
def add(self):
self.running = True
self.tmpPath = os.path.join(self.libraryDir,self.mediaNodeName + "-tmp.html")
self.opener = urllib2.build_opener()
# Semaphore to lock reading/updating the global posts array
self.__lock = Semaphore()
# Options:
# <searchTerm>oggcamp</searchTerm><fadeInterval>1</fadeInterval><speedInterval>5</speedInterval><updateInterval>10</updateInterval><historySize>15</historySize><twitter>1</twitter><identica>1</identica></options>
self.options['historySize'] = int(self.options['historySize'])
self.options['twitter'] = bool(int(self.options['twitter']))
self.options['identica'] = bool(int(self.options['identica']))
self.options['speedInterval'] = int(self.options['speedInterval'])
self.options['fadeInterval'] = int(self.options['fadeInterval'])
# Create an empty array for the posts to sit in
# Each element will be a dictionary in the following format:
# {'xibo_src': 0, u'iso_language_code': u'en_GB', u'text': u"@bobobex @corenominal If you have an android device, give c:geo a look for !geocaching, or nudge me at oggcamp and I'll show you. 'sgood", u'created_at': u'Thu, 08 Apr 2010 08:03:38 +0000', u'profile_image_url': u'http://avatar.identi.ca/13737-48-20080711132350.png', u'to_user': None, u'source': u'web', u'from_user': u'jontheniceguy', u'from_user_id': u'13737', u'to_user_id': None, u'id': u'27725072'}
self.__posts = []
# Parse out the template element from the raw tag.
try:
for t in self.rawNode.getElementsByTagName('template'):
self.templateNode = t
for node in self.templateNode.childNodes:
if node.nodeType == node.CDATA_SECTION_NODE:
self.template = node.data.encode('UTF-8')
self.log.log(5,'audit','Template is: ' + self.template)
except:
self.log.log(2,'error','%s Error parsing out the template from the xlf' % self.mediaNodeName)
self.template = ""
# Parse out the nocontent element from the raw tag
try:
for t in self.rawNode.getElementsByTagName('nocontent'):
self.nocontentNode = t
for node in self.nocontentNode.childNodes:
if node.nodeType == node.CDATA_SECTION_NODE:
self.nocontent = node.data.encode('UTF-8')
self.log.log(5,'audit','No Content is: ' + self.nocontent)
except:
self.log.log(2,'error','%s Error parsing out the nocontent from the xlf' % self.mediaNodeName)
self.nocontent = ""
def run(self):
# Kickoff the display output thread
self.displayThread = MicroblogMediaDisplayThread(self.log,self.p,self)
self.displayThread.start()
# Start the region timer so the media dies at the right time.
self.p.enqueue('timer',(int(self.duration) * 1000,self.timerElapsed))
tmpXML = '<browser id="' + self.mediaNodeName + '" opacity="0" width="' + str(self.width) + '" height="' + str(self.height) + '"/>'
self.p.enqueue('add',(tmpXML,self.regionNodeName))
self.startStats()
# Pointer to the currently displayed post:
self.__pointer = -1
# Open previous cache file (if exists) and begin playing out posts
# Lock the semaphore as we write to __posts to avoid changing the array as the display thread reads it.
try:
try:
self.log.log(9,'info','%s acquiring lock to read pickled file.' % self.mediaId)
self.__lock.acquire()
self.log.log(9,'info','%s acquired lock to read pickled file.' % self.mediaId)
tmpFile = open(os.path.join(self.libraryDir,self.mediaId + ".pickled"), 'rb')
self.__posts = cPickle.load(tmpFile)
tmpFile.close()
finally:
self.__lock.release()
self.log.log(9,'info','%s releasing lock after reading pickled file.' % self.mediaId)
except:
# Erase any pickle file that may be existing but corrupted
try:
os.remove(os.path.join(self.libraryDir,self.mediaId + ".pickled"))
self.log.log(9,'info','%s erasing corrupt pickled file.' % self.mediaId)
except:
self.log.log(9,'info','%s unable to erase corrupt pickled file.' % self.mediaId)
self.log.log(5,"audit","Unable to read serialised representation of the posts array or this media has never run before.")
self.__lock.release()
self.displayThread.nextPost()
# Check that the updateInterval we've been given is sane
try:
self.options['updateInterval'] = int(self.options['updateInterval'])
except:
self.options['updateInterval'] = 5
while self.running:
self.log.log(0,"audit","%s: Waking up" % self.mediaId)
try:
mtime = os.path.getmtime(os.path.join(self.libraryDir,self.mediaId + '.pickled'))
except:
# File probably doesn't exist.
# Pretend the file was last updated more than updateInterval ago
self.log.log(0,"audit","%s: Post cache does not exist.")
mtime = time.time() - (self.options['updateInterval'] * 60) - 10
if time.time() > (mtime + (self.options['updateInterval'] * 60)):
# Download new posts and add them to the rotation
self.log.log(0,"audit","%s: Getting new posts from Microblogs" % self.mediaId)
tmpTwitter = self.updateTwitter()
|
tmpIdentica = self.updateIdentica()
|
tmpPosts = []
# Deduplicate the posts we've pulled in from Twitter against Identica and __posts
for post in tmpTwitter:
inIdentica = False
inPosts = False
# See if the post is in the tmpIdentica array
for cmpPost in tmpIdentica:
if post['text'] == cmpPost['text'] and post['from_user'] == cmpPost['from_user']:
inIdentica = True
# See if the post is in the __posts array
for cmpPost in self.__posts:
if post['text'] == cmpPost['text'] and post['from_user'] == cmpPost['from_user']:
inPosts = True
# Update self.__posts with the new content as required
# Lock the semaphore as we write to __posts to avoid changing the array as the display thread reads it.
if inIdentica or inPosts:
# The post already exists or is in Identica too
|
onepesu/django_transmission
|
torrents/views.py
|
Python
|
mit
| 371
| 0.002695
|
import json
from django.http import HttpResponse
from django.shortcuts import render
from torrents.logic import active_torrents_info
def active(request):
if request.is_ajax():
content = {"torrents
|
": active_torrents_info()}
return HttpResponse(json.dumps(content), content_type="application/json")
    return render(request, "torrents/active.html")
|
eliasdesousa/indico
|
indico/modules/events/logs/controllers.py
|
Python
|
gpl-3.0
| 1,324
| 0.001511
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.logs.models.entries import EventLogEntry
from indico.modules.events.logs.views import WPEventLogs
from indico.modules.events.management.controllers import RHManageEventBase
class RHEventLogs(RHManageEventBase):
"""Shows the modification/action log for the event"""
    def _process(self):
entries = self.event.log_entries.order_by(EventLogEntry.logged_dt.desc()).all()
realms = {e.realm for e in entries}
return WPEventLogs.render_template('logs.html', self.event, entries=entries, realms=realms)
|
unicef/rhizome
|
rhizome/tests/test_api_source_object_map.py
|
Python
|
agpl-3.0
| 5,601
| 0.005892
|
from rhizome.tests.base_test_case import RhizomeApiTestCase
from rhizome.models.indicator_models import Indicator
from rhizome.models.document_models import SourceObjectMap, \
DocumentSourceObjectMap
from pandas import read_csv
from rhizome.tests.setup_helpers import TestSetupHelpers
class SourceObjectMapResourceTest(RhizomeApiTestCase):
def setUp(self):
## instantiate the test client and all other methods ##
super(SourceObjectMapResourceTest, self).setUp()
self.test_setup = TestSetupHelpers()
self.user = self.test_setup.user
self.lt = self.test_setup.create_arbitrary_location_type()
self.location = \
self.test_setup.create_arbitrary_location(self.lt.id)
self.document = self\
.test_setup.create_arbitrary_document(id=22,file_type ='campaign')
self.som_0 = SourceObjectMap.objects.create(
source_object_code='This is not mapped',
master_object_id = -1,
content_type = 'location'
)
DocumentSourceObjectMap.objects.create(
document_id = self.document.id,
source_object_map_id = self.som_0.id
)
self.som_1 = SourceObjectMap.objects.create(
source_object_code='This is mapped',
master_object_id = self.location.id,
content_type = 'location'
)
DocumentSourceObjectMap.objects.create(
document_id = self.document.id,
source_object_map_id = self.som_1.id
)
indicator_df = read_csv('rhizome/tests/_data/indicators.csv')
self.indicators = self.test_setup.model_df_to_data(
indicator_df, Indicator)
def test_som_patch(self):
# this is really a PUT that is i am updating values here in place
post_data = {
'source_object_code': 'Percent missed children_PCA',
'master_object_id': self.indicators[0].id,
'content_type': 'indicator',
'mapped_by_id': self.user.id
}
patch_url = '/api/v1/source_object_map/%s/' % self.som_0.id
patch_resp = self.test_setup.patch(self, patch_url, post_data)
self.assertHttpAccepted(patch_resp)
response_data = self.deserialize(patch_resp)
self.assertEqual(
response_data['master_object_id'], self.indicators[0].id)
def test_som_post_invalid_id(self):
'''
try to PATCH with an invalid id.
'''
post_data = {
'master_object_id': self.indicators[0].id,
'content_type': 'indicator',
'mapped_by_id': self.user.id
}
post_resp = self.test_setup.patch(
self, '/api/v1/source_object_map/9090909090/' , post_data)
self.assertHttpApplicationError(post_resp)
def test_som_get_id(self):
'''
get the som_obj by id for both the mapped and un mapped.
'''
## mapped ##
get_resp = self.test_setup.get(
self, '/api/v1/source_object_map/%s/' % self.som_1.id)
self.assertHttpOK(get_resp)
response_data = self.deserialize(get_resp)
self.assertEqual(response_data['master_object_id'], self.location.id)
## un mapped ##
get_resp_1 = self.test_setup.get(
            self, '/api/v1/source_object_map/%s/' % self.som_0.id)
self.assertHttpOK(get_resp_1)
response_data_1 = self.deserialize(get_resp_1)
        self.assertEqual(response_data_1['master_object_id'], -1)
def test_som_get_doc_id(self):
get_data = {'document_id': self.document.id, 'is_mapped': 1}
resp = self.test_setup.get(
self, '/api/v1/source_object_map/', get_data)
self.assertHttpOK(resp)
data = self.deserialize(resp)
self.assertEqual(data['objects'][0]['master_object_id']\
, self.location.id)
def test_som_get_no_doc_param(self):
'''
the document_id is a required parameter so we need to make sure
that when we pass a request without a document_id, that we get the
expected error message.
'''
resp = self.test_setup.get(self, '/api/v1/source_object_map/')
data = self.deserialize(resp)
self.assertHttpApplicationError(resp)
# expected_error_msg = 'Missing required parameter document_id'
expected_error_msg = "'document_id'"
self.assertEqual(data['error'], str(expected_error_msg))
def test_som_get_unmapped(self):
filter_params = {'document_id': self.document.id, 'is_mapped': 0}
resp = self.test_setup.get(self, '/api/v1/source_object_map/',\
data = filter_params)
self.assertHttpOK(resp)
data = self.deserialize(resp)
data_objects = data['objects']
self.assertEqual(len(data_objects), 1) # since we created one unmapped
self.assertEqual(data_objects[0]['master_object_id'], -1)
self.assertEqual(str(data_objects[0]['source_object_code']),\
'This is not mapped')
def test_som_get_doc_id_invalid(self):
get_data = {'document_id': 123456}
get_resp = self.test_setup.get(
self, '/api/v1/source_object_map/', get_data)
self.assertHttpOK(get_resp)
get_data = self.deserialize(get_resp)
def test_som_get_id_invalid(self):
get_data_id = 123456
get_resp = self.test_setup.get(
self, '/api/v1/source_object_map/%s/' % get_data_id)
self.assertHttpApplicationError(get_resp)
get_data = self.deserialize(get_resp)
|
pawkoz/dyplom
|
blender/intern/cycles/blender/addon/osl.py
|
Python
|
gpl-2.0
| 4,371
| 0.002974
|
#
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
import bpy
import _cycles
def osl_compile(input_path, report):
"""compile .osl file with given filepath to temporary .oso file"""
import tempfile
output_file = tempfile.NamedTemporaryFile(mode='w', suffix=".oso", delete=False)
output_path = output_file.name
output_file.close()
ok = _cycles.osl_compile(input_path, output_path)
if ok:
report({'INFO'}, "OSL shader compilation succeeded")
return ok, output_path
def update_script_node(node, report):
"""compile and update shader script node"""
import os
import shutil
import tempfile
if node.mode == 'EXTERNAL':
# compile external script file
script_path = bpy.path.abspath(node.filepath, library=node.id_data.library)
script_path_noext, script_ext = os.path.splitext(script_path)
if script_ext == ".oso":
# it's a .oso file, no need to compile
ok, oso_path = True, script_path
oso_file_remove = False
elif script_ext == ".osl":
# compile .osl file
ok, oso_path = osl_compile(script_path, report)
oso_file_remove = True
if ok:
# copy .oso from temporary path to .osl directory
dst_path = script_path_noext + ".oso"
try:
|
shutil.copy2(oso_path, dst_path)
except:
report({'ERROR'}, "Failed to write .oso file next to external .osl file at " + dst_path)
        elif os.path.dirname(node.filepath) == "":
# module in search path
oso_path = node.filepath
oso_file_remove = False
ok = True
else:
# unknown
report({'ERROR'}, "External shader script must have .osl or .oso extension, or be a module name")
ok = False
if ok:
node.bytecode = ""
node.bytecode_hash = ""
elif node.mode == 'INTERNAL' and node.script:
# internal script, we will store bytecode in the node
script = node.script
osl_path = bpy.path.abspath(script.filepath, library=script.library)
if script.is_in_memory or script.is_dirty or script.is_modified or not os.path.exists(osl_path):
# write text datablock contents to temporary file
osl_file = tempfile.NamedTemporaryFile(mode='w', suffix=".osl", delete=False)
osl_file.write(script.as_string())
osl_file.close()
ok, oso_path = osl_compile(osl_file.name, report)
oso_file_remove = False
os.remove(osl_file.name)
else:
# compile text datablock from disk directly
ok, oso_path = osl_compile(osl_path, report)
oso_file_remove = False
if ok:
# read bytecode
try:
oso = open(oso_path, 'r')
node.bytecode = oso.read()
oso.close()
except:
import traceback
traceback.print_exc()
report({'ERROR'}, "Can't read OSO bytecode to store in node at %r" % oso_path)
ok = False
else:
report({'WARNING'}, "No text or file specified in node, nothing to compile")
return
if ok:
# now update node with new sockets
ok = _cycles.osl_update_node(node.id_data.as_pointer(), node.as_pointer(), oso_path)
if not ok:
report({'ERROR'}, "OSL query failed to open " + oso_path)
else:
report({'ERROR'}, "OSL script compilation failed, see console for errors")
# remove temporary oso file
if oso_file_remove:
try:
os.remove(oso_path)
except:
pass
return ok
|
liupangzi/codekata
|
leetcode/Algorithms/110.BalancedBinaryTree/Solution.py
|
Python
|
mit
| 712
| 0.001404
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
#         self.left = None
# self.right = None
class Solution(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
return self.dfsHeight(root) != -1
def dfsHeight(self, root):
if not root:
return 0
        left_height = self.dfsHeight(root.left)
right_height = self.dfsHeight(root.right)
if left_height == -1 or right_height == -1:
return -1
if abs(left_height - right_height) > 1:
return -1
return max(left_height, right_height) + 1
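# --- Editor's sketch, not part of the original file: a minimal, self-contained
# --- usage check for Solution.isBalanced. The TreeNode class below is a
# --- hypothetical stand-in matching the commented-out definition above.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    # balanced:   1        unbalanced:  1
    #            / \                     \
    #           2   3                     2
    #                                      \
    #                                       3
    balanced = TreeNode(1)
    balanced.left = TreeNode(2)
    balanced.right = TreeNode(3)
    unbalanced = TreeNode(1)
    unbalanced.right = TreeNode(2)
    unbalanced.right.right = TreeNode(3)
    print(Solution().isBalanced(balanced))    # expected: True
    print(Solution().isBalanced(unbalanced))  # expected: False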
|
Vb2341/image-junk
|
fhead.py
|
Python
|
mit
| 217
| 0.018433
|
#! /usr/bin/env python
import sys, glob
from astropy.io import fits
try: ext = int(sys.argv[2])
except: ext = 0
print sys.argv[1]
ims = glob.glob(sys.argv[1])
for im in ims:
print repr(fits.getheader(im, ext))
|
brantje/telegram-github-bot
|
captain_hook/services/telegram/__init__.py
|
Python
|
apache-2.0
| 77
| 0
|
from __future__ import absolute_import
from .telegram import TelegramService
| |
marcindulak/accts
|
accts/asegpaw/3.6.0-0.9.0.8965/ase/test.py
|
Python
|
gpl-3.0
| 142
| 0.007042
|
import sys
import subprocess
result = subprocess.Popen('sh test.sh', shell=True)
text = result.communicate()[0]
sys.exit(result.returncode)
|
pmisik/buildbot
|
master/buildbot/test/unit/www/test_roles.py
|
Python
|
gpl-2.0
| 3,892
| 0.000514
|
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.www.authz import roles
class RolesFromGroups(unittest.TestCase):
def setUp(self):
self.roles = roles.RolesFromGroups("buildbot-")
def test_noGroups(self):
ret = self.roles.getRolesFromUser(dict(
username="homer"))
self.assertEqual(ret, [])
def test_noBuildbotGroups(self):
ret = self.roles.getRolesFromUser(dict(
username="homer",
groups=["employee"]))
self.assertEqual(ret, [])
def test_someBuildbotGroups(self):
ret = self.roles.getRolesFromUser(dict(
|
username="homer",
groups=["employee", "buildbot-maintainer", "buildbot-admin"]))
self.assertEqual(ret, ["maintainer", "admin"])
class RolesFromEmails(unittest.TestCase):
def setUp(self):
self.roles = roles.RolesFromEmails(
            employee=["homer@plant.com", "burns@plant.com"], boss=["burns@plant.com"])
def test_noUser(self):
ret = self.roles.getRolesFromUser(dict(
username="lisa", email="lisa@school.com"))
self.assertEqual(ret, [])
def test_User1(self):
ret = self.roles.getRolesFromUser(dict(
username="homer", email="homer@plant.com"))
self.assertEqual(ret, ["employee"])
def test_User2(self):
ret = self.roles.getRolesFromUser(dict(
username="burns", email="burns@plant.com"))
self.assertEqual(sorted(ret), ["boss", "employee"])
class RolesFromOwner(unittest.TestCase):
def setUp(self):
self.roles = roles.RolesFromOwner("ownerofbuild")
def test_noOwner(self):
ret = self.roles.getRolesFromUser(dict(
username="lisa", email="lisa@school.com"), None)
self.assertEqual(ret, [])
def test_notOwner(self):
ret = self.roles.getRolesFromUser(dict(
username="lisa", email="lisa@school.com"), "homer@plant.com")
self.assertEqual(ret, [])
def test_owner(self):
ret = self.roles.getRolesFromUser(dict(
username="homer", email="homer@plant.com"), "homer@plant.com")
self.assertEqual(ret, ["ownerofbuild"])
class RolesFromUsername(unittest.TestCase, ConfigErrorsMixin):
def setUp(self):
self.roles = roles.RolesFromUsername(roles=["admins"], usernames=["Admin"])
self.roles2 = roles.RolesFromUsername(
roles=["developers", "integrators"], usernames=["Alice", "Bob"])
def test_anonymous(self):
ret = self.roles.getRolesFromUser(dict(anonymous=True))
self.assertEqual(ret, [])
def test_normalUser(self):
ret = self.roles.getRolesFromUser(dict(username="Alice"))
self.assertEqual(ret, [])
def test_admin(self):
ret = self.roles.getRolesFromUser(dict(username="Admin"))
self.assertEqual(ret, ["admins"])
def test_multipleGroups(self):
ret = self.roles2.getRolesFromUser(dict(username="Bob"))
self.assertEqual(ret, ["developers", "integrators"])
def test_badUsernames(self):
with self.assertRaisesConfigError('Usernames cannot be None'):
roles.RolesFromUsername(roles=[], usernames=[None])
|
bradfortner/Convergence-Jukebox-Experimental
|
working_popup_progress_bar rewrite.py
|
Python
|
gpl-3.0
| 1,763
| 0.016449
|
import kivy
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.progressbar import ProgressBar
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty
kivy.require("1.9.1")
class MyPopupProgressBar(Widget):
progress_bar = ObjectProperty() # Kivy properties classes are used when you create an EventDispatcher.
def __init__(self, **kwa):
super(MyPopupProgressBar, self).__init__(**kwa) #super combines and initializes two widgets Popup and ProgressBar
self.progress_bar = ProgressBar() # instance of ProgressBar created.
self.popup = Popup(title='New Songs Detected: Updating Song Library', content=self.progress_bar) # progress bar assigned to popup
self.popup.bind(on_open=self.puopen) # Binds super widget to on_open.
Clock.schedule_once(self.progress_bar_start) # Uses clock to call progress_bar_start() (callback) one time only
    def progress_bar_start(self, instance): # Provides initial value of progress bar and launches popup
self.progress_bar.value = 1 # Initial value of progress_bar
self.popup.open() # starts puopen()
    def next(self, dt): # Updates progress bar
|
if self.progress_bar.value >= 100: # Checks to see if progress_bar.value has met 100
return False # Returning False schedule is canceled and won't repeat
self.progress_bar.value += 1 # Updates progress_bar's progress
def puopen(self, instance): # Called from bind.
        Clock.schedule_interval(self.next, .0005) # Creates Clock event scheduling next() every 0.0005 seconds.
class MyApp(App):
def build(self):
return MyPopupProgressBar()
if __name__ in ("__main__"):
MyApp().run()
|
ElofssonLab/pcons-fold
|
pconsc/plotting/parse_fasta.py
|
Python
|
mit
| 3,026
| 0.004296
|
#!/usr/bin/env python
import string, copy
import sys
def read_fasta(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key of a given query ID only contains its ID, not the full header
@return {header: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
                header = aline[1:]
# otherwise concatenate sequence
else:
            #aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
def read_fasta_pdb(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key = PDB accession
@return {PDB-acc: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
header = aline[1:].split()[0]
# otherwise concatenate sequence
else:
#aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
if __name__ == "__main__":
afile = open(sys.argv[1], 'r')
if len(sys.argv) == 3:
query_id = sys.argv[2]
else:
query_id = ''
seq_dict = read_fasta(afile, query_id)
afile.close()
print 'There are %d entries with unique headers in your file.' % len(seq_dict)
|
wallarelvo/rover
|
rover/plot.py
|
Python
|
apache-2.0
| 3,026
| 0.00033
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot_risk_grid(risk_grid, filename):
fig = plt.figure("Risk Map")
ax = fig.add_subplot(111)
ax.set_xlabel("X Location")
ax.set_ylabel("Y Location")
x_step = 1
y_step = 1
x_min = 0
y_min = 0
x_max = risk_grid.problem.width
y_max = risk_grid.problem.height
x = np.arange(x_min, x_max, x_step)
y = np.arange(y_min, y_max, y_step)
X, Y = np.meshgrid(x, y)
zs = np.array(
[
risk_grid.get_risk(x_i, y_i)
for x_i, y_i in zip(np.ravel(X), np.ravel(Y))
]
)
np.savetxt("sandbox/risk.out", zs)
Z = zs.reshape(X.shape)
ax.pcolormesh(X, Y, Z, cmap=cm.jet)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
plt.savefig(filename)
return plt
def plot_time_grid(time_grid, filename):
fig = plt.figure("TimeGrid Map")
ax = fig.add_subplot(111)
ax.set_xlabel("X Location")
ax.set_ylabel("Y Location")
x_step = 1
y_step = 1
x_min = 0
y_min = 0
x_max = time_grid.width - 1
y_max = time_grid.height - 1
x = np.arange(x_min, x_max, x_step)
y = np.arange(y_min, y_max, y_step)
X, Y = np.meshgrid(x, y)
zs = np.array(
[
time_grid.get_raw(x_i, y_max - y_i)
for x_i, y_i in zip(np.ravel(X), np.ravel(Y))
]
)
Z = zs.reshape(X.shape)
ax.pcolormesh(X, Y, Z, cmap=cm.jet)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
plt.savefig(filename)
return plt
class TimeGridPlotter(object):
def __init__(self, time_grid):
self.time_grid = time_grid
self.fig = plt.figure("TimeGrid Map")
self.ax = self.fig.add_subplot(111)
self.ax.set_xlabel("X Location")
self.ax.set_ylabel("Y Location")
self.x_step = 2
self.y_step = 2
self.x_min = 0
self.y_min = 0
        self.x_max = time_grid.width - 1
self.y_max = time_grid.height - 1
self.x = np.arange(self.x_min, self.x_max, self.x_step)
self.y = np.arange(self.y_min, self.y_max, self.y_step)
self.X, self.Y = np.meshgrid(self.x, self.y)
plt.ion()
self.get_zs()
self.ax.set_xlim(self.x_min, self.x_max)
self.ax.set_ylim(self.y_min, self.y_max)
self.iteration = 0
|
def get_zs(self):
zs = np.array(
[
self.time_grid.get_raw(x_i, y_i)
for x_i, y_i in zip(np.ravel(self.X), np.ravel(self.Y))
]
)
return zs
def update(self):
try:
self.graph.remove()
except:
pass
zs = self.get_zs()
Z = zs.reshape(self.X.shape)
self.graph = self.ax.pcolormesh(self.X, self.Y, Z, cmap=cm.jet)
plt.draw()
plt.pause(0.0001)
filename = "sandbox/grids/{}.out".format(self.iteration)
self.iteration += 1
np.savetxt(filename, self.time_grid.grid)
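# --- Editor's sketch, not part of the original module: drives plot_time_grid
# --- with a hypothetical stand-in for the project's time_grid object, exposing
# --- only the attributes this module actually reads (width, height, get_raw).
# --- The output filename is illustrative.
if __name__ == "__main__":
    class _FakeTimeGrid(object):
        def __init__(self, width, height):
            self.width = width
            self.height = height
            self.grid = np.zeros((height, width))

        def get_raw(self, x, y):
            # arbitrary smooth surface so the heatmap shows visible structure
            return np.sin(x / 10.0) + np.cos(y / 10.0)

    plot_time_grid(_FakeTimeGrid(100, 80), "time_grid_demo.png")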
|
CaptainDesAstres/Frames-Animated-By-Curve
|
single_track/SingleTrack.py
|
Python
|
gpl-3.0
| 1,451
| 0.052378
|
from .Peaks import Peaks
from .Amplitude import Amplitude
from .Combination import Combination
from .OutputFrame import OutputFrame
from .Track import Track
from .panels import Panel
import bpy
class SingleTrack(
bpy.types.PropertyGroup,
Panel,
Track,
Amplitude,
Combination,
OutputFrame,
Peaks
):
''' class containing all Curve to frame
Properties, methods and operators
for single track feature'''
def update_curves( self, context ):
'''update curve when settings have been changed'''
clip = self.id_data
# initialize animation data if required
if clip.animation_data is None:
clip.animation_data_create()
if clip.animation_data.action is None:
			clip.animation_data.action = bpy.data.actions.new(
						name= clip.name+'Action')
# check and get peaks shapes
peak_shapes = self.check_and_get_peaks_shapes()
if type(peak_shapes) is str:
return peak_shapes
# update amplitude net curve
amplitude_net_curve = self.update_net_amplitude_curve( clip, context )
# update peaks curve
peaks_curve = self.update_peaks_curve( context,
amplitude_net_curve, peak_shapes )
#update combination curve
combination_curve = self.update_combination_curve(
clip,
context,
amplitude_net_curve,
peaks_curve
)
# update output curve
self.update_output_curve(clip, context, combination_curve)
return True
|
voidabhi/python-scripts
|
webhook-fb-messenger.py
|
Python
|
mit
| 1,193
| 0.003353
|
import json
import requests
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
FB_MESSENGER_ACCESS_TOKEN = "[TOKEN]"
def respond_FB(sender_id, text):
json_data = {
"recipient": {"id": sender_id},
"message": {"text": text + " to you!"}
}
params = {
"access_token": FB_MESSENGER_ACCESS_TOKEN
}
r = requests.post('https://graph.facebook.com/v2.6/me/messages', json=json_data, params=params)
print(r, r.status_code, r.text)
@csrf_exempt
def fb_webhook(request):
    if request.method == "GET":
if (request.GET.get('hub.verify_token') == 'this_is_a_verify_token_created_by_sean'):
return HttpResponse(request.GET.get('hub.challenge'))
return HttpResponse('Error, wrong validation token')
if request.method == "POST":
body = request.body
|
print("BODY", body)
messaging_events = json.loads(body.decode("utf-8"))
print("JSON BODY", body)
sender_id = messaging_events["entry"][0]["messaging"][0]["sender"]["id"]
message = messaging_events["entry"][0]["messaging"][0]["message"]["text"]
respond_FB(sender_id, message)
return HttpResponse('Received.')
|
kimlab/GPyM
|
granule2map.py
|
Python
|
mit
| 1,360
| 0.025735
|
#! /usr/bin/python
#--------------------------------------------------------------------
# PROGRAM : granule2map.py
# CREATED BY : hjkim @IIS.2015-07-13 11:56:07.989735
# MODIFED BY :
#
# USAGE : $ ./granule2map.py
#
# DESCRIPTION:
#------------------------------------------------------cf0.2@20120401
import os,sys
from optparse import OptionParser
from numpy import zeros, ma
from alien.upscale import upscale
from alien.nearest_idx import nearest_idx
from alien.GridCoordinates import GridCoordinates
def granule2map(lat, lon, aSrc, BBox=None, res=0.1, verbose=True):
'''
res : out resolution only support n-fold of 0.01 deg
'''
    Grid = GridCoordinates('^001',BBox=BBox) # default mapCode:^001
aOut = zeros( (Grid.lat.size,Grid.lon.size), 'float32' )-9999.9
yIdx = nearest_idx(Grid.lat, lat.flatten())
xIdx = nearest_idx(Grid.lon, lon.flatten())
aOut[yIdx, xIdx] = aSrc.flatten()
nFold = int( res/Grid.res )
aOut = upscale(aOut, (Grid.lat.size/nFold, Grid.lon.size/nFold), mode='m', missing=-9999.9)
    #aOut = upscale(aOut, (Grid.lat.size/nFold, Grid.lon.size/nFold), mode='s', missing=-9999.9)
if verbose:
print '\t[GRANULE2MAP] Domain:%s %s -> %s'%( BBox, aSrc.shape, aOut.shape)
return aOut
|
bodleian/stats-time-cache
|
collate/custom_variables.py
|
Python
|
gpl-3.0
| 8,288
| 0.009411
|
'''A collection of tasks to perform related to Piwik custom variables.'''
import logging
import re
import dbsources
import dbengine
class Populate(object):
'''Take existing data and populate custom variables.'''
def __init__(self):
self.CONFIG = None # tables and fields to use
self.CONNECTION = None # in this location
self.setup()
regexp = '(.*)([0-F]{8}-[0-F]{4}-[0-F]{4}-[0-F]{4}-[0-F]{12})(.*)'
self.PATTERN_CHECK = re.compile(regexp, re.IGNORECASE)
# These two codes indicate what type of update has occurred
self.DCODE_IGNORE = 'n' # value to insert when we are not interested
self.DCODE_VIEW = 'v' # value to insert when it is a view
self.DCODE_DOWN = 'd' # value to insert when a download
# Control how the WHERE clause will be generated.
self.FIND_WHERE_METHOD = self.where_notdone
self.FIND_BATCH_SIZE = 10000 # takes < 1 minute
def setup(self):
'''Setup the connection to the system being populated.'''
        source = dbsources.ReadWriteDB()
source.setup_source1()
host, username, password, database = source.get_settings()
        self.CONFIG = dbengine.PiwikConfig()
#self.CONFIG.setup_custom_vars(1) # check count finds stuff
self.CONNECTION = dbengine.Connection()
self.CONNECTION.setup(host, username, password, database)
# Count existing data
def sql_count_customvar_scode(self):
count = self.CONFIG.FIELD_CUSTOM_VARS_SCODE
table = self.CONFIG.TABLE_CUSTOM_VARS_STORE
return "SELECT COUNT(%s) FROM %s"%(count, table)
def sql_count_customvar_dcode(self):
count = self.CONFIG.FIELD_CUSTOM_VARS_DCODE
table = self.CONFIG.TABLE_CUSTOM_VARS_STORE
return "SELECT COUNT(%s) FROM %s"%(count, table)
def count_existing(self):
'''Return the number of custom variables that exist.'''
scode = self.CONNECTION.fetchone(self.sql_count_customvar_scode())
logging.info('Count of custom variable: %s'%scode)
dcode = self.CONNECTION.fetchone(self.sql_count_customvar_dcode())
logging.info('Count of custom variable: %s'%dcode)
return scode, dcode
# Lookup custom variables
def sql_action_lookup(self, action):
table, key, check, down = self.CONFIG.get_action_look_config()
return "SELECT %s , %s , %s FROM %s WHERE %s='%s'"%(key, check, down, table, key, action)
def action_lookup(self, action):
'''Returns data from the key to use as scode and dcode'''
query = self.sql_action_lookup(action)
return self.CONNECTION.fetchone(query)
def get_action(self, action):
'''Return details about an action.'''
result = self.action_lookup(action)
if not result:
return False
code = self.action_extract_code(result[1])
if not code:
return False
checktype = result[2]
if checktype == self.CONFIG.ACTION_ISUSEFUL:
return code, 'view'
elif checktype == self.CONFIG.ACTION_ISDOWNLOAD:
return code, 'down'
else:
return code, 'none'
def action_extract_code(self, checkname):
found = re.search(self.PATTERN_CHECK, checkname)
if found:
code = 'uuid:%s'%str(found.group(2)).lower()
return code
else:
return False
# Find data that needs checking to see if custom variables are needed.
def sql_find_items(self):
table, key, action, site, when, visit, scode, dcode = self.CONFIG.get_store_look_config()
select = 'SELECT %s , %s , %s , %s , %s , %s , %s FROM %s'%(key,
action, site, when, visit, scode, dcode, table)
return '%s%s'%(select, self.FIND_WHERE_METHOD())
def setup_where(self, cat='test'):
'''Setup the where clause to use when finding items to update.'''
if cat not in ['test','notdone']:
raise ValueError
if cat == 'test':
self.FIND_WHERE_METHOD = self.where_test
elif cat == 'notdone':
self.FIND_WHERE_METHOD = self.where_notdone
def where_test(self):
return ' LIMIT 0, 5'
def where_notdone(self):
return " WHERE %s IS NULL LIMIT 0, %s"%(
self.CONFIG.FIELD_CUSTOM_VARS_DCODE, self.FIND_BATCH_SIZE)
def find_items_to_populate(self, how='test'):
query = self.sql_find_items()
return self.CONNECTION.fetchall(query)
# Update the store if necessary.
def sql_update(self, key, scode, dcode):
table, fieldkey = self.CONFIG.get_update_store_config()
update = "UPDATE %s SET "%table
scode = "%s = '%s' , "%(self.CONFIG.FIELD_CUSTOM_VARS_SCODE, scode)
dcode = "%s = '%s' "%(self.CONFIG.FIELD_CUSTOM_VARS_DCODE, dcode)
where = "WHERE %s = %s"%(fieldkey, key)
return '%s%s%s%s'%(update, scode, dcode, where)
def update_codes(self, key, scode, dcode):
'''Execute the update of key with scode and dcode.'''
query = self.sql_update(key, scode, dcode)
return self.CONNECTION.update(query)
def run_populate(self):
'''Check the store and update any custom variables needed.'''
views = 0
downloads = 0
others = 0
for item in self.find_items_to_populate():
key = item[0]
action = item[1]
existing_scode = item[5]
existing_dcode = item[6]
# dcode controls if this item is updated.
check = (self.DCODE_IGNORE, self.DCODE_VIEW, self.DCODE_DOWN)
if existing_dcode in check:
continue
# It needs updating, find out what type of update is needed
# and work out the scodes and dcodes to use.
useful = self.get_action(action)
if not useful: # we can ignore it,
others += 1
scode = self.DCODE_IGNORE
dcode = self.DCODE_IGNORE
else: # its either a view or download
new_code = useful[0]
category = useful[1]
if category == 'view':
views += 1
if existing_scode:
scode = existing_scode
else:
scode = new_code
dcode = self.DCODE_VIEW
if category == 'down':
downloads += 1
dcode = self.DCODE_DOWN
# Deal with archived data that starts off with no scode,
if existing_scode:
scode = existing_scode
else:
scode = new_code
self.update_codes(key, scode, dcode)
return views, downloads, others
if __name__ == '__main__':
'''Do nothing unless enabled.'''
testing = False
process = False
if process:
p = Populate()
p.FIND_BATCH_SIZE = 10000000 # override the default
p.run_populate()
if testing:
logging.basicConfig(level=logging.INFO)
p = Populate()
count = p.count_existing()
logging.critical(count)
logging.warn('The above should be empty for a new populate.')
logging.warn('If not you need to CHECK why!!')
result = p.action_lookup('50') # test the lookup works
if result:
if len(result) == 3:
logging.info(result)
else:
logging.warn('Lookup failed.')
print 'Expect to see uuid:15b86a5d-21f4-44a3-95bb-b8543d326658'
print p.get_action('33162') #type 4
print p.get_action('33257') #view
print p.get_action('33258') #down
p.setup_where('test')
views, downloads, ignores = p.run_populate()
print views, downloads, ignores
|
skuda/client-python
|
kubernetes/client/models/v1_label_selector.py
|
Python
|
apache-2.0
| 4,559
| 0.001535
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LabelSelector(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, match_expressions=None, match_labels=None):
"""
V1LabelSelector - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'match_expressions': 'list[V1LabelSelectorRequirement]',
'match_labels': 'dict(str, str)'
}
self.attribute_map = {
'match_expressions': 'matchExpressions',
'match_labels': 'matchLabels'
}
self._match_expressions = match_expressions
self._match_labels = match_labels
@property
def match_expressions(self):
"""
Gets the match_expressions of this V1LabelSelector.
matchExpressions is a list of label selector requirements. The requirements are ANDed.
:return: The match_expressions of this V1LabelSelector.
:rtype: list[V1LabelSelectorRequirement]
"""
return self._match_expressions
@match_expressions.setter
def match_expressions(self, match_expressions):
"""
Sets the match_expressions of this V1LabelSelector.
matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param match_expressions: The match_expressions of this V1LabelSelector.
:type: list[V1LabelSelectorRequirement]
"""
self._match_expressions = match_expressions
@property
def match_labels(self):
"""
Gets the match_labels of this V1LabelSelector.
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.
:return: The match_labels of this V1LabelSelector.
:rtype: dict(str, str)
"""
return self._match_labels
@match_labels.setter
def match_labels(self, match_labels):
"""
Sets the match_labels of this V1LabelSelector.
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.
:param match_labels: The match_labels of this V1LabelSelector.
:type: dict(str, str)
"""
self._match_labels = match_labels
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
|
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
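# A minimal usage sketch: a selector equivalent to ``matchLabels: {app: web}``
# built from the class above and round-tripped through to_dict(). The label
# values are illustrative; the __main__ guard keeps the sketch from running on
# import.
if __name__ == "__main__":
    selector = V1LabelSelector(match_labels={"app": "web"})
    assert selector.match_labels == {"app": "web"}
    assert selector.to_dict() == {"match_expressions": None, "match_labels": {"app": "web"}}
    print(selector)  # pretty-printed via to_str()/pformat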
|
pmitros/DoneXBlock
|
tests/conftest.py
|
Python
|
agpl-3.0
| 842
| 0
|
import pytest
from mock import Mock
from workbench.runtime import WorkbenchRuntime
from xblock.fields import ScopeIds
from xblock.runtime import DictKeyValueStore, KvsFieldData
from done.done import DoneXBlock
def generate_scope_ids(runtime, block_type):
""" helper to generate scope IDs for an XBlock """
def_id = runtime.id_generator.create_definition(block_type)
usage_id = runtime.id_generator.create_usage(def_id)
return ScopeIds('user', block_type, def_id, usage_id)
@pytest.fixture
def done_xblock():
"""Done XBlock pytest fixture."""
runtime = WorkbenchRuntime()
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
ids = generate_scope_ids(runtime, 'done')
done_xblock = DoneXBlock(runtime, db_model, scope_ids=ids)
done_xblock.usage_id = Mock()
return done_xblock
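# A minimal sketch of a test (placed in a test module, not in this conftest)
# that consumes the fixture above. It only inspects the scope ids wired up by
# generate_scope_ids, so it assumes nothing about DoneXBlock's own fields.
def test_done_xblock_fixture_scope_ids(done_xblock):
    """The fixture should yield a block typed 'done' with the stub user id."""
    assert done_xblock.scope_ids.block_type == 'done'
    assert done_xblock.scope_ids.user_id == 'user'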
|
Zertifica/evosnap
|
evosnap/merchant_applications/pos_device.py
|
Python
|
mit
| 806
| 0.007444
|
from evosnap import constants
class POSDevice:
def __init__(self,**kwargs):
self.__order = [
'posDeviceType', 'posDeviceConnection', 'posDeviceColour', 'posDeviceQuantity',
]
self.__lower_camelcase = constants.ALL_FIELDS
|
self.pos_device_type = kwargs.get('pos_device_type')
self.pos_device_connection = kwargs.get('pos_device_connection')
self.pos_device_colour = kwargs.get('pos_device_colour')
self.pos_device_quantity = kwargs.get('pos_device_quantity')
@property
def hash_str(self):
required = [
'pos_device_type', 'pos_device_connection', 'pos_device_colour', 'pos_device_quantity',
]
return ''.join([str(getattr(self,f)).strip() for f in required if getattr(self,f) is not None])
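# A minimal usage sketch; the field values below are illustrative, not real
# EVO Snap enumerations. hash_str simply concatenates the stripped string form
# of every populated field, in the fixed order listed above.
if __name__ == '__main__':
    device = POSDevice(
        pos_device_type='Terminal',
        pos_device_connection='Ethernet',
        pos_device_colour='Black',
        pos_device_quantity=2,
    )
    assert device.hash_str == 'TerminalEthernetBlack2'
    print(device.hash_str)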
|
OpenHumans/open-humans
|
public_data/migrations/0003_auto_20190508_2341.py
|
Python
|
mit
| 834
| 0
|
# Generated by Django 2.2.1 on 2019-05-08 23:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("private_sharing", "0022_auto_20190507_1843"),
("public_data", "0002_auto_20171213_1947"),
]
operations = [
migrations.AddField(
model_name="publicdataaccess",
name="project_membership",
field=models.OneToOneField(
|
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="private_sharing.DataRequestProjectMember",
),
),
migrations.AlterField(
model_name="publicdataaccess",
name="data_source",
field=models.CharField(max_length=100, null=True),
|
),
]
|
kohr-h/tomok
|
ctf.py
|
Python
|
gpl-3.0
| 3,242
| 0.000925
|
# -*- coding: utf-8 -*-
"""
ctf.py -- contrast transfer function in electron tomography
Copyright 2014 Holger Kohr
This file is part of tomok.
tomok is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
tomok is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with tomok. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import numpy as np
class ContrTransFunc(object):
"""Callable Contrast Transfer Function class.
TODO: finish this properly."""
def __init__(self, emcfg):
self.osc_polycoeff = emcfg.osc_polycoeff
self.env_polycoeff = emcfg.env_polycoeff
self.cutoff2 = (emcfg.wavenum * emcfg.aperture / (emcfg.focal_len *
emcfg.magnif))**2
def __call__(self, freq2, envelope=True):
ctfval = np.exp(np.polyval(1j * self.osc_polycoeff, freq2))
if envelope:
ctfval *= np.exp(-np.polyval(self.env_polycoeff, freq2))
return np.where(freq2 < self.cutoff2, ctfval, 0.0)
# TODO: display method
class ContrTransFuncACR(object):
"""Callable class for the constant acr CTF.
TODO: finish this."""
def __init__(self, emcfg, acr=0.1):
ocoeff = emcfg.osc_polycoeff
ocoeff[3] = np.arctan(acr)
self.osc_polycoeff = ocoeff
self.env_polycoeff = emcfg.env_polycoeff
self.cutoff2 = (emcfg.wavenum * emcfg.aperture / (emcfg.focal_len *
emcfg.magnif))**2
def __call__(self, freq2, envelope=True):
ctfval = np.sin(np.polyval(self.osc_polycoeff, freq2))
if envelope:
ctfval *= np.exp(-np.polyval(self.env_polycoeff, freq2))
return np.where(freq2 < self.cutoff2, ctfval, 0.0)
def zeros(self, num=0, maxfreq2=None):
"""The zeros as an array.
TODO: finish"""
# The sine zeros are those of the polynomials a*x^2 + b*x + c_i,
# where a and b are the quadratic / linear coefficients of
# the sine argument and c_i = constant coeff. - (i+1)*pi
zeros = []
p_a = self.osc_polycoeff[1]
p_b = self.osc_polycoeff[2]
maxzeros = 1000
nmax = num if num else maxzeros
for i in range(nmax):
p_c = self.osc_polycoeff[3] - (i + 1) * np.pi
zero = np.sqrt(p_b**2 - 4. * p_a * p_c) / (2 * p_a)
if maxfreq2 is not None and zero > maxfreq2:
break
zeros.append(zero)
return np.asarray(zeros)
# TODO: display method
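# A minimal usage sketch. FakeEMConfig is a made-up stand-in for the
# electron-microscope configuration object the constructors above expect; the
# polynomial coefficients and optics numbers are illustrative only.
if __name__ == '__main__':
    class FakeEMConfig(object):
        # np.polyval coefficients (highest order first) in the squared frequency
        osc_polycoeff = np.array([0.0, 1.0e-3, -2.0, 0.0])
        env_polycoeff = np.array([0.0, 0.0, 1.0e-4, 0.0])
        wavenum, aperture, focal_len, magnif = 2.0e3, 0.05, 1.0, 50.0

    emcfg = FakeEMConfig()
    freq2 = np.linspace(0.0, 5.0, 11)
    ctf = ContrTransFuncACR(emcfg, acr=0.1)
    print(ctf(freq2))        # damped sine CTF values, zero beyond the aperture cutoff
    print(ctf.zeros(num=3))  # first three zeros of the oscillating part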
|
xlqian/navitia
|
source/tyr/tests/integration/autocomplete_test.py
|
Python
|
agpl-3.0
| 9,528
| 0.001364
|
# coding: utf-8
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division, unicode_literals
from tests.check_utils import api_get, api_post, api_delete, api_put, _dt
import json
import pytest
import jmespath
from navitiacommon import models
from tyr import app
@pytest.fixture
def create_autocomplete_parameter():
with app.app_context():
autocomplete_param = models.AutocompleteParameter('idf', 'OSM', 'BANO', 'FUSIO', 'OSM', [8, 9])
models.db.session.add(autocomplete_param)
models.db.session.commit()
# we also create 3 datasets, one for bano, 2 for osm
for i, dset_type in enumerate(['bano', 'osm', 'osm']):
job = models.Job()
dataset = models.DataSet()
dataset.type = dset_type
dataset.family_type = 'autocomplete_{}'.format(dataset.type)
dataset.name = '/path/to/dataset_{}'.format(i)
models.db.session.add(dataset)
job.autocomplete_params_id = autocomplete_param.id
job.data_sets.append(dataset)
job.state = 'done'
models.db.session.add(job)
models.db.session.commit()
@pytest.fixture
def create_two_autocomplete_parameters():
with app.app_context():
autocomplete_param1 = models.AutocompleteParameter('europe', 'OSM', 'BANO', 'OSM', 'OSM', [8, 9])
autocomplete_param2 = models.AutocompleteParameter('france', 'OSM', 'OSM', 'FUSIO', 'OSM', [8, 9])
models.db.session.add(autocomplete_param1)
models.db.session.add(autocomplete_param2)
models.db.session.commit()
@pytest.fixture
def autocomplete_parameter_json():
return {
"name": "peru",
"street": "OSM",
"address": "BANO",
"poi": "FUSIO",
"admin": "OSM",
"admin_level": [8],
}
def test_get_autocomplete_parameters_empty():
resp = api_get('/v0/autocomplete_parameters/')
assert resp == []
def test_get_all_autocomplete(create_autocomplete_parameter):
resp = api_get('/v0/autocomplete_parameters/')
assert len(resp) == 1
assert resp[0]['name'] == 'idf'
assert resp[0]['street'] == 'OSM'
assert resp[0]['address'] == 'BANO'
assert resp[0]['poi'] == 'FUSIO'
assert resp[0]['admin'] == 'OSM'
assert resp[0]['admin_level'] == [8, 9]
def test_get_autocomplete_by_name(create_two_autocomplete_parameters):
resp = api_get('/v0/autocomplete_parameters/')
assert len(resp) == 2
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
assert resp['street'] == 'OSM'
assert resp['address'] == 'OSM'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8, 9]
def test_post_autocomplete(autocomplete_parameter_json):
resp = api_post(
'/v0/autocomplete_parameters',
data=json.dumps(autocomplete_parameter_json),
content_type='application/json',
)
assert resp['name'] == 'peru'
assert resp['street'] == 'OSM'
assert resp['address'] == 'BANO'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8]
def test_post_autocomplete_cosmo():
resp = api_post(
'/v0/autocomplete_parameters',
data=json.dumps({"name": "bobette", "admin": "COSMOGONY"}),
content_type='application/json',
)
assert resp['name'] == 'bobette'
assert resp['street'] == 'OSM'
assert resp['address'] == 'BANO'
assert resp['poi'] == 'OSM'
assert resp['admin'] == 'COSMOGONY'
assert resp['admin_level'] == []
def test_put_autocomplete(create_two_autocomplete_parameters, autocomplete_parameter_json):
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
assert resp['street'] == 'OSM'
assert resp['address'] == 'OSM'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8, 9]
resp = api_put(
'/v0/autocomplete_parameters/france',
data=json.dumps(autocomplete_parameter_json),
content_type='application/json',
)
assert resp['street'] == 'OSM'
assert resp['address'] == 'BANO'
assert resp['poi'] == 'FUSIO'
assert resp['admin'] == 'OSM'
assert resp['admin_level'] == [8]
def test_delete_autocomplete(create_two_autocomplete_parameters):
resp = api_get('/v0/autocomplete_parameters/')
assert len(resp) == 2
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
_, status = api_delete('/v0/autocomplete_parameters/france', check=False, no_json=True)
assert status == 204
_, status = api_get('/v0/autocomplete_parameters/france', check=False)
assert status == 404
resp = api_get('/v0/autocomplete_parameters/')
|
assert len(resp) == 1
def test_get_last_datasets_autocomplete(create_autocomplete_parameter):
"""
we query the loaded datasets of idf
we loaded 3 datasets, but by default we should get one by family_type, so one for bano, one for osm
"""
resp = api_get('/v0/autocomplete_parameters/idf/last_datasets')
assert len(resp) == 2
bano = next((d for d in resp if d['type'] == 'bano'), None)
assert bano
assert bano['family_type'] == 'autocomplete_bano'
assert bano['name'] == '/path/to/dataset_0'
osm = next((d for d in resp if d['type'] == 'osm'), None)
assert osm
assert osm['family_type'] == 'autocomplete_osm'
assert osm['name'] == '/path/to/dataset_2' # we should have the last one
# if we ask for the 2 last datasets per type, we got all of them
resp = api_get('/v0/autocomplete_parameters/idf/last_datasets?count=2')
assert len(resp) == 3
@pytest.fixture
def minimal_poi_types_json():
return {
"poi_types": [
{"id": "amenity:bicycle_rental", "name": "Station VLS"},
{"id": "amenity:parking", "name": "Parking"},
],
"rules": [
{
"osm_tags_filters": [{"key": "amenity", "value": "bicycle_rental"}],
"poi_type_id": "amenity:bicycle_rental",
},
{"osm_tags_filters": [{"key": "amenity", "value": "parking"}], "poi_type_id": "amenity:parking"},
],
}
def test_autocomplete_poi_types(create_two_autocomplete_parameters, minimal_poi_types_json):
resp = api_get('/v0/autocomplete_parameters/france')
assert resp['name'] == 'france'
# POST a minimal conf
resp = api_post(
'/v0/autocomplete_parameters/france/poi_types',
data=json.dumps(minimal_poi_types_json),
content_type='application/json',
)
def test_minimal_conf(resp):
assert len(resp['poi_types']) == 2
assert len(resp['rules']) == 2
bss_type = jmespath.search("poi_types[?id=='amenity:bicycle_rental']", resp)
assert len(bss_type) == 1
assert bss_type[0]['name'] == 'Station VLS'
bss_rule = jmespath
|
ContinuumIO/dask
|
dask/array/tests/test_atop.py
|
Python
|
bsd-3-clause
| 17,215
| 0.00151
|
import collections
import warnings
from operator import add
import pytest
import numpy as np
import dask
import dask.array as da
from dask.highlevelgraph import HighLevelGraph
from dask.blockwise import Blockwise, rewrite_blockwise, optimize_blockwise, index_subs
from dask.array.utils import assert_eq
from dask.array.numpy_compat import _numpy_116
from dask.utils_test import inc, dec
a, b, c, d, e, f, g = "abcdefg"
_0, _1, _2, _3, _4, _5, _6, _7, _8, _9 = ["_%d" % i for i in range(10)]
i, j, k = "ijk"
@pytest.mark.parametrize(
"inputs,expected",
[
# output name, output index, task, input indices
[[(b, "i", {b: (inc, _0)}, [(a, "i")])], (b, "i", {b: (inc, _0)}, [(a, "i")])],
[
[
(b, "i", {b: (inc, _0)}, [(a, "i")]),
(c, "i", {c: (dec, _0)}, [(a, "i")]),
(d, "i", {d: (add, _0, _1, _2)}, [(a, "i"), (b, "i"), (c, "i")]),
],
(d, "i", {b: (inc, _0), c: (dec, _0), d: (add, _0, b, c)}, [(a, "i")]),
],
[
[
(b, "i", {b: (inc, _0)}, [(a, "i")]),
(c, "j", {c: (inc, _0)}, [(b, "j")]),
],
(c, "j", {b: (inc, _0), c: (inc, b)}, [(a, "j")]),
],
[
[
(b, "i", {b: (sum, _0)}, [(a, "ij")]),
(c, "k", {c: (inc, _0)}, [(b, "k")]),
],
(c, "k", {b: (sum, _0), c: (inc, b)}, [(a, "kA")]),
],
[
[
(c, "i", {c: (inc, _0)}, [(a, "i")]),
(d, "i", {d: (inc, _0)}, [(b, "i")]),
(g, "ij", {g: (add, _0, _1)}, [(c, "i"), (d, "j")]),
],
(
g,
"ij",
{g: (add, c, d), c: (inc, _0), d: (inc, _1)},
[(a, "i"), (b, "j")],
),
],
[
[
(b, "ji", {b: (np.transpose, _0)}, [(a, "ij")]),
(c, "ij", {c: (add, _0, _1)}, [(a, "ij"), (b, "ij")]),
],
(c, "ij", {c: (add, _0, b), b: (np.transpose, _1)}, [(a, "ij"), (a, "ji")]),
],
[
[
(c, "i", {c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
(d, "i", {d: (inc, _0)}, [(c, "i")]),
],
(d, "i", {d: (inc, c), c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
],
[
[
(b, "ij", {b: (np.transpose, _0)}, [(a, "ji")]),
(d, "ij", {d: (np.dot, _0, _1)}, [(b, "ik"), (c, "kj")]),
],
(
d,
"ij",
{d: (np.dot, b, _0), b: (np.transpose, _1)},
[(c, "kj"), (a, "ki")],
),
],
[
[
(c, "i", {c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
(f, "i", {f: (add, _0, _1)}, [(d, "i"), (e, "i")]),
(g, "i", {g: (add, _0, _1)}, [(c, "i"), (f, "i")]),
],
(
g,
"i",
{g: (add, c, f), f: (add, _2, _3), c: (add, _0, _1)},
[(a, i), (b, i), (d, i), (e, i)],
),
],
[
[
(c, "i", {c: (add, _0, _1)}, [(a, "i"), (b, "i")]),
(f, "i", {f: (add, _0, _1)}, [(a, "i"), (e, "i")]),
(g, "i", {g: (add, _0, _1)}, [(c, "i"), (f, "i")]),
],
(
g,
"i",
{g: (add, c, f), f: (add, _0, _2), c: (add, _0, _1)},
[(a, "i"), (b, "i"), (e, "i")],
),
],
[
[
(b, "i", {b: (sum, _0)}, [(a, "ij")]),
(c, "i", {c: (inc, _0)}, [(b, "i")]),
],
(c, "i", {c: (inc, b), b: (sum, _0)}, [(a, "iA")]),
],
[
[
(c, "i", {c: (inc, _0)}, [(b, "i")]),
(d, "i", {d: (add, _0, _1, _2)}, [(a, "i"), (b, "i"), (c, "i")]),
],
(d, "i", {d: (add, _0, _1, c), c: (inc, _1)}, [(a, "i"), (b, "i")]),
],
# Include literals
[
[(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)])],
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)]),
],
[
[
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)]),
(c, "j", {c: (add, _0, _1)}, [(b, "j"), (456, None)]),
],
(
c,
"j",
{b: (add, _1, _2), c: (add, b, _0)},
[(456, None), (a, "j"), (123, None)],
),
],
# Literals that compare equal (e.g. 0 and False) aren't deduplicated
[
[
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (0, None)]),
(c, "j", {c: (add, _0, _1)}, [(b, "j"), (False, None)]),
],
(
c,
"j",
{b: (add, _1, _2), c: (add, b, _0)},
[(False, None), (a, "j"), (0, None)],
),
],
# Literals are deduplicated
[
[
(b, "i", {b: (add, _0, _1)}, [(a, "i"), (123, None)]),
(c, "j", {c: (add, _0, _1)}, [(b, "j"), (123, None)]),
],
(c, "j", {b: (add, _1, _0), c: (add, b, _0)}, [(123, None), (a, "j")]),
],
],
)
def test_rewrite(inputs, expected):
inputs = [
Blockwise(
*inp, numblocks={k: (1,) * len(v) for k, v in inp[-1] if v is not None}
)
for inp in inputs
]
result = rewrite_blockwise(inputs)
result2 = (
result.output,
"".join(result.output_indices),
result.dsk,
[
(name, "".join(ind) if ind is not None else ind)
for name, ind in result.indices
],
)
assert result2 == expected
def test_index_subs():
assert index_subs(tuple("ij"), {"i": "j", "j": "i"}) == tuple("ji")
def test_optimize_blockwise():
x = da.ones(10, chunks=(5,))
y = (((x + 1) + 2) + 3) + 4
dsk = da.optimization.optimize_blockwise(y.dask)
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])
== 1
)
def test_blockwise_diamond_fusion():
x = da.ones(10, chunks=(5,))
y = ((x + 1) + 2) + 3
a = y * 2
b = y * 3
c = a + b
d = ((c + 1) + 2) + 3
dsk = da.optimization.optimize_blockwise(d.dask)
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])
== 1
)
def test_blockwise_non_blockwise_output():
x = da.ones(10, chunks=(5,))
y = ((x + 1) + 2) + 3
w = y.sum()
z = ((y * 2) * 3) * 4
z_top_before = tuple(z.dask.dicts[z.name].indices)
(zz,) = dask.optimize(z)
z_top_after = tuple(z.dask.dicts[z.name].indices)
assert z_top_before == z_top_after, "z_top mutated"
dsk = optimize_blockwise(z.dask, keys=list(dask.core.flatten(z.__dask_keys__())))
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in dsk.dicts.values() if isinstance(layer, Blockwise)])
== 1
)
dsk = optimize_blockwise(
HighLevelGraph.merge(w.dask, z.dask),
keys=list(dask.core.flatten([w.__dask_keys__(), z.__dask_keys__()])),
)
assert isinstance(dsk, HighLevelGraph)
assert (
len([layer for layer in z.dask.dicts.values() if isinstance(layer, Blockwise)])
>= 1
)
def test_top_len():
x = da.ones(10, chunks=(5,))
y = x[:, None] * x[None, :]
d = y.dask.dicts[y.name]
assert len(d) == 4
def test_inner_compute():
x = da.ones(10, chunks=(5,)) + 1 + 2 + 3
a = x.sum()
y = x * 2 * 3 * 4
b = y.sum()
z = x * 2 * 3
dask.compute(x, a, y, b, z)
@pytest.mark.parametrize("name", ["_", "_0", "_1", ".", ".0"])
def test_common_token_names_args(name):
x = np.array(["a", "bb", "ccc"], dtype=object)
d = da.from_array(x, chunks=2)
result = da.
|
chrsrds/scikit-learn
|
sklearn/feature_selection/univariate_selection.py
|
Python
|
bsd-3-clause
| 28,149
| 0.000355
|
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in
|
the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : array_like, sparse matrices
sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
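# A minimal sketch of calling f_oneway directly on two synthetic groups of
# samples (20 rows, 3 features each); it is guarded so the module still imports
# cleanly, and the numbers are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    group_a = rng.normal(0.0, 1.0, size=(20, 3))
    group_b = rng.normal(0.5, 1.0, size=(20, 3))
    F_values, p_values = f_oneway(group_a, group_b)
    print(F_values, p_values)  # one F statistic and p-value per feature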
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The data matrix.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Linear model for testing the individual effect of each of many regressors.
This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
This is done in 2 steps:
1. The correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of
|
antoinecarme/pyaf
|
tests/artificial/transf_Difference/trend_Lag1Trend/cycle_7/ar_12/test_artificial_128_Difference_Lag1Trend_7_12_20.py
|
Python
|
bsd-3-clause
| 266
| 0.086466
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
|