repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
revolutionaryG/androguard | androgui.py | Python | apache-2.0 | 1,817 | 0.001101 | #!/usr/bin/env python2
'''Androguard Gui'''
import argparse
import sys
from androguard.core import androconf
from androguard.session import Session
from androguard.gui.mainwindow import MainWindow
from androguard.misc import init_print_colors
from PySide import QtCore, QtGui
from threading import Thread
class IpythonConsole(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
from IPython.terminal.embed import InteractiveShellEmbed
from traitlets.config import Config
cfg = Config()
ipshell = InteractiveShellEmbed(
config=cfg,
banner1="Androguard version %s" % androconf.ANDROGUARD_VERSION)
init_print_colors()
ipshell()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Androguard GUI")
parser.add_argument("-d", "--debug", action="store_true", default=False)
parser.add_argument("-i", "--input_file", default=None)
parser.add_argument | ("-c", "--console", action="store_true", default=False)
args = parser.parse_args()
if args.debug:
androconf.set_debug()
# We need that to save huge sessions when leaving and avoid
# RuntimeError: maximum recursion depth exceeded while pickling an object
# or
# RuntimeError: maximum recursion depth exceeded in cmp
# http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
sys.setrecursionlimit(50000)
session = | Session(export_ipython=args.console)
console = None
if args.console:
console = IpythonConsole()
console.start()
app = QtGui.QApplication(sys.argv)
window = MainWindow(session=session, input_file=args.input_file)
window.resize(1024, 768)
window.show()
sys.exit(app.exec_())
|
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/biocomplexity/examples/estimation_lccm_specification_ub91to95.py | Python | gpl-2.0 | 10,640 | 0.083177 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
specification = {}
specification = {
2:
{
"equation_ids":(1,2),
"constant":(0, "act_2_2"),
"blmz":("blmz_2_1", 0),
"cd1":("cd1_2_1", 0),
"dag":("dag_2_1", 0),
"dprd":("dprd_2_1", 0),
"hai":("hai_2_1", 0),
# "pcd":("pcd_2_1", 0), - Jeff removed all PCD on 21Feb 2006
"phu":("phu_2_1", 0),
"pmu":("pmu_2_1", 0),
"tiv":("tiv_2_1", 0),
},
3:
{
"equation_ids":(1,2,3),
"constant":(0,0,"act_3_3"),
"blmz":("blmz_3_1",0, 0),
"c750":("c750_3_1","c750_3_2", 0),
"dloc":("dloc_3_1","dloc_3_2", 0),
"dnlr":(0,"dnlr_3_2", 0),
"dprd":("dprd_3_1",0, 0),
"dpub":(0,"dpub_3_2", 0),
"dres":("dres_3_1","dres_3_2", 0),
"dtim":("dtim_3_1","dtim_3_2", 0),
"gmps":("gmps_3_1",0, 0),
"h450":(0,"h450_3_2", 0),
"mmps":("mmps_3_1",0, 0),
# "pcd":("pcd_3_1","pcd_3_2", 0),
"pfld":("pfld_3_1",0, 0),
"phu":("phu_3_1","phu_3_2", 0),
"pmf":("pmf_3_1","pmf_3_2", 0),
"pmu":("pmu_3_1","pmu_3_2", 0),
"pslp":("pslp_3_1","pslp_3_2", 0),
"pwa":("pwa_3_1",0, 0),
"shei":("shei_3_1",0, 0),
"tiv":("tiv_3_1",0, 0),
"ugl":("ugl_3_1",0, 0),
},
#4:
# {
# "equation_ids":(1,2,3), # note: this is the to_id's
# "constant":(0, "act_4_2", "act_4_3"), #there is no constant term in the equation for to_id 1
# "aai":(0, "aai_4_2","aai_4_3"),
# "amps":(0, "amps_4_2","amps_4_3"),
# "blmz":(0, "blmz_4_2","blmz_4_3"),
# "c450":(0, "c450_4_2","c450_4_3"),
# "c750":(0, "c750_4_2","c750_4_3"),
# "cd1":(0, "cd1_4_2","cd1_4_3"),
# "crit":(0, "crit_4_2","crit_4_3"),
# "dag":(0, "dag_4_2","dag_4_3"),
# "dc":(0, "dc_4_2","dc_4_3"),
# "dcbd":(0, "dcbd_4_2","dcbd_4_3"),
# "dcri":(0, "dcri_4_2","dcri_4_3"),
# "ddt1":(0, "ddt1_4_2","ddt1_4_3"),
# "de":(0, "de_4_2","de_4_3"),
# "dfre":(0, "dfre_4_2","dfre_4_3"),
# "di":(0, "di_4_2","di_4_3"),
# "dloc":(0, "dloc_4_2","dloc_4_3"),
# "dmu":(0, "dmu_4_2","dmu_4_3"),
# "dnlr":(0, "dnlr_4_2","dnlr_4_3"),
# "dos":(0, "dos_4_2","dos_4_3"),
# "dprd":(0, "dprd_4_2","dprd_4_3"),
# "dpub":(0, "dpub_4_2","dpub_4_3"),
# "dres":(0, "dres_4_2","dres_4_3"),
# "dt1":(0, "dt1_4_2","dt1_4_3"),
# "dtim":(0, "dtim_4_2","dtim_4_3"),
# "dwat":(0, "dwat_4_2","dwat_4_3"),
# "dwet":(0, "dwet_4_2","dwet_4_3"),
# "fai":(0, "fai_4_2","fai_4_3"),
# "fmps":(0, "fmps_4_2","fmps_4_3"),
# "gai":(0, "gai_4_2","gai_4_3"),
# "gmps":(0, "gmps_4_2","gmps_4_3"),
# "h450":(0, "h450_4_2","h450_4_3"),
# "h750":(0, "h750_4_2","h750_4_3"),
# "hai":(0, "hai_4_2","hai_4_3"),
# "hd1":(0, "hd1_4_2","hd1_4_3"),
# "hmps":(0, "hmps_4_2","hmps_4_3"),
# "mai":(0, "mai_4_2","mai_4_3"),
# "mmps":(0, "mmps_4_2","mmps_4_3"),
# "pag":(0, "pag_4_2","pag_4_3"),
# "pcc":(0, "pcc_4_2","pcc_4_3"),
# "pcd":(0, "pcd_4_2","pcd_4_3"),
# "pcf":(0, "pcf_4_2","pcf_4_3"),
# "pcri":(0, "pcri_4_2","pcri_4_3"),
# "pes":(0, "pes_4_2","pes_4_3"),
# "pfld":(0, "pfld_4_2","pfld_4_3"),
# "pgr":(0, "pgr_4_2","pgr_4_3"),
# "phu":(0, "phu_4_2","phu_4_3"),
# "plu":(0, "plu_4_2","plu_4_3"),
# "pmf":(0, "pmf_4_2","pmf_4_3"),
# "pmu":(0, "pmu_4_2","pmu_4_3"),
# "psg":(0, "psg_4_2","psg_4_3"),
# "pslp":(0, "pslp_4_2","pslp_4_3"),
# "pstr":(0, "pstr_4_2","pstr_4_3"),
# "pub":(0, "pub_4_2","pub_4_3"),
# "pwa":(0, "pwa_4_2","pwa_4_3"),
# "pwet":(0, "pwet_4_2","pwet_4_3"),
# "shei":(0, "shei_4_2","shei_4_3"),
# "sslp":(0, "sslp_4_2","sslp_4_3"),
# "tbl":(0, "tbl_4_2","tbl_4_3"),
# "tiv":(0, "tiv_4_2","tiv_4_3"),
# "ugl":(0, "ugl_4_2","ugl_4_3"),
# }, |
5:
{
"equation_ids":(1,2,3,5,6,7), # note: this is the to_id's
"constant":("act_5_1","act_5_2","act_5_3","act_5_5",0,0),
"aai":(0,"aai_5_2","aai_5_3", 0,"aai_5_6","aai_5_7"),
"amps":("amps_5_1","amps_5_2",0, 0,0,"amps_5_7"),
# # "blmz":(0,0,0, 0,"blmz_5_6",0),
# # "c750":("c750_5_1",0,0, 0,0,0),
"cd1":("cd1_5_1",0,"cd1_5_3", 0,0,"cd1_5_7"),
"dag":("dag_5_1",0,0, 0,"dag_5_6","dag_5_7 | "),
# # "dc":(0,0,0, 0,0,"dc_5_7"),
# # "dcbd":("dcbd_5_1",0,0, 0,0,0),
"dcri":("dcri_5_1",0,0, 0,0,0),
"de":("de_5_1","de_5_2","de_5_3", 0,0,0),
"dloc":("dloc_5_1","dloc_5_2","dloc_5_3", 0,0,0),
"dnlr":(0,"dnlr_5_2","dnlr_5_3", 0,0,0),
"dos":("dos_5_1",0,0, 0,0,0),
"dprd":("dprd_5_1",0,0, 0,0,"dprd_5_7"),
"dpub":("dpub_5_1","dpub_5_2",0, 0,0,0),
"dres":(0,0,"dres_5_3", 0,0,0),
"dtim":("dtim_5_1","dtim_5_2","dtim_5_3", 0,0,0),
"dwat":("dwat_5_1",0,0, 0,0,"dwat_5_7"),
"dwet":(0,0,"dwet_5_3", 0,0,0),
"fmps":("fmps_5_1","fmps_5_2",0, 0,0,"fmps_5_7"),
"h450":(0,0,0, 0,0,"h450_5_7"),
"h750":("h750_5_1","h750_5_2",0, 0,0,"h750_5_7"),
"hai":(0,0,0, 0,"hai_5_6",0),
# "pcd":("pcd_5_1","pcd_5_2",0, 0,0,0),
"pcf":(0,0,0, 0,0,"pcf_5_7"),
"pcri":("pcri_5_1",0,0, 0,0,0),
"pes":("pes_5_1",0,0, 0,"pes_5_6","pes_5_7"),
"phu":("phu_5_1",0,"phu_5_3", 0,"phu_5_6","phu_5_7"),
"plu":(0,0,"plu_5_3", 0,"plu_5_6",0),
"pmu":("pmu_5_1","pmu_5_2",0, 0,"pmu_5_6","pmu_5_7"),
"pstr":(0,"pstr_5_2","pstr_5_3", 0,"pstr_5_6",0),
"pub":("pub_5_1",0,0, 0,0,0),
"pwa":(0,0,0, 0,"pwa_5_6",0),
"pwet":(0,0,0, 0,0,"pwet_5_7"),
"shei":("shei_5_1","shei_5_2",0, 0,"shei_5_6",0),
"sslp":("sslp_5_1","sslp_5_2","sslp_5_3", 0,"sslp_5_6",0),
"tiv":("tiv_5_1",0,0, 0,0,0),
"ugl":("ugl_5_1",0,0, 0,0,0),
},
6:
{
"equation_ids":(1,2,3,5,6), # note: this is the to_id's
"constant":("act_6_1","act_6_2","act_6_3",0,"act_6_6"),
"aai":(0,"aai_6_2","aai_6_3",0,0),
"blmz":("blmz_6_1",0,0,0,0),
"c750":("c750_6_1",0,0,0,0),
"dcri":("dcri_6_1","dcri_6_2","dcri_6_3",0, 0),
"di":("di_6_1",0,0,0,0),
"dloc":("dloc_6_1","dloc_6_2","dloc_6_3",0, 0),
"dnlr":(0,"dnlr_6_2","dnlr_6_3",0, 0),
"dos":("dos_6_1",0,0,0,0),
"dprd":("dprd_6_1",0,0,0, 0),
"dres":("dres_6_1","dres_6_2",0,0, 0),
"dtim":("dtim_6_1","dtim_6_2","dtim_6_3",0, 0),
"fmps":("fmps_6_1",0,0,0, 0),
"gai":(0,0,"gai_6_3","gai_6_5", 0),
"h750":("h750_6_1","h750_6_2",0,0, 0),
"hmps":("hmps_6_1",0,0,0,0),
# "pcd":("pcd_6_1","pcd_6_2","pcd_6_3","pcd_6_5", 0),
"pcf":("pcf_6_1","pcf_6_2",0,0,0),
"pes":("pes_6_1",0,"pes_6_3",0, 0),
"pgr":("pgr_6_1","pgr_6_2","pgr_6_3","pgr_6_5", 0),
"phu":("phu_6_1","phu_6_2",0,0, 0),
"plu":(0,0,"plu_6_3","plu_6_5", 0),
"pmu":("pmu_6_1","pmu_6_2","pmu_6_3",0, 0),
"pslp":("pslp_6_1","pslp_6_2","pslp_6_3","pslp_6_5", 0),
"pstr":("pstr_6_1","pstr_6_2",0,0, 0),
"pub":("pub_6_1",0,0,0, 0),
"pwa":(0,0,"pwa_6_3",0, 0),
"pwet":("pwet_6_1","pwet_6_2","pwet_6_3",0, 0),
"shei":("shei_6_1",0,"shei_6_3","shei_6_5", 0),
"tiv":("tiv_6_1",0,0,0, 0),
"ugl":("ugl_6_1","ugl_6_2","ugl_6_3",0, 0),
},
7:
{
"equation_ids":(1,2,3,5,7), # note: this is the to_id's
"constant":("act_7_1","act_7_2","act_7_3",0,"act_7_7"),
"aai":(0,0,"aai_7_3",0, 0),
"blmz":(0,"blmz_7_2","blmz_7_3","blmz_7_5", 0),
"crit":(0,"crit_7_2",0,0, 0),
"dc":("dc_7_1",0,0,0,0),
"dcri":("dcri_7_1","dcri_7_2","dcri_7_3",0, 0),
"ddt1":(0,0,"ddt1_7_3","ddt1_7_5", 0),
"dloc":("dloc_7_1","dloc_7_2","dloc_7_3",0, 0),
"dos":("dos_7_1",0,"dos_7_3",0, 0),
"dprd":("dprd_7_1","dprd_7_2",0,0, 0),
"dpub":(0,"dpub_7_2",0,"dpub_7_5", 0),
"dres":("dres_7_1","dres_7_2",0,0, 0),
"dwat":("dwat_7_1","dwat_7_2",0,0, 0),
"fmps":("fmps_7_1","fmps_7_2","fmps_7_3",0, 0),
"gai":(0,0,0,"gai_7_5", 0),
"h750":("h750_7_1","h750_7_2",0,0, 0),
# "pcd":("pcd_7_1","pcd_7_2","pcd_7_3","pcd_7_5", 0),
"pcf":(0,0,0,"pcf_7_5", 0),
"pes":(0,0,0,"pes_7_5", 0),
"pgr":("pgr_7_1","pgr_7_2",0,"pgr_7_5", 0),
"phu":("phu_7_1","phu_7_2",0,0, 0),
"plu":("plu_7_ |
catharsis/bugger | bugger/exceptions.py | Python | bsd-2-clause | 191 | 0.026178 | class NoPaging(Exception): pass
class BuggerLoginError(Exception): pass
class BugNo | tFound(Exception): pass
class BugRenderError(Exception): pass
class BackendConne | ctionError(Exception): pass
|
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py | Python | apache-2.0 | 18,696 | 0.007863 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import optparse
import os
import signal
import sys
import traceback
from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.layout_tests.port import configuration_options, platform_options
from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing
_log = logging.getLogger(__name__)
# This mirrors what the shell normally does.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
# This is a randomly chosen exit code that can be tested against to
# indicate that an unexpected exception occurred.
EXCEPTIONAL_EXIT_STATUS = 254
def main(argv, stdout, stderr):
options, args = parse_args(argv)
if options.platform and 'test' in options.platform:
# It's a bit lame to import mocks into real code, but this allows the user
# to run tests against the test platform interactively, which is useful for
# debugging test failures.
from webkitpy.common.host_mock import MockHost
host = MockHost()
else:
host = Host()
if options.lint_test_files:
from webkitpy.layout_tests.lint_test_expectations import lint
return lint(host, options, stderr)
try:
port = host.port_factory.get(options.platform, options)
except NotImplementedError, e:
# FIXME: is this the best way to handle unsupported port names?
print >> stderr, str(e)
return EXCEPTIONAL_EXIT_STATUS
try:
run_details = run(port, options, args, stderr)
if run_details.exit_code != -1:
bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
bot_printer.print_results(run_details)
return run_details.exit_code
except KeyboardInterrupt:
return INTERRUPTED_EXIT_STATUS
except BaseException as e:
if isinstance(e, Exception):
print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
traceback.print_exc(file=stderr)
return EXCEPTIONAL_EXIT_STATUS
def parse_args(args):
option_group_definitions = []
option_group_definitions.append(("Platform options", platform_options()))
option_group_definitions.append(("Configuration options", configuration_options()))
option_group_definitions.append(("Printing Options", printing.print_options()))
# FIXME: These options should move onto the ChromiumPort.
option_group_definitions.append(("Chromium-specific Options", [
optparse.make_option("--nocheck-sys-deps", action="store_true",
default=False,
help="Don't check the system dependencies (themes)"),
optparse.make_option("--adb-device",
action="append", default=[],
help="Run Android layout tests on these devices."),
]))
option_group_definitions.append(("Results Options", [
optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
optparse.make_option("--results-directory", help="Location of test results"),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
optparse.make_option("--new-baseline", action="store_true",
default=False, help="Save generated results as new baselines "
"into the *most-specific-platform* directory, overwriting whatever's "
"already there. Equivalent to --reset-results --add-platform- | exceptions"),
optparse.make_option("--reset-results" | , action="store_true",
default=False, help="Reset expectations to the "
"generated results in their existing location."),
optparse.make_option("--no-new-test-results", action="store_false",
dest="new_test_results", default=True,
help="Don't create new baselines when no expected results exist"),
#FIXME: we should support a comma separated list with --pixel-test-directory as well.
optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
help="A directory where it is allowed to execute tests as pixel tests. "
"Specify multiple times to add multiple directories. "
"This option implies --pixel-tests. If specified, only those tests "
"will be executed as pixel tests that are located in one of the "
"directories enumerated with the option. Some ports may ignore this "
"option while others can have a default value that can be overridden here."),
optparse.make_option("--skip-failing-tests", action="store_true",
default=False, help="Skip tests that are expected to fail. "
"Note: When using this option, you might miss new crashes "
"in these tests."),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to the driver "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative driver binary to use"),
optparse.make_option("--additional-platform-directory", action="append",
default=[], help="Additional directory where to look for test "
"baselines (will take precendence over platform baselines). "
"Specify multiple times to add multiple search path entries."),
optparse.make_option("--additional-expectations", action="append", default=[],
help="Path to a test_expectations file that will override previous expectations. "
"Specify multiple times for multiple sets of overrides."),
optparse.make_option("--compare-port", action="store", default=None,
help="Use the specified port's baselines first"),
optparse.make_option("--no-show-results", actio |
e0ne/python-brickagentclient | brickclient/tests/test_service_catalog.py | Python | apache-2.0 | 10,473 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brickclient import exceptions
from brickclient import service_catalog
from brickclient.tests import utils
# Taken directly from keystone/content/common/samples/auth.json
# Do not edit this structure. Instead, grab the latest from there.
SERVICE_CATALOG = {
"access": {
"token": {
"id": "ab48a9efdfedb23ty3494",
"expires": "2010-11-01T03:32:15-05:00",
"tenant": {
"id": "345",
"name": "My Project"
}
},
"user": {
"id": "123",
"name": "jqsmith",
"roles": [
{
"id": "234",
"name": "compute:admin",
},
{
"id": "235",
"name": "object-store:admin",
"tenantId": "1",
}
],
"roles_links": [],
},
"serviceCatalog": [
{
"name": "Cloud Servers",
"type": "compute",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://compute1.host/v1/1234",
"internalURL": "https://compute1.host/v1/1234",
"region": "North",
"versionId": "1.0",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
{
"tenantId": "2",
"publicURL": "https://compute1.host/v1/3456",
"internalURL": "https://compute1.host/v1/3456",
"region": "North",
"versionId": "1.1",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
],
"endpoints_links": [],
},
{
"name": "Cinder Volume Service",
"type": "volume",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://volume1.host/v1/1234",
"internalURL": "https://volume1.host/v1/1234",
"region": "South",
"versionId": "1.0",
"versionInfo": "uri",
"versionList": "uri"
},
{
"tenantId": "2",
"publicURL": "https://volume1.host/v1/3456",
"internalURL": "https://volume1.host/v1/3456",
"region": "South",
"versionId": "1.1",
"versionInfo": "https://volume1.host/v1/",
"versionList": "https://volume1.host/"
},
],
"endpoints_links": [
{
"rel": "next",
"href": "https://identity1.host/v2.0/endpoints"
},
],
},
{
"name": "Cinder Volume Service V2",
"type": "volumev2",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://volume1.host/v2/1234",
"internalURL": "https://volume1.host/v2/1234",
"region": "South",
"versionId": "2.0",
"versionInfo": "uri",
"versionList": "uri"
},
{
"tenantId": "2",
"publicURL": "https://volume1.host/v2/3456",
"internalURL": "https://volume1.host/v2/3456",
"region": "South",
"versionId": "1.1",
"versionInfo": "https://volume1.host/v2/",
"versionList": "https://volume1.host/"
},
],
"endpoints_links": [
{
"rel": "next",
"href": "https://identity1.host/v2.0/endpoints"
},
],
},
],
"serviceCatalog_links": [
{
"rel": "next",
"href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar",
},
],
},
}
SERVICE_COMPATIBILITY_CATALOG = {
"access": {
"token": {
"id": "ab48a9efdfedb23ty3494",
"expires": "2010-11-01T03:32:15-05:00",
"tenant": {
"id": "345",
"name": "My Project"
}
},
"user": {
"id": "123",
"name": "jqsmith",
"roles": [
{
"id": "234",
"name": "compute:admin",
},
{
| "id": "235",
"name": "object-store:admin",
"tenantId": "1",
}
],
"roles_links": [],
},
"serviceCatalo | g": [
{
"name": "Cloud Servers",
"type": "compute",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://compute1.host/v1/1234",
"internalURL": "https://compute1.host/v1/1234",
"region": "North",
"versionId": "1.0",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
{
"tenantId": "2",
"publicURL": "https://compute1.host/v1/3456",
"internalURL": "https://compute1.host/v1/3456",
"region": "North",
"versionId": "1.1",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
],
"endpoints_links": [],
},
{
"name": "Cinder Volume Service V2",
"type": "volume",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://volume1.host/v2/1234",
"internalURL": "https://volume1.host/v2/1234",
"region": "South",
"versionId": "2.0",
"versionInfo": "uri",
"versionList": "uri"
},
{
"tenantId": "2",
"publicURL": "https://volume1.host/v2/3456",
"internalURL": "https://volume1.host/v2/3456",
"region": "South",
"versionId": "1.1",
"versionInfo": "https://volume1.host/v2/",
"versionList": "https://volume1.host/"
},
],
"endpoints_links": [
{
"rel": "next",
|
beav/pulp | server/test/unit/server/content/sources/test_container.py | Python | gpl-2.0 | 34,946 | 0.000286 | # Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import inspect
from unittest import TestCase
from Queue import Queue, Full, Empty
from collections import namedtuple
from mock import patch, Mock
from pulp.server.content.sources.container import (
ContentContainer, NectarListener, Item, RequestQueue, Batch, DownloadReport,
Listener, NectarFeed, Tracker)
from pulp.server.content.sources.model import ContentSource
class SideEffect(object):
"""
Supports collection of side effects containing exceptions.
"""
def __init__(self, values):
self.values = iter(values)
def __call__(self, *args, **kwargs):
value = next(self.values)
if isinstance(value, Exception):
raise value
else:
return value
class TestContainer(TestCase):
@patch('pulp.server.content.sources.container.ContentSource.load_all')
def test_construction(self, fake_load):
path = 'path-1'
# test
ContentContainer(path)
# validation
fake_load.assert_called_with(path)
@patch('pulp.server.content.sources.container.Batch')
@patch('pulp.server.content.sources.container.PrimarySource')
@patch('pulp.server.content.sources.container.ContentContainer.refresh')
@patch('pulp.server.content.sources.container.ContentSource.load_all')
def test_download(self, fake_load, fake_refresh, fake_primary, fake_batch):
path = Mock()
downloader = Mock()
requests = Mock()
listener = Mock()
canceled = Mock()
canceled.is_set.return_value = False
_batch = Mock()
_batch.download.return_value = 123
fake_batch.return_value = _batch
# test
container = ContentContainer(path)
report = container.download(canceled, downloader, requests, listener)
# validation
fake_load.assert_called_with(path)
fake_refresh.assert_called_with(canceled)
fake_primary.assert_called_with(downloader)
fake_batch.assert_called_with(canceled, fake_primary(), fake_load(), requests, listener)
fake_batch().download.assert_called_with()
self.assertEqual(report, _batch.download.return_value)
@patch('pulp.server.content.sources.container.ContentSource.load_all')
@patch('pulp.server.content.sources.container.managers.content_catalog_manager')
def test_refresh(self, fake_manager, fake_load):
sources = {}
canceled = Mock()
canceled.is_set.return_value = False
for n in range(3):
s = ContentSource('s-%d' % n, {})
s.refresh = Mock(return_value=[n])
s.get_downloader = Mock()
sources[s.id] = s
fake_manager().has_entries.return_value = False
fake_load.return_value = sources
# test
container = ContentContainer('')
report = container.refresh(canceled)
# validation
for s in sources.values():
s.refresh.assert_called_with(canceled)
self.assertEqual(sorted(report), [0, 1, 2])
@patch('pulp.server.content.sources.container.ContentSource.load_all')
@patch('pulp.server.content.sources.container.managers.content_catalog_manager')
def test_refresh_raised(self, fake_manager, fake_load):
sources = {}
canceled = Mock()
canceled.is_set.return_value = False
for n in range(3):
s = ContentSource('s-%d' % n, {})
s.refresh = Mock(side_effect=ValueError('must be int'))
s.get_downloader = Mock()
sources[s.id] = s
fake_manager().has_entries.return_value = False
fake_load.return_value = sources
# test
container = ContentContainer('')
report = container.refresh(canceled)
# validation
for s in sources.values():
s.refresh.assert_called_with(canceled)
for r in report:
r.errors = ['must b | e int']
@patch('pulp.server.content.sources. | container.ContentSource.load_all')
@patch('pulp.server.content.sources.container.managers.content_catalog_manager')
def test_forced_refresh(self, fake_manager, fake_load):
sources = {}
canceled = Mock()
canceled.is_set.return_value = False
for n in range(3):
s = ContentSource('s-%d' % n, {})
s.refresh = Mock()
sources[s.id] = s
fake_manager().has_entries.return_value = True
fake_load.return_value = sources
# test
container = ContentContainer('')
container.refresh(canceled, force=True)
# validation
for s in sources.values():
s.refresh.assert_called_with(canceled)
@patch('pulp.server.content.sources.container.ContentSource.load_all')
@patch('pulp.server.content.sources.container.managers.content_catalog_manager', Mock())
def test_refresh_canceled(self, fake_load):
sources = {}
for n in range(3):
s = ContentSource('s-%d' % n, {})
s.refresh = Mock()
sources[s.id] = s
fake_load.return_value = sources
# test
canceled = Mock()
canceled.is_set.return_value = True
container = ContentContainer('')
container.refresh(canceled, force=True)
# validation
for s in sources.values():
self.assertFalse(s.refresh.called)
@patch('pulp.server.content.sources.container.ContentSource.load_all')
@patch('pulp.server.content.sources.container.managers.content_catalog_manager')
def test_purge_orphans(self, fake_manager, fake_load):
fake_load.return_value = {'A': 1, 'B': 2, 'C': 3}
# test
container = ContentContainer('')
# validation
container.purge_orphans()
fake_manager().purge_orphans.assert_called_with(fake_load.return_value.keys())
class TestNectarListener(TestCase):
@patch('pulp.server.content.sources.container.log')
def test_forward(self, mock_log):
method = Mock()
report = Mock()
# test
NectarListener._forward(method, report)
# validations
method.assert_called_with(report)
# test (raised)
method.side_effect = ValueError()
NectarListener._forward(method, report)
# validation
mock_log.exception.assert_called_with(str(method))
def test_construction(self):
batch = Mock()
# test
listener = NectarListener(batch)
# validation
self.assertEqual(listener.batch, batch)
def test_download_started(self):
batch = Mock()
batch.is_canceled = False
batch.listener = Mock()
report = Mock()
report.data = Mock()
# test
listener = NectarListener(batch)
listener.download_started(report)
# validation
batch.listener.download_started.assert_called_with(report.data)
def test_download_started_no_listener(self):
batch = Mock()
batch.is_canceled = False
batch.listener = Mock()
batch.listener.__nonzero__ = Mock(return_value=False)
report = Mock()
report.data = Mock()
# test
listener = NectarListener(batch)
listener.download_started(report)
# validation
self.assertFalse(batch.listener.download_started.called)
def test_download_started_canceled(self):
batch = Mock()
batch.is_canceled = True
batch.listener = Mock()
report = Mock()
report.data = Mock()
# test
listener = NectarListener(batch)
listener.download_s |
emmett9001/schema.to | schemato/distillery.py | Python | apache-2.0 | 1,053 | 0 | from .distillers import Distill, Distiller
class NewsDistiller(Distiller):
site = Distill("og:site_name")
title = Distill("s:headline", "og:title")
image_url = Distill("s:associatedMedia.ImageObject/url", "og:image")
pub_date = Distill("s:datePublished")
author = Distill("s:creator.Person/name", "s:author")
section = Di | still("s:articleSection")
description = Distill("s:description", "og:description")
link = Distill("s:url", "og:url")
id = Distill("s:identifier")
class ParselyDistiller(Distiller):
site = Distill("og:site_name")
title = Distill("pp:title", "s:headline", "og:title")
image_url = Distill(
"pp:image_url", "s:associatedMedia.ImageObject/url", "og:image")
pub_date = Distill("pp:pub_date", "s:datePublished")
author = Distill("pp:author", "s:creator.Pe | rson/name", "s:author")
section = Distill("pp:section", "s:articleSection")
link = Distill("pp:link", "og:url", "s:url")
post_id = Distill("pp:post_id", "s:identifier")
page_type = Distill("pp:type")
|
rombie/contrail-controller | src/nodemgr/common/windows_process_mem_cpu.py | Python | apache-2.0 | 1,375 | 0.001455 | #
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
import psutil
import time
from sandesh.nodeinfo.cpuinfo.ttypes import ProcessCpuInfo
class WindowsProcessMemCpu | UsageData(object):
def __init__(self, pid, last_cpu, last_time):
self.last_cpu = last_cpu
self.last_time = last_time
self.pid = pid
def _get_process_cpu_share(self, current_cpu):
last_cpu = self.last_cpu
last_time = self.last_time
current_time = time.time()
interval_time = 0
if last_cpu and (last_time != 0):
usage_time = (current_cpu.system - last_cpu.system) + (current_cpu.user - last_cpu.user)
interval_ | time = current_time - last_time
self.last_cpu = current_cpu
self.last_time = current_time
if interval_time > 0:
usage_percent = 100 * usage_time / interval_time
cpu_share = round(usage_percent / psutil.cpu_count(), 2)
return cpu_share
else:
return 0
def get_process_mem_cpu_info(self):
process_mem_cpu = ProcessCpuInfo()
p = psutil.Process(self.pid)
process_mem_cpu.cpu_share = self._get_process_cpu_share(p.cpu_times())
process_mem_cpu.mem_virt = p.memory_info().vms / 1024
process_mem_cpu.mem_res = p.memory_info().rss / 1024
return process_mem_cpu
|
zonca/petsc4py | test/test_sys.py | Python | bsd-2-clause | 1,904 | 0.002626 | import unittest
from petsc4py import PETSc
# --------------------------------------------------------------------
class TestVersion(unittest.TestCase):
    """Checks PETSc.Sys version reporting and the default communicator."""

    def testGetVersion(self):
        """getVersion() and its patch/date/author variants must agree."""
        version = PETSc.Sys.getVersion()
        self.assertTrue(version > (0, 0, 0))
        v, patch = PETSc.Sys.getVersion(patch=True)
        self.assertTrue(version == v)
        self.assertTrue(patch >= 0)
        v, date = PETSc.Sys.getVersion(date=True)
        self.assertTrue(version == v)
        self.assertTrue(isinstance(date, str))
        v, author = PETSc.Sys.getVersion(author=True)
        self.assertTrue(version == v)
        self.assertTrue(isinstance(author, (list, tuple)))

    def testGetVersionInfo(self):
        """getVersionInfo() must be consistent with getVersion()."""
        version = PETSc.Sys.getVersion()
        info = PETSc.Sys.getVersionInfo()
        self.assertEqual(version,
                         (info['major'],
                          info['minor'],
                          info['subminor'],))
        self.assertTrue(isinstance(info['release'], bool))
        _, patch = PETSc.Sys.getVersion(patch=True)
        self.assertEqual(patch, info['patch'])
        v, date = PETSc.Sys.getVersion(date=True)
        self.assertEqual(date, info['date'])

    def testGetSetDefaultComm(self):
        """setDefaultComm() round-trips and rejects COMM_NULL."""
        c = PETSc.Sys.getDefaultComm()
        self.assertEqual(c, PETSc.COMM_WORLD)
        PETSc.Sys.setDefaultComm(PETSc.COMM_SELF)
        c = PETSc.Sys.getDefaultComm()
        self.assertEqual(c, PETSc.COMM_SELF)
        # Restore the default so later tests are unaffected.
        PETSc.Sys.setDefaultComm(PETSc.COMM_WORLD)
        c = PETSc.Sys.getDefaultComm()
        self.assertEqual(c, PETSc.COMM_WORLD)
        f = lambda: PETSc.Sys.setDefaultComm(PETSc.COMM_NULL)
        self.assertRaises(ValueError, f)
# --------------------------------------------------------------------
# Allow this test module to be executed directly.
if __name__ == '__main__':
    unittest.main()
# --------------------------------------------------------------------
|
xow/mdk | mdk/commands/plugin.py | Python | gpl-3.0 | 7,095 | 0.001269 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Moodle Development Kit
Copyright (c) 2013 Frédéric Massart - FMCorz.net
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
http://github.com/FMCorz/mdk
"""
from ..plugins import *
from ..command import Command
class PluginCommand(Command):
    """MDK command that downloads and installs Moodle plugins."""

    # The 'download' and 'install' sub-commands accept exactly the same
    # arguments; declare the list once and share it between both entries.
    _pluginArguments = [
        (
            ['pluginname'],
            {
                'type': str,
                'metavar': 'pluginname',
                'default': None,
                'help': 'frankenstyle name of the plugin'
            }
        ),
        (
            ['name'],
            {
                'default': None,
                'help': 'name of the instance to work on',
                'metavar': 'name',
                'nargs': '?'
            }
        ),
        (
            ['-s', '--strict'],
            {
                'action': 'store_true',
                'help': 'prevent the download of a parent version if a file is not found for this branch',
            }
        ),
        (
            ['-f', '--force'],
            {
                'action': 'store_true',
                'help': 'overrides the plugin directory if it already exists'
            }
        ),
        (
            ['-c', '--no-cache'],
            {
                'action': 'store_true',
                'dest': 'nocache',
                'help': 'ignore the cached files'
            }
        )
    ]

    _arguments = [
        (
            ['action'],
            {
                'metavar': 'action',
                'help': 'the action to perform',
                'sub-commands': {
                    'download': (
                        {'help': 'download a plugin in the instance'},
                        _pluginArguments
                    ),
                    'install': (
                        {'help': 'download and install a plugin'},
                        _pluginArguments
                    )
                }
            }
        )
    ]

    _description = 'Manage your plugins'

    def run(self, args):
        """Resolve the target Moodle instance and dispatch the action."""
        if args.action not in ('download', 'install'):
            return
        M = self.Wp.resolve(args.name)
        if not M:
            raise Exception('This is not a Moodle instance')
        if args.action == 'download':
            self.download(M, args)
        elif args.action == 'install':
            if self.download(M, args):
                logging.info('Upgrading Moodle to install the new plugin')
                M.upgrade()

    def download(self, M, args):
        """Fetch the plugin zip matching M's branch and extract it into M.

        Returns True when the plugin was extracted, False otherwise.
        """
        po = PluginObject(args.pluginname)
        if not args.force and PluginManager.hasPlugin(po, M):
            logging.error('The plugin is already present, set --force to overwrite it.')
            return False
        branch = M.get('branch')
        if branch == 'master':
            branch = C.get('masterBranch')
        branch = int(branch)
        fi = po.getZip(branch, fileCache=not args.nocache)
        # Unless --strict was given, fall back to the previous branch's zip.
        if not fi and not args.strict:
            fi = po.getZip(branch - 1, fileCache=not args.nocache)
        if not fi:
            logging.warning('Could not find a file for this plugin')
            return False
        PluginManager.extract(fi, po, M, override=args.force)
        return True
|
ted-ross/qpid-dispatch | tools/scraper/amqp_detail.py | Python | apache-2.0 | 50,089 | 0.003094 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import datetime
import sys
import traceback
import common
import text
"""
Given a map of all connections with lists of the associated frames
analyze and show per-connection, per-session, and per-link details.
This is done in a two-step process:
* Run through the frame lists and generates an intermediate structure
with the the details for display.
* Generate the html from the detail structure.
This strategy allows for a third step that would allow more details
to be gleaned from the static details. For instance, if router A
sends a transfer to router B then router A's details could show
how long it took for the transfer to reach router B. Similarly
router B's details could show how long ago router A sent the transfer.
"""
class Counts():
    """
    Holds common count sets that can be rolled up from links to
    sessions to connections. Not for individual performatives.
    """
    def __init__(self):
        # amqp errors gleaned from any performative
        self.errors = 0 # amqp error - simple count
        # derived facts about message settlement
        self.unsettled = 0
        self.presettled = 0
        self.accepted = 0
        self.rejected = 0
        self.released = 0
        self.modified = 0
        # interesting transfers
        self.aborted = 0
        self.more = 0
        self.incomplete = 0  # all frames of the transfer had more=true
        # link drained
        self.drain = 0
        # link out of credit
        self.credit_not_evaluated = 0
        self.no_credit = 0 # event count, excludes drain credit exhaustion
        self.initial_no_credit_duration = datetime.timedelta() # before first credit
        self.no_credit_duration = datetime.timedelta() # after credit issued and then exhausted
def highlight(self, name, value, color):
"""
if value is non zero then return a colorized 'name: value' text stream
else return a blank string
"""
result = ""
if value:
result = "<span style=\"background-color:%s\">%s: %s</span> " % (color, name, str(value))
return result
def highlight_duration(self, name, value, color):
"""
if value is non zero then return a colorized 'name: value' text stream
else return a blank string
"""
result = ""
if value.seconds > 0 or value.microseconds > 0:
t = float(value.seconds) + float(value.microseconds) / 1000000.0
result = "<span style=\"background-color:%s\">%s: %0.06f</span> " % (color, name, t)
return result
def show_html(self):
res = ""
res += self.highlight("errors", self.errors, common.color_of("errors"))
res += self.highlight("unsettled", self.unsettled, common.color_of("unsettled"))
res += self.highlight("presettled", self.presettled, common.color_of("presettled"))
res += self.highlight("accepted", self.accepted, common.color_of("accepted"))
res += self.highlight("rejected", self.rejected, common.color_of("rejected"))
res += self.highlight("released", self.released, common.color_of("released"))
res += self.highlight("modified", self.modified, common.color_of("modified"))
res += self.highlight("aborted", self.aborted, common.color_of("aborted"))
res += self.highlight("more", self.more, common.color_of("more"))
res += self.highlight("incomplete", self.incomplete, common.color_of("unsettled"))
res += self.highlight("drain", self.drain, common.color_of("drain"))
res += self.highlight_duration("initial", self.initial_no_credit_duration, common.color_of("no_credit"))
res += self.highlight("no_credit", self.no_credit, common.color_of("no_credit"))
res += self.highlight_duration("duration", self.no_credit_duration, common.color_of("no_credit"))
res += self.highlight("no_eval", self.credit_not_evaluated, common.color_of("no_credit"))
return res
@classmethod
def show_table_heads1(cls):
return "<th rowspan=\"2\"><span title=\"AMQP errors\">ERR</span></th>" \
"<th colspan=\"6\">Settlement - disposition</th>" \
"<th colspan=\"3\">Transfer</th>" \
"<th>Flow</th>" \
"<th colspan=\"4\">Credit starvation< | /th>"
@classmethod
def show_table_heads2(cls):
return "<th><span title=\"Unsettled transfers\">UNSTL</span></th>" \
"<th><span title=\"Presettled transfers\">PRE</span></th>" \
"<th><span title=\"Disposition: accepted\">ACCPT</span></th>" \
"<th><span title=\"Disposition: rejected\">RJCT</span></th>" \
"<th><span title=\"Disposition: released\">RLSD</span></th>" \
| "<th><span title=\"Disposition: modified\">MDFD</span></th>" \
"<th><span title=\"Transfer abort=true\">ABRT</span></th>" \
"<th><span title=\"Transfer: more=true\">MOR</span></th>" \
"<th><span title=\"Transfer: incomplete; all frames had more=true\">INC</span></th>" \
"<th><span title=\"Flow: drain=true\">DRN</span></th>" \
"<th><span title=\"Initial stall (S)\">initial (S)</span></th>" \
"<th><span title=\"Credit exhausted\">-> 0</span></th>" \
"<th><span title=\"Normal credit exhaustion stall (S)\">duration (S)</span></th>" \
"<th><span title=\"Credit not evaluated\">?</span></th>"
def show_table_element(self, name, value, color):
return ("<td>%s</td>" % text.nbsp()) if value == 0 else \
("<td>%s</td>" % ("<span style=\"background-color:%s\">%s</span> " % (color, str(value))))
def show_table_duration(self, delta):
if delta.seconds == 0 and delta.microseconds == 0:
return "<td>%s</td>" % text.nbsp()
t = float(delta.seconds) + float(delta.microseconds) / 1000000.0
return ("<td>%0.06f</td>" % t)
def show_table_data(self):
res = ""
res += self.show_table_element("errors", self.errors, common.color_of("errors"))
res += self.show_table_element("unsettled", self.unsettled, common.color_of("unsettled"))
res += self.show_table_element("presettled", self.presettled, common.color_of("presettled"))
res += self.show_table_element("accepted", self.accepted, common.color_of("accepted"))
res += self.show_table_element("rejected", self.rejected, common.color_of("rejected"))
res += self.show_table_element("released", self.released, common.color_of("released"))
res += self.show_table_element("modified", self.modified, common.color_of("modified"))
res += self.show_table_element("aborted", self.aborted, common.color_of("aborted"))
res += self.show_table_element("more", self.more, common.color_of("more"))
res += self.show_table_element("incomplete", self.incomplete, common.color_of("unsettled"))
res += self.show_table_element("drain", self.drain, common.color_of("drain"))
res += self.show_table_duration(self.initial_no_credit_duration)
res += self.show_table_element("no_credit", self.no_credit, common.color_of("no_credit"))
res += self.show_table_duration(self.no_cr |
TouK/vumi | vumi/dispatchers/simple/dispatcher.py | Python | bsd-3-clause | 1,295 | 0 |
from twisted.python import log
from twisted.internet.defer import inlineCallbacks
from vumi.service import Worker
class MessageHandler(object):
    """Consumes messages from one queue and republishes them to routing keys."""

    def __init__(self, worker, queue_name, publisher, publish_keys):
        self.worker = worker
        self.queue_name = queue_name
        self.publisher = publisher
        # A None mapping means "forward nowhere".
        self.publish_keys = publish_keys or []
        log.msg("SimpleDispatcher forwarding from %s to %s" % (
                self.queue_name, self.publish_keys))

    def consume_message(self, message):
        """Log the message, then republish it once per configured routing key."""
        log.msg("SimpleDispatcher consuming on %s: %s" % (
                self.queue_name,
                repr(message)))
        for routing_key in self.publish_keys:
            self.publisher.publish_message(message, routing_key=routing_key)
class SimpleDispatcher(Worker):
    """Worker that forwards messages between queues per 'route_mappings' config."""

    @inlineCallbacks
    def startWorker(self):
        """Create the fallback publisher and one consumer per configured queue."""
        log.msg("Starting SimpleDispatcher with config: %s" % (self.config))
        self.publisher = yield self.publish_to("simpledispatcher.fallback")
        for queue_name, publish_keys in self.config['route_mappings'].items():
            h = MessageHandler(self, queue_name, self.publisher, publish_keys)
            yield self.consume(queue_name, h.consume_message)

    def stopWorker(self):
        """Log shutdown."""
        log.msg("Stopping SimpleDispatcher")
|
gibil5/openhealth | models/order/commons/acc_lib.py | Python | agpl-3.0 | 2,687 | 0.037588 | # -*- coding: utf-8 -*-
"""
Acc Lib
- Used by
account contasis pre
account line pre
Created: 11 oct 2020
Last up: 29 mar 2021
"""
import datetime
class AccFuncs:
    """Helper functions shared by the account reports (dates, orders, VAT)."""

    @staticmethod
    def static_method():
        # the static method gets passed nothing
        return "I am a static method"

    @classmethod
    def class_method(cls):
        # the class method gets passed the class
        return "I am a class method"

    def instance_method(self):
        # an instance method gets passed the instance
        return "I am an instance method"

    # ------------------------------------------------ Correct Time ------------
    @classmethod
    def correct_time(cls, date, delta):
        """Shift a 'YYYY-MM-DD HH:MM:SS' string by ``delta`` hours.

        Returns (datetime, 'DD/MM/YYYY HH:MM') for years >= 1900, otherwise
        returns None implicitly (strftime is unreliable before 1900 on some
        platforms).
        """
        if date != False:
            year = int(date.split('-')[0])
            if year >= 1900:
                DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
                DATETIME_FORMAT_sp = "%d/%m/%Y %H:%M"
                date_field1 = datetime.datetime.strptime(date, DATETIME_FORMAT)
                date_corr = date_field1 + datetime.timedelta(hours=delta, minutes=0)
                date_corr_sp = date_corr.strftime(DATETIME_FORMAT_sp)
                return date_corr, date_corr_sp

    # ----------------------------------------------------- Get Orders Filter --
    @classmethod
    def get_orders_filter(cls, obj, date_bx, date_ex):
        """Return (orders, count) of sale/cancelled orders in the date window.

        Dates are 'YYYY-MM-DD'. The window is [date_bx 05:00, date_ex + 1 day
        + 05:00).
        NOTE(review): the 05:00 offsets look like a timezone shift - confirm.
        """
        DATETIME_FORMAT = "%Y-%m-%d"
        date_begin = date_bx + ' 05:00:00'
        date_end_dt = datetime.datetime.strptime(date_ex, DATETIME_FORMAT) + datetime.timedelta(hours=24) + datetime.timedelta(hours=5, minutes=0)
        date_end = date_end_dt.strftime('%Y-%m-%d %H:%M')
        # Search orders (includes cancelled ones), ordered by serial number.
        orders = obj.env['sale.order'].search([
                ('state', 'in', ['sale', 'cancel']),
                ('date_order', '>=', date_begin),
                ('date_order', '<', date_end),
            ],
            order='x_serial_nr asc',
        )
        count = obj.env['sale.order'].search_count([
                ('state', 'in', ['sale', 'cancel']),
                ('date_order', '>=', date_begin),
                ('date_order', '<', date_end),
            ],
        )
        return orders, count

    # ------------------------------------------------------ Get Net and Tax ---
    @classmethod
    def get_net_tax(cls, amount):
        """Split a gross amount into (net, tax), each rounded to 2 decimals.

        NOTE(review): net is gross / 1.18 but tax is 18% of the *gross*
        amount, so net + tax != amount in general - confirm this is intended.
        """
        x = amount / 1.18
        net = float("{0:.2f}".format(x))
        x = amount * 0.18
        tax = float("{0:.2f}".format(x))
        return net, tax
|
Ramyak/CodingPractice | algo_practice/sort/binary_search.py | Python | gpl-2.0 | 4,558 | 0 | #!/usr/bin/env python
a = [5, 3, 6, 4, 1, 2, 0, 8, 9]
class Node(object):
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, value, left, right):
        self.value, self.left, self.right = value, left, right

    def __repr__(self):
        return str(self.value)


class BTree(object):
    """An unbalanced binary search tree; duplicate values are ignored."""

    def __init__(self, value):
        # The tree is created with a single root node.
        self.root_node = Node(value, None, None)

    def add_node(self, value):
        """Insert value in BST order; duplicates are silently dropped."""
        cur_node = self.root_node
        new_node = Node(value, None, None)
        while True:
            if cur_node.value > new_node.value:  # insert on the left side
                if cur_node.left:
                    cur_node = cur_node.left
                    continue
                else:
                    cur_node.left = new_node
                    return
            elif cur_node.value < new_node.value:  # insert on the right side
                if cur_node.right:
                    cur_node = cur_node.right
                    continue
                else:
                    cur_node.right = new_node
                    return
            break  # same as current node: do nothing

    def find_depth(self, cur_node=None, depth=0):
        """Return the depth of the subtree rooted at cur_node (root is 0)."""
        if cur_node is None:
            cur_node = self.root_node
        depth1 = depth2 = depth
        if cur_node.left:
            depth1 = self.find_depth(cur_node.left, depth + 1)
        if cur_node.right:
            depth2 = self.find_depth(cur_node.right, depth + 1)
        # Keep the deeper of the two child depths.
        depth = depth1
        if depth1 < depth2:
            depth = depth2
        return depth

    def print_nodes(self, cur_node=None, print_array=None, cur_depth=0):
        """Pretty-print the subtree, one tree level per output line."""
        is_root_node = False
        if cur_depth == 0:
            is_root_node = True
            cur_node = cur_node if cur_node else self.root_node
            total_depth = self.find_depth(cur_node)
            print_array = [[] for i in range(total_depth + 2)]
        print_array[cur_depth].append(str(cur_node.value))
        if cur_node.left:
            self.print_nodes(cur_node.left, print_array, cur_depth + 1)
        else:
            print_array[cur_depth + 1].append(' ')
        if cur_node.right:
            self.print_nodes(cur_node.right, print_array, cur_depth + 1)
        else:
            print_array[cur_depth + 1].append(' ')
        if is_root_node:
            # Only the outermost call prints; recursive calls just fill rows.
            for i in range(len(print_array)):
                print('{}{}'.format(''.join(' ' * (total_depth - i + 1)),
                                    ' '.join(print_array[i])))

    def find_node(self, value, cur_node=None):
        """Return the node containing value (AttributeError if absent)."""
        if cur_node is None:
            cur_node = self.root_node
        if cur_node.value == value:
            return cur_node
        elif cur_node.value > value:
            return self.find_node(value, cur_node.left)
        else:
            return self.find_node(value, cur_node.right)

    def del_node(self, del_value, cur_node=None):
        """Delete the node holding del_value, relinking children as needed."""
        # Find the node and its parent.
        if cur_node is None:
            cur_node = self.root_node
        parent_node = None
        while True:
            if cur_node.value == del_value:
                break
            elif cur_node.value > del_value and cur_node.left is not None:
                parent_node = cur_node
                cur_node = cur_node.left
                continue
            elif cur_node.value < del_value and cur_node.right is not None:
                parent_node = cur_node
                cur_node = cur_node.right
                continue
            return  # value not present; nothing to delete
        if cur_node.left is None or cur_node.right is None:
            # At most one child: that child (or None) replaces the node.
            replacement_node = cur_node.left if cur_node.left else \
                cur_node.right
        else:
            # Two children: replace with the rightmost node of the left
            # subtree (the in-order predecessor).
            replacement_node = cur_node.left
            replacement_node_parent = cur_node
            while replacement_node.right:
                replacement_node_parent = replacement_node
                replacement_node = replacement_node.right
            replacement_node_parent.right = None
            replacement_node.left = cur_node.left
            replacement_node.right = cur_node.right
        if parent_node:
            if parent_node.left == cur_node:
                parent_node.left = replacement_node
            else:
                parent_node.right = replacement_node
            return
        else:
            self.root_node = replacement_node
if __name__ == '__main__':
    # Demo: build the tree from `a`, show it, then exercise find/delete.
    btree = BTree(a[0])
    for i in a[1:]:
        btree.add_node(i)
    print(a)
    btree.print_nodes()
    found_node = btree.find_node(3)
    btree.print_nodes(cur_node=found_node)
    btree.del_node(5)
    btree.print_nodes()
|
CCI-MOC/GUI-Backend | service/quota.py | Python | apache-2.0 | 8,295 | 0.000723 | from threepio import logger
from django.core.exceptions import ValidationError
from core.models import IdentityMembership, Identity
from core.models.quota import (
has_floating_ip_count_quota,
has_port_count_quota,
has_instance_count_quota,
has_cpu_quota,
has_mem_quota,
has_storage_quota,
has_storage_count_quota,
has_snapshot_count_quota
)
from service.cache import get_cached_driver
from service.driver import get_account_driver
def check_over_instance_quota(
        username, identity_uuid, esh_size=None,
        include_networking=False, raise_exc=True):
    """Check the identity's quota against a prospective instance launch.

    :param esh_size: if given, its cpu/ram are added to the projected totals
        and the instance/port counts are incremented by one.
    :param include_networking: if True, also project one extra floating IP.
    :param raise_exc: if True, let the quota validators' ValidationError
        propagate; otherwise swallow it and return False.
    :returns: True when all quota checks pass, False otherwise (only when
        raise_exc is False).
    """
    membership = IdentityMembership.objects.get(
        identity__uuid=identity_uuid,
        member__name=username)
    identity = membership.identity
    quota = identity.quota
    driver = get_cached_driver(identity=identity)
    new_port = new_floating_ip = new_instance = new_cpu = new_ram = 0
    if esh_size:
        new_cpu += esh_size.cpu
        new_ram += esh_size.ram
        new_instance += 1
        new_port += 1
        if include_networking:
            new_floating_ip += 1
    # Will throw ValidationError if false.
    try:
        has_cpu_quota(driver, quota, new_cpu)
        has_mem_quota(driver, quota, new_ram)
        has_instance_count_quota(driver, quota, new_instance)
        has_floating_ip_count_quota(driver, quota, new_floating_ip)
        has_port_count_quota(identity, driver, quota, new_port)
        return True
    except ValidationError:
        if raise_exc:
            raise
        return False
def check_over_storage_quota(
        username, identity_uuid,
        new_snapshot_size=0, new_volume_size=0, raise_exc=True):
    """Check the identity's quota against a prospective volume/snapshot.

    Returns True when all checks pass; when raise_exc is False a failed
    check returns False instead of raising ValidationError.
    """
    membership = IdentityMembership.objects.get(
        identity__uuid=identity_uuid,
        member__name=username)
    identity = membership.identity
    quota = identity.quota
    driver = get_cached_driver(identity=identity)
    # FIXME: snapshot size and volume size likely do not share a quota, so
    # only track whether a snapshot/volume is being added (0 or 1 each).
    new_snapshot = 1 if new_snapshot_size > 0 else 0
    new_volume = 1 if new_volume_size > 0 else 0
    try:
        # Each validator raises ValidationError on failure.
        has_storage_quota(driver, quota, new_volume_size)
        has_storage_count_quota(driver, quota, new_volume)
        has_snapshot_count_quota(driver, quota, new_snapshot)
        return True
    except ValidationError:
        if not raise_exc:
            return False
        raise
def set_provider_quota(identity_uuid, limit_dict=None):
    """Push the identity's stored quota out to its cloud provider."""
    identity = Identity.objects.get(uuid=identity_uuid)
    if not identity.credential_set.all():
        # Can't update quota if credentials aren't set.
        return
    user_quota = identity.quota
    if not user_quota:
        # Can't update quota if it doesn't exist.
        return
    if identity.provider.type.name.lower() != 'openstack':
        # Only attempt to set quota for known provider types.
        return
    return _set_openstack_quota(user_quota, identity)
def _get_hard_limits(identity):
    """Look up the OpenStack "hard limits" via the account provider.

    Falls back to {'ram': 999, 'cpu': 99} when no limits can be fetched.
    """
    accounts = get_account_driver(identity.provider)
    # Start from last-resort defaults and overlay anything fetched.
    limits = {"ram": 999, "cpu": 99}
    username = identity.get_credential('key')
    project_name = identity.get_credential('ex_project_name')
    user_limits = accounts.get_quota_limit(username, project_name)
    if user_limits:
        limits.update(user_limits)
    return limits
def _set_openstack_quota(
user_quota, identity, compute=True, volume=True, network=True):
if not identity.provider.get_type_name().lower() == 'openstack':
raise Exception("Cannot set provider quota on type: %s"
% identity.provider.get_type_name())
if compute:
compute_quota = _set_compute_quota(user_quota, identity)
if network:
network_quota = _set_network_quota(user_quota, identity)
if volume:
volume_quota = _set_volume_quota(user_quota, identity)
return {
'compute': compute_quota,
'network': network_quota,
'volume': volume_quota,
}
def _limit_user_quota(user_quota, identity, limit_dict=None):
if not limit_dict:
limit_dict = _get_hard_limits(identity)
if user_quota.cpu > limit_dict['cpu']:
user_quota.cpu = limit_dict['cpu']
if user_quota.memory > limit_dict['ram']:
user_quota.memory = limit_dict['ram']
return user_quota
def _set_network_quota(user_quota, identity):
    """Push port/floating-ip counts to neutron for the identity's tenant."""
    network_values = {
        'port': user_quota.port_count,
        'floatingip': user_quota.floating_ip_count,
        # INTENTIONALLY SKIPPED/IGNORED:
        # 'subnet', 'router', 'network',
        # 'security_group', 'security_group_rules'
    }
    username = identity.created_by.username
    logger.info("Updating network quota for %s to %s"
                % (username, network_values))
    driver = get_cached_driver(identity=identity)
    tenant_id = driver._connection._get_tenant_id()
    accounts = get_account_driver(identity.provider)
    accounts.admin_driver._connection._neutron_update_quota(
        tenant_id, network_values)
    return
def _set_volume_quota(user_quota, identity):
    """Push volume count/size/snapshot quota to cinder for the identity."""
    volume_values = {
        'volumes': user_quota.storage_count,
        'gigabytes': user_quota.storage,
        'snapshots': user_quota.snapshot_count,
    }
    username = identity.created_by.username
    logger.info("Updating quota for %s to %s" % (username, volume_values))
    driver = get_cached_driver(identity=identity)
    # Cinder is keyed by the driver's username, not the portal username.
    username = driver._connection._get_username()
    ad = get_account_driver(identity.provider)
    admin_driver = ad.admin_driver
    admin_driver._connection._cinder_update_quota(username, volume_values)
    return
def _set_comp | ute_quota(user_quota, identity):
# Use THESE values...
compute_values = {
'cores': user_quota.cpu,
'ram': user_quota.memory * 1024, # NOTE: Value is stored in GB, Openstack (Liberty) expects MB
'floating_ips': user_quota.floating_ip_count,
'fixed_ips': user_quota.port_count,
'instances': user_quota.instance_count,
}
creds = identity.get_all_credentials()
if creds.get('ex_force_auth_version', '2.0_password') == "2.0_password":
compute_values.pop('instances')
username = identity.created_by.username
logger.info("Updating quota for %s to %s" % (username, compute_values))
driver = get_cached_driver(identity=identity)
username = driver._connection.key
tenant_id = driver._connection._get_tenant_id()
tenant_name = identity.project_name()
ad = get_account_driver(identity.provider)
ks_user = ad.get_user(username)
admin_driver = ad.admin_driver
try:
result = admin_driver._connection.ex_update_quota_for_user(
tenant_id, ks_user.id, compute_values)
except Exception:
logger.exception("Could not set a user-quota, trying to set tenant-quota")
result = admin_driver._connection.ex_update_quo |
SamiHiltunen/invenio-accounts | invenio_accounts/forms.py | Python | gpl-2.0 | 2,117 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
| #
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published b | y the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Additional non-userprofile fields used during registration.
Currently supported: recaptcha
"""
from flask_babelex import gettext as _
from flask_wtf import Form, Recaptcha, RecaptchaField
from wtforms import FormField
class RegistrationFormRecaptcha(Form):
    """Sub-form carrying a single validated reCAPTCHA field.

    (The previous docstring, "Form for editing user profile", was a
    copy-paste error.)
    """

    recaptcha = RecaptchaField(validators=[
        Recaptcha(message=_("Please complete the reCAPTCHA."))])
def confirm_register_form_factory(Form, app):
    """Return confirmation for extended registration form.

    Extends Form with a reCAPTCHA field when both reCAPTCHA keys are
    configured; otherwise returns Form unchanged.
    """
    has_keys = app.config.get('RECAPTCHA_PUBLIC_KEY') and \
        app.config.get('RECAPTCHA_PRIVATE_KEY')
    if not has_keys:
        return Form

    class ConfirmRegisterForm(Form):
        recaptcha = FormField(RegistrationFormRecaptcha, separator='.')

    return ConfirmRegisterForm
def register_form_factory(Form, app):
    """Return extended registration form.

    Extends Form with a reCAPTCHA field when both reCAPTCHA keys are
    configured; otherwise returns Form unchanged.
    """
    has_keys = app.config.get('RECAPTCHA_PUBLIC_KEY') and \
        app.config.get('RECAPTCHA_PRIVATE_KEY')
    if not has_keys:
        return Form

    class RegisterForm(Form):
        recaptcha = FormField(RegistrationFormRecaptcha, separator='.')

    return RegisterForm
|
ssorgatem/pulsar | galaxy/tools/deps/brew_exts.py | Python | apache-2.0 | 19,588 | 0.002297 | #!/usr/bin/env python
# % brew vinstall samtools 1.0
# % brew vinstall samtools 0.1.19
# % brew vinstall samtools 1.1
# % brew env samtools 1.1
# PATH=/home/john/.linuxbrew/Cellar/htslib/1.1/bin:/home/john/.linuxbrew/Cellar/samtools/1.1/bin:$PATH
# export PATH
# LD_LIBRARY_PATH=/home/john/.linuxbrew/Cellar/htslib/1.1/lib:/home/john/.linuxbrew/Cellar/samtools/1.1/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH
# % . <(brew env samtools 1.1)
# % which samtools
# /home/john/.linuxbrew/Cellar/samtools/1.1/bin/samtools
# % . <(brew env samtools 0.1.19)
# % which samtools
# /home/john/.linuxbrew/Cellar/samtools/0.1.19/bin/samtools
# % brew vuninstall samtools 1.0
# % brew vdeps samtools 1.1
# htslib@1.1
# % brew vdeps samtools 0.1.19
from __future__ import print_function
try:
import argparse
except ImportError:
argparse = None
import contextlib
import json
import glob
import os
import re
import sys
import string
import subprocess
WHITESPACE_PATTERN = re.compile("[\s]+")
DESCRIPTION = "Script built on top of linuxbrew to operate on isolated, versioned brew installed environments."
if sys.platform == "darwin":
DEFAULT_HOMEBREW_ROOT = "/usr/local"
else:
DEFAULT_HOMEBREW_ROOT = os.path.join(os.path.expanduser("~"), ".linuxbrew")
NO_BREW_ERROR_MESSAGE = "Could not find brew on PATH, please place on path or pass to script with --brew argument."
CANNOT_DETERMINE_TAP_ERROR_MESSAGE = "Cannot determine tap of specified recipe - please use fully qualified recipe (e.g. homebrew/science/samtools)."
VERBOSE = False
RELAXED = False
BREW_ARGS = []
class BrewContext(object):
    """Parsed output of `brew config`: homebrew prefix and cellar locations."""

    def __init__(self, args=None):
        ensure_brew_on_path(args)
        raw_config = brew_execute(["config"])
        config_lines = [l.strip().split(":", 1)
                        for l in raw_config.split("\n") if l]
        config = dict((p[0].strip(), p[1].strip()) for p in config_lines)
        # HOMEBREW_PREFIX is unset in `brew config` output when it is
        # "/usr/local" -> https://github.com/Homebrew/homebrew/blob/master/Library/Homebrew/cmd/config.rb
        homebrew_prefix = config.get("HOMEBREW_PREFIX", "/usr/local")
        self.homebrew_prefix = homebrew_prefix
        self.homebrew_cellar = config.get(
            "HOMEBREW_CELLAR", os.path.join(homebrew_prefix, "Cellar"))
class RecipeContext(object):
    """A (recipe, version) pair resolved against a BrewContext."""

    @staticmethod
    def from_args(args, brew_context=None):
        """Build a RecipeContext from parsed CLI arguments."""
        return RecipeContext(args.recipe, args.version, brew_context)

    def __init__(self, recipe, version, brew_context=None):
        self.recipe = recipe
        self.version = version
        self.brew_context = brew_context or BrewContext()

    @property
    def cellar_path(self):
        """Path of this recipe/version's keg inside the cellar."""
        return recipe_cellar_path(
            self.brew_context.homebrew_cellar, self.recipe, self.version)

    @property
    def tap_path(self):
        """Path of the tap directory that provides this recipe."""
        return os.path.join(self.brew_context.homebrew_prefix,
                            "Library", "Taps", self.__tap_path(self.recipe))

    def __tap_path(self, recipe):
        """Return '<username>/homebrew-<repository>' for a recipe.

        Short names are resolved via `brew info`'s from_url; fully qualified
        names (user/repo/name) are parsed directly.
        """
        parts = recipe.split("/")
        if len(parts) == 1:
            info = brew_info(self.recipe)
            from_url = info["from_url"]
            if not from_url:
                raise Exception(CANNOT_DETERMINE_TAP_ERROR_MESSAGE)
            from_url_parts = from_url.split("/")
            # In a GitHub URL, 'blob' comes right after username/repository.
            blob_index = from_url_parts.index("blob")
            if blob_index < 2:
                raise Exception(CANNOT_DETERMINE_TAP_ERROR_MESSAGE)
            username = from_url_parts[blob_index - 2]
            repository = from_url_parts[blob_index - 1]
        else:
            assert len(parts) == 3
            username = parts[0]
            repository = "homebrew-%s" % parts[1]
        return os.path.join(username, repository)
def main():
global VERBOSE
global RELAXED
global BREW_ARGS
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("--brew", help="Path to linuxbrew 'brew' executable to target")
actions = ["vinstall", "vuninstall", "vdeps", "vinfo", "env"]
action = __action(sys)
if not action:
parser.add_argument('action', metavar='action', help="Versioned action to perform.", choices=actions)
parser.add_argument('recipe', metavar='recipe', help="Recipe for action - should be absolute (e.g. homebrew/science/samtools).")
parser.add_argument('version', metavar='version', help="Version for action (e.g. 0.1.19).")
parser.add_argument('--relaxed', action='store_true', help="Relaxed processing - for instance allow use of env on non-vinstall-ed recipes.")
parser.add_argument('--verbose', action='store_true', help="Verbose output")
parser.add_argument('restargs', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.verbose:
VERBOSE = True
if args.relaxed:
RELAXED = True
BREW_ARGS = args.restargs
if not action:
action = args.action
brew_context = BrewContext(args)
recipe_context = RecipeContext.from_args(args, brew_context)
if action == "vinstall":
versioned_install(recipe_context, args.recipe, args.version)
elif action == "vuninstall":
brew_execute(["switch", args.recipe, args.version])
brew_execute(["uninstall", args.recipe])
elif action == "vdeps":
print_versioned_deps(recipe_context, args.recipe, args.version)
elif action == "env":
env_statements = build_env_statements_from_recipe_context(recipe_context)
print(env_statements)
elif action == "vinfo":
with brew_head_at_version(recipe_context, args.recipe, args.version):
print(brew_info(args.recipe))
else:
raise NotImplementedError()
class CommandLineException(Exception):
def __init__(self, command, stdout, stderr):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.message = ("Failed to execute command-line %s, stderr was:\n"
"-------->>begin stderr<<--------\n"
"%s\n"
"-------->>end stderr<<--------\n"
"-------->>begin stdout<<--------\n"
"%s\n"
"-------->>end stdout<<--------\n"
) % (command, stderr, stdout)
def __str__(self):
return self.message
def versioned_install(recipe_context, package=None, version=None, installed_deps=[]):
if package is None:
package = recipe_context.recipe
version = recipe_context.version
attempt_unlink(package)
with brew_head_at_version(recipe_context, package, version):
deps = brew_deps(package)
deps_metadata = []
dep_to_version = {}
for dep in deps:
version_info = brew_versions_info(dep, recipe_context.tap_path)[0]
dep_version = version_info[0]
dep_to_version[dep] = dep_version
versioned = version_info[2]
if versioned:
dep_to_version[dep] = dep_version
if dep in installed_deps:
continue
versioned_install(recipe_context, dep, dep_version)
installed_deps.append(dep)
else:
# Install latest.
dep_to_version[dep] = None
if dep in installed_deps:
continue
unversioned_install(dep)
try:
for dep in deps:
dep_version = dep_to_version[dep]
if dep_version:
brew_execute(["switch", dep, dep_version])
else:
brew_execute(["link", dep])
# dep_version obtained from brew versions doesn't
# include revision. This linked_keg attribute does.
keg_verion = brew_info(dep)["linked_keg"]
dep_metadata = {
'name': dep,
'version': keg_verion,
'versioned': versioned
}
deps_metadata.append(dep_metadata)
cellar_root = recipe_context.brew_context.homebrew_cellar
|
phihag/adhocracy | src/adhocracy/migration/versions/008_add_tagging.py | Python | agpl-3.0 | 2,241 | 0.003124 | from datetime import datetime
from sqlalchemy import *
from migrate import *
import migrate.changeset
meta = MetaData()
user_table = Table('user', meta,
Column('id', Integer, primary_key=True),
Column('user_name', Unicode(255), nullable=False, unique=True, index=True),
Column('display_name', Unicode(255), nullable=True, index=True),
Column('bio', UnicodeText(), nullable=True),
Column('email', Unicode(255), nullable=True, unique=False),
Column('email_priority', Integer, default=3),
Column('activation_code', Unicode(255), nullable=True, unique=False),
Column('reset_code', Unicode(255), nullable=True, unique=False),
Column('password', Unicode(80), nullable=False),
Column('locale', Unicode(7), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow, onupdate=datetime.utcnow),
Column('delete_time', DateTime)
)
delegateable_table = Table('delegateable', meta,
Column('id', Integer, primary_key=True),
Column('label', Unicode(255), nullable=False),
Column('type', String(50)),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow, onupda | te=datetime.utcnow),
Column('delete_time', DateTime, nullable=True),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
Column('instance_id', Intege | r, ForeignKey('instance.id'), nullable=False)
)
tag_table = Table('tag', meta,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('name', Unicode(255), nullable=False)
)
tagging_table = Table('tagging', meta,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('tag_id', Integer, ForeignKey('tag.id'), nullable=False),
Column('delegateable_id', Integer, ForeignKey('delegateable.id'), nullable=False),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False)
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
tag_table.create()
tagging_table.create()
def downgrade(migrate_engine):
raise NotImplementedError()
|
geertj/draco2 | draco2/draco/robot.py | Python | mit | 2,805 | 0 | # vi: ts=8 sts=4 sw=4 et
#
# robot.py: web robot detection
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import os
import bisect
import logging
class RobotSignatures(object):
"""A repository of robot signatures.
The repository is used for detecting web robots by matching their
'User-Agent' HTTP header.
"""
def __init__(self):
"""Constructor."""
self.m_robots = []
self.m_files = []
self.m_change_context = None
@classmethod
def _create(cls, api):
"""Factory method."""
robots = cls()
robots._set_change_manager(api.changes)
section = api.config.ns('draco2')
datadir = section['datadirectory']
path = os.path.join(datadir, 'robots.ini')
robots.add_file(path)
docroot = section['documentroot']
path = os.path.join(docroot, 'robots.ini')
robots.add_file(path)
return robots
def _set_change_manager(self, changes):
"""Use change manager `changes'."""
self.m_change_context = changes.get_context('draco2.draco.robot')
self.m_change_context.add | _callback(self._change_callback)
def _change_callback(self, api):
"""Change manager | callback (when files in the ctx change)."""
self.m_robots = []
for fname in self.m_files:
self._parse_file(fname)
logger = logging.getLogger('draco2.draco.robot')
logger.debug('Reloaded robot signatures (change detected).')
def add_file(self, fname):
"""Load robot signatures from file `fname'."""
self.m_files.append(fname)
self._parse_file(fname)
if self.m_change_context:
self.m_change_context.add_file(fname)
def _parse_file(self, fname):
"""Parse a robot signatures file."""
try:
fin = file(fname)
except IOError:
return
for line in fin:
line = line.strip()
if not line or line.startswith('#'):
continue
self.m_robots.append(line.lower())
fin.close()
self.m_robots.sort()
def match(self, agent):
"""Match user agent string `agent' against the signatures.
The match operation done is a prefix match, i.e. we have a match
if `agent' matches a prefix of a registered signature.
"""
agent = agent.lower()
i = bisect.bisect_right(self.m_robots, agent)
return i > 0 and agent.startswith(self.m_robots[i-1])
|
pzread/judge | StdChal.py | Python | mit | 33,830 | 0.003547 | '''Standard challenge module.'''
import os
import shutil
import fcntl
from cffi import FFI
from tornado import gen, concurrent, process
from tornado.stack_context import StackContext
from tornado.ioloop import IOLoop
import PyExt
import Privilege
import Config
from Utils import FileUtils
STATUS_NONE = 0
STATUS_AC = 1
STATUS_WA = 2
STATUS_RE = 3
STATUS_TLE = 4
STATUS_MLE = 5
STATUS_CE = 6
STATUS_ERR = 7
MS_BIND = 4096
class StdChal:
'''Standard challenge.
Static attributes:
last_uniqid (int): Last ID.
last_standard_uid (int): Last UID for standard tasks.
last_restrict_uid (int): Last UID for restricted tasks.
null_fd (int): File descriptor of /dev/null.
build_cache (dict): Cache information of builds.
build_cache_refcount (dict): Refcount of build caches.
Attributes:
uniqid (int): Unique ID.
code_path (string): Code path.
res_path (string): Resource path.
comp_typ (string): Type of compile.
judge_typ (string): Type of judge.
test_list ([dict]): Test parameter lists.
metadata (dict): Metadata for judge.
chal_id (int): Challenge ID.
chal_path (string): Challenge path.
'''
last_uniqid = 0
last_standard_uid = Config.CONTAINER_STANDARD_UID_BASE
last_restrict_uid = Config.CONTAINER_RESTRICT_UID_BASE
null_fd = None
@staticmethod
def init():
'''Initialize the module.'''
with StackContext(Privilege.fileaccess):
try:
shutil.rmtree('container/standard/home')
except FileNotFoundError:
pass
os.mkdir('container/standard/home', mode=0o771)
try:
shutil.rmtree('container/standard/cache')
except FileNotFoundError:
pass
os.mkdir('container/standard/cache', mode=0o771)
ffi = FFI()
ffi.cdef('''int mount(const char source[], const char target[],
const char filesystemtype[], unsigned long mountflags,
const void *data);''')
ffi.cdef('''int umount(const char *target);''')
libc = ffi.dlopen('libc.so.6')
with StackContext(Privilege.fullaccess):
libc.umount(b'container/standard/dev')
libc.mount(b'/dev', b'container/standard/dev', b'', MS_BIND, \
ffi.NULL)
StdChal.null_fd = os.open('/dev/null', os.O_RDWR | os.O_CLOEXEC)
StdChal.build_cache = {}
StdChal.build_cache_refcount = {}
@staticmethod
def get_standard_ugid():
'''Generate standard UID/GID.
Returns:
(int, int): Standard UID/GID
'''
StdChal.last_standard_uid += 1
return (StdChal.last_standard_uid, StdChal.last_standard_uid)
@staticmethod
def get_restrict_ugid():
'''Generate restrict UID/GID.
Returns:
(int, int): Restrict UID/GID
'''
StdChal.last_restrict_uid += 1
return (StdChal.last_restrict_uid, StdChal.last_restrict_uid)
@staticmethod
def build_cache_find(res_path):
'''Get build cache.
Args:
res_path (string): Resource path.
Returns:
(string, int): (cache hash, GID) or None if not found.
'''
try:
return StdChal.build_cache[res_path]
except KeyError:
return None
@staticmethod
def build_cache_update(res_path, cache_hash, gid):
'''Update build cache.
Args:
res_path (string): Resource path.
cache_hash (int): Cache hash.
gid (int): GID.
Returns:
None
'''
ret = StdChal.build_cache_find(res_path)
if ret is not None:
StdChal.build_cache_decref(ret[0])
del StdChal.build_cache[res_path]
StdChal.build_cache[res_path] = (cache_hash, gid)
StdChal.build_cache_refcount[cache_hash] = 1
@staticmethod
def build_cache_incref(cache_hash):
'''Increment the refcount of the build cache.
Args:
cache_hash (int): Cache hash.
Returns:
None
'''
StdChal.build_cache_refcount[cache_hash] += 1
@staticmethod
def build_cache_decref(cache_hash):
'''Decrement the refcount of the build cache.
Delete the build cache if the refcount = 0.
Args:
cache_hash (int): Cache hash.
Returns:
None
'''
StdChal.build_cache_refcount[cache_hash] -= 1
if StdChal.build_cache_refcount[cache_hash] == 0:
with StackContext(Privilege.fileaccess):
shutil.rmtree('container/standard/cache/%x'%cache_hash)
def __init__(self, chal_id, code_path, comp_typ, judge_typ, res_path, \
test_list, metadata):
'''Initialize.
Args:
chal_id (int): Challenge ID.
code_path (string): Code path.
comp_typ (string): Type of compile.
judge_typ (string): Type of judge.
res_path (string): Resource path.
test_list ([dict]): Test parameter lists.
metadata (dict): Metadata for judge.
'''
StdChal.last_uniqid += 1
self.uniqid = StdChal.last_uniqid
self.code_path = code_path
self.res_path = res_path
self.comp_typ = comp_typ
self.judge_typ = judge_typ
self.test_list = test_list
self.metadata = metadata
self.chal_id = chal_id
self.chal_path = None
StdChal.last_standard_uid += 1
self.compile_uid, self.compile_gid = StdChal.get_standard_ugid()
@gen.coroutine
def prefetch(self):
'''Prefetch files.'''
path_set = set([self.code_path])
for root, _, files in os.walk(self.res_path):
for filename in files:
path_set.add(os.path.abspath(os.path.join(root, filename)))
path_list = list(path_set)
proc_list = []
with StackContext(Privilege.fileaccess):
for idx in range(0, len(path_list), 16):
proc_list.append(process.Subprocess(
['./Prefetch.py'] + path_list[idx:idx + 16],
stdout=process.Subprocess.STREAM))
for proc in proc_list:
yield proc.stdout.read_bytes(2)
@gen.coroutine
def start(self):
'''Start the challenge.
Returns: |
dict: Challenge result.
'''
cache_hash = None
cache_gid = None
# Check if special judge needs to rebuild.
if self.judge_typ in ['ioredir']:
hashproc = process.Subprocess( \
['./Hash | Dir.py', self.res_path + '/check'], \
stdout=process.Subprocess.STREAM)
dirhash = yield hashproc.stdout.read_until(b'\n')
dirhash = int(dirhash.decode('utf-8').rstrip('\n'), 16)
ret = StdChal.build_cache_find(self.res_path)
if ret is not None and ret[0] == dirhash:
cache_hash, cache_gid = ret
judge_ioredir = IORedirJudge('container/standard', \
'/cache/%x'%cache_hash)
else:
cache_hash = dirhash
_, cache_gid = StdChal.get_standard_ugid()
build_ugid = StdChal.get_standard_ugid()
build_relpath = '/cache/%x'%cache_hash
build_path = 'container/standard' + build_relpath
judge_ioredir = IORedirJudge('container/standard', \
build_relpath)
if not (yield judge_ioredir.build(build_ugid, self.res_path)):
return [(0, 0, STATUS_ERR)] * len(self.test_list), ''
FileUtils.setperm(build_path, \
Privilege.JUDGE_UID, cache_gid, umask=0o750)
with StackContext(Privilege.fullaccess):
os.chmod(build_path, 0o750)
StdChal.build_cache_update(self.res_path, cache_hash, cache_gid)
print('StdChal %d built checker %x'%(self.chal_id, cache_hash |
kumy/pycaching | test/test_utfgrid.py | Python | lgpl-3.0 | 7,914 | 0.000379 | #!/usr/bin/env python3
import os
import json
import logging
import unittest
from pycaching import Geocaching
from pycaching.utfgrid import UTFGrid, GridCoordinateBlock
from pycaching.errors import Error
from test.test_geocaching import _username, _password
_this_folder = os.path.dirname(__file__)
sample_files = {i: os.path.join(_this_folder, i) for i in ["sample_caches.csv", "sample_utfgrid.json"]}
class TestUTFGrid(unittest.TestCase):
def setUp(self):
self.grid = UTFGrid(Geocaching(), 8800, 5574, 14)
def test_download(self):
"""Test if downloading a tile goes nice without errors"""
self.grid._gc.login(_username, _password)
with self.subTest("Not getting .png tile first"):
list(self.grid.download())
with self.subTest("Getting .png tile first"):
list(self.grid.download(get_png_first=True))
def test_parse(self):
"""Parse locally stored grid and compa | re to expected results"""
expected_caches = {}
with open(sample_files["sample_caches.csv"]) as f:
for row in f:
wp, lat, lon = row.split(',')
expected_caches[wp] = | (float(lat), float(lon))
with open(sample_files["sample_utfgrid.json"]) as f:
j = json.loads(f.read())
caches = self.grid._parse_utfgrid(j)
for c in caches:
with self.subTest("Cache " + wp):
self.assertIn(c.wp, expected_caches)
self.assertAlmostEqual(c.location.latitude, expected_caches[c.wp][0])
self.assertAlmostEqual(c.location.longitude, expected_caches[c.wp][1])
expected_caches.pop(c.wp)
self.assertEqual(len(expected_caches), 0)
class TestGridCoordinateBlock(unittest.TestCase):
# {descriptor: [points, midpoint, x_lim, y_lim]}
good_cases = {9: [[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)],
[2.0, 2.0],
(1, 3), (1, 3)],
6: [[(1, 0), (1, 1),
(2, 0), (2, 1),
(3, 0), (3, 1)],
[2.0, 0.0],
(1, 3), (-1, 1)],
4: [[(62, 62), (62, 63),
(63, 62), (63, 63)],
[63.0, 63.0],
(62, 64), (62, 64)],
3: [[(63, 30), (63, 31), (63, 32)],
[64.0, 31.0],
(63, 65), (30, 32)],
2: [[(62, 0),
(63, 0)],
[63.0, -1.0],
(62, 64), (-2, 0)],
1: [[(0, 63)],
[-1.0, 64.0],
(-2, 0), (63, 65)],
}
bad_cases = {'too much points':
[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3), (3, 4)],
'still too much points':
[(63, 30), (63, 31), (63, 32), (63, 33)],
'point missing: 9':
[(1, 1), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)],
'point missing: 6':
[(1, 0), (1, 1),
(2, 0),
(3, 0), (3, 1)],
'points not aligned':
[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 3), (2, 4),
(3, 1), (3, 2), (3, 3)],
}
def setUp(self):
self.grid = UTFGrid(Geocaching(), 8800, 5574, 14)
self.grid.size = 64
self.cb = GridCoordinateBlock(self.grid)
def test_determine_block_size(self, *block_points):
with self.subTest("Initial value"):
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("Initial value of instance"):
self.assertEqual(GridCoordinateBlock(self.grid).size, 3)
with self.subTest("No changes: same value"):
sizes = [100] * 9 + [4] * 3 + [1]
GridCoordinateBlock.determine_block_size(*sizes)
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("No changes: no input"):
GridCoordinateBlock.determine_block_size()
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("Should change to 16"):
sizes = [16] * 21 + [4]
with self.assertLogs(level=logging.WARNING):
GridCoordinateBlock.determine_block_size(*sizes)
self.assertEqual(GridCoordinateBlock.size, 4)
with self.subTest("New value of instance"):
self.assertEqual(GridCoordinateBlock(self.grid).size, 4)
# Set back to initial value
GridCoordinateBlock.size = 3
def test_add_point(self):
"""Test passing points at initialization"""
with self.subTest("Zero points"):
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid).points)
with self.subTest("One point"):
self.cb.points = []
self.cb.add((3, 4))
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, (3, 4)).points)
with self.subTest("Multiple points: pass directly"):
points = [(0, 0), (1, 2), (3, 4), (1, 2), (5, 6)]
self.cb.points = points
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, *points).points)
with self.subTest("Multiple points: update"):
self.cb.points = []
points = [(0, 0), (1, 2), (3, 4), (1, 2), (5, 6)]
self.cb.update(points)
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, *points).points)
def test_get_middle_point(self):
"""Check that correct middle points are returned"""
for case in [self.good_cases, self.bad_cases]:
for i in case:
if case is self.good_cases:
points, mid_point, xlim, ylim = self.good_cases[i]
with self.subTest('{} points'.format(i)):
self.cb.points = points
self.assertEqual(self.cb._get_middle_point(),
mid_point)
else:
with self.subTest('Malformed input: {}'.format(i)):
with self.assertRaises(Error):
self.cb.points = self.bad_cases[i]
self.cb._get_middle_point()
def test_check_block(self):
"""Test block form with various passes and fails"""
for case in [self.good_cases, self.bad_cases]:
for i in case:
if case is self.good_cases:
self.cb.points = case[i][0]
with self.subTest(i):
if i == 9:
self.assertEqual(self.cb._check_block(), 1, i)
else:
self.assertEqual(self.cb._check_block(), 2, i)
else:
self.cb.points = case[i]
with self.subTest(i):
self.assertEqual(self.cb._check_block(), 0, i)
def test_find_limits(self):
"""Check calculation of block limits when going out of the border"""
for i in self.good_cases:
points, mid_point, xlim, ylim = self.good_cases[i]
self.cb.points = points
for axis, limits in zip(['x', 'y'], [xlim, ylim]):
with self.subTest('{} points, {} axis'.format(i, axis)):
self.assertEqual(self.cb._find_limits(axis), limits)
|
tensorflow/tensorflow | tensorflow/python/data/experimental/kernel_tests/counter_test.py | Python | apache-2.0 | 1,927 | 0.003114 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.Counter`."""
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class CounterTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(start=3, step=4, expected_output=[[3, 7, 11]]) +
combinations.combine(start=0, step=-1, expected_output=[[0, -1, -2]]))
)
def testCounter(self, start, step, expected_output):
"""Test dataset construction using `count`."""
dataset = counter.Counter(start, step)
self.assertEqual(
[ | ], dataset_ops.get_legacy_output_shapes(dataset).as_list())
self.assertEqual(dtypes.int64, dataset_ops.get_legacy_output_types(dataset))
get_next = self.getNext(dataset)
for expect | ed in expected_output:
self.assertEqual(expected, self.evaluate(get_next()))
if __name__ == "__main__":
test.main()
|
jbrendel/RESTx | src/python/restx/components/_ResourceCreateForm.py | Python | gpl-3.0 | 9,666 | 0.009518 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
A sample template for RESTx components, written in Python.
"""
import urllib
import restx.components
import restx.settings as settings
from restx.platform_specifics import STORAGE_OBJECT
from restx.components.api import *
from org.mulesoft.restx.exception import *
class _ResourceCreateForm(BaseComponent):
# Name, description and doc string of the component as it should appear to the user.
NAME = "_ResourceCreateForm" # Names starting with a '_' are kept private
DESCRIPTION = "Allows creation of a new resource by displaying a resource creation form"
DOCUMENTATION = \
"""The resource gets the name of a component as parameter at run time.
It then reads information about the component and constructs a proper
HTML form suitable for resource creation.
The user submits the filled-out form and a new resource is created.
"""
PARAM_DEFINITION = {}
# A dictionary with information about each exposed service method (sub-resource).
SERVICES = {
"form" : {
"desc" : "Show the resource creation form",
"params" : {
"component_name" : ParameterDef(PARAM_STRING, "Name of the component", required=True),
"message" : ParameterDef(PARAM_STRING, "An error message", required=False, default=""),
"specialized" : ParameterDef(PARAM_BOOL, "Indicates if this is based on a specialized component", required=False, default=False),
},
"positional_params": [ "component_name" ]
},
}
def __create(self, input, component_name, specialized=False):
"""
Accept a resource creation form for a specified component.
"""
d = dict()
for name, value in input.items():
path_elems = name.split("__")
d2 = d
for i, pe in enumerate(path_elems):
if i < len(path_elems)-1:
# More elements to come later? We must create a dict
d2 = d2.setdefault(pe, dict())
else:
if value:
d2[pe] = value
try:
return (True, makeResource(component_name, d, specialized), d)
except RestxException, e:
return (False, e.msg, d)
def form(self, method, input, component_name, message="", specialized=False):
"""
Display a resource creation form for a specified component.
@param method: The HTTP request method.
@type method: string
@param input: Any data that came in the body of the request.
@type input: string
@param component_name: Name of the component for which to create the resource.
@type component_name: string
@param message: An error message to be displayed above the form.
@type message: string
@return: The output data of this service.
@rtype: Result
"""
input_params = dict()
input_rctp = dict()
if input and HttpMethod.POST:
flag, msg, input = self.__create(input, component_name, specialized)
if not flag:
message = msg
else:
return Result.created(msg['uri'], msg)
if input:
if type(input) is dict:
# We receive a dict of values if the 'create' method discovered an
# error. In that case, the values should be used to pre-populate
# the fields when the form is re-displayed (with the error messsage
# on top).
input_rctp = input.get('resource_creation_params', dict()) # Resource creation time parameters
input_params = input.get('params', dict()) # Other parameters
if specialized:
# Need to read the definition of the partial resource and get the
# component name from there.
specialized_code_name = component_name
specialized_def = STORAGE_OBJECT.loadResourceFromStorage(specialized_code_name, True)
component_uri = specialized_def['private']['code_uri']
elems = component_uri.split("/")
component_name = elems[len(elems)-1]
# Take the parameter map from the component
comp = restx.components.make_component(component_name)
if not comp:
return Result.notFound("Cannot find component '%s'" % component_name)
header = settings.HTML_HEADER
# Assemble the form elements for the parameters
params = dict()
params.update(comp.getParams()) # In case this is a Java component, we get a Python dict this way
if specialized:
fname = specialized_def['public']['name']
fdesc = specialized_def['public']['desc']
# Remove all parameters that have been specified in the specialized component resource
# definition already
spec_params = specialized_def['private'].get('params')
if spec_params:
for name in spec_params:
if name in params:
del params[name]
else:
fname = comp.getName()
fdesc = comp.getDesc()
param_fields_html = ""
if params:
param_field_names = params.keys()
param_field_names.sort()
for pname in param_field_names:
pdef = params[pname]
if not pdef.required:
opt_str = "<br>optional, default: %s" % pdef.getDefaultVal()
else:
opt_str = ""
values = input_params.get(pname)
if type(values) is not list and pdef.isList():
if v | alues is None:
values = []
else:
values = [ values ]
param_fields_html | += \
"""<tr>
<td valign=top id="%s_name">%s<br><small>(%s%s)</small></td>
<td valign=top>%s</td>
</tr>""" % (pname, pname, pdef.desc, opt_str, pdef.html_type("params__"+pname, values))
if message:
msg = "<b><i><font color=red>%s</font></i></b><br><p>" % message
else:
msg = ""
body = """
<h3>Resource creation form for: %s</h3>
<p><i>"%s"</i></p>
<hr>
Please enter the resource configuration...<br><p>
%s
<form id="resource_form" name="input" action="%s" method="POST">
<table>""" % (fname, fdesc, msg, "%s%s/form/%s%s" % (settings.DOCUMENT_ROOT, self.getMyResourceUri(),
component_name if not specialized else specialized_code_name, "?specialized=y" if specialized else ""))
# Gather any initial values of the resource creation time form fields
suggested_name_value = input_rctp.get("suggested_name", "")
if suggested_name_value:
suggested_name_value = 'value="%s" ' % suggested_name_value
desc_value = in |
rs-dev/Test-Secretary | test_secretary/celery_setup.py | Python | isc | 446 | 0 | import os
from celery | import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_secretary.settings')
app = Celery('test_secretary')
# Using a string here means the worker will not have to
# pickle the object when using Windo | ws.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
efogliatto/multiphaseLB | src/tools/mesh/salomesh/io/write_boundaries.py | Python | gpl-3.0 | 649 | 0.040062 | import sys
import os
import salome
import numpy
from salome.geom import geomtools
def write_boundaries( geompy, bd ):
directory = "lattice"
if not os.path.exists(directory):
os.makedirs(directory)
file = open(directory + "/bound | ary", 'w')
# Number of points
file.write( str(len(bd.keys())) )
file.write( "\n" )
for key, value in bd.iteritems() :
file.write( "\n" )
file.write( key )
file.write( "\n" )
file.write( str(len(value)) )
file.write( "\n" )
| for id in value:
file.write("%d\n" % id)
file.close()
|
ercanezin/ce888labs | lab8/imdb.py | Python | gpl-3.0 | 2,192 | 0.011861 |
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import Dense, Activation, Embedding, GlobalMaxPooling1D,Convolution1D, Input,LSTM,merge
from keras.datasets import imdb
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
###PREPROCCES | SING
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words | =max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print (X_train[0])
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
###PREPROCCESSING ENDS
inputs = Input(shape=(maxlen,))
m = inputs
m = Embedding(max_features, 128, dropout=0.2)(m)
x = Convolution1D(nb_filter=32, filter_length=4, border_mode='valid',activation='relu', subsample_length=1)(m)
x = GlobalMaxPooling1D()(x)
y=LSTM(70)(m)
z=merge([x, y], mode='concat', concat_axis=1)
z = Dense(1)(z)
predictions = Activation("sigmoid")(z)
model = Model(input=inputs, output=predictions)
#
# model = Sequential()
# model.add(Embedding(max_features, embedding_size, input_length=maxlen))
# model.add(Dropout(0.25))
# model.add(Convolution1D(nb_filter=nb_filter,
# filter_length=filter_length,
# border_mode='valid',
# activation='relu',
# subsample_length=1))
# model.add(MaxPooling1D(pool_length=pool_length))
# model.add(LSTM(lstm_output_size))
# model.add(Dense(1))
# model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc) |
crabmanX/m4baker | src/mainWindow.py | Python | gpl-2.0 | 21,163 | 0.007041 | # -*- coding: utf-8 -*-
#
# M4Baker
# Copyright (C) 2010 Kilian Lackhove
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either versio | n 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# ME | RCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Module implementing MainWindow.
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from Ui_mainWindow import Ui_MainWindow
from baseclasses import *
from splitDialog import splitDialog
from aboutDialog import aboutDialog
TITLE, CHAPTER, TRACK, DURATION, STARTTIME, FILENAME, ENDTIME = range(7)
def makeClickable(widget):
class clickFilter(QObject):
clicked = pyqtSignal()
def eventFilter(self, obj, event):
if obj == widget:
if event.type() == QEvent.MouseButtonRelease:
self.clicked.emit()
return True
return False
filter = clickFilter(widget)
widget.installEventFilter(filter)
return filter.clicked
class MainWindow(QMainWindow, Ui_MainWindow):
"""
Class documentation goes here.
"""
def __init__(self, parent = None):
"""
Constructor
"""
class delkeyFilter(QObject):
delkeyPressed = pyqtSignal()
def eventFilter(self, obj, event):
if event.type() == QEvent.KeyPress:
if event.key() == Qt.Key_Delete:
self.delkeyPressed.emit()
return True
return False
class returnkeyFilter(QObject):
def eventFilter(self, obj, event):
if event.type() == QEvent.KeyPress:
if event.key() == Qt.Key_Return:
current = obj.currentIndex()
current = obj.indexBelow(current)
obj.setCurrentIndex(current)
return False
self.audiobookList = audiobookContainer()
self.currentDir = os.getcwd()
QMainWindow.__init__(self, parent)
self.setupUi(self)
self.stackedWidget.setCurrentWidget(self.infoPage)
makeClickable(self.coverLabel).connect(self.on_coverLabel_clicked)
self.model = audiobookTreeModel()
self.dataTreeView.setModel(self.model)
self.progessDelegate = progressBarDelegate()
self.dataTreeView.setItemDelegateForColumn(1, self.progessDelegate)
self.connect(self.dataTreeView.selectionModel(),
SIGNAL('currentChanged(QModelIndex, QModelIndex)'),
self.on_dataTreeView_currentItemChanged)
self.connect(self.model, SIGNAL('dataChanged(QModelIndex,QModelIndex)'), self.dataChanged)
self.connect(self.model, SIGNAL('expand(QModelIndex)'), self.dataTreeView.expand)
#trying the new style of connecting signals
self.model.processingDone.connect(self.on_processingDone)
self.delfilter = delkeyFilter()
self.dataTreeView.installEventFilter(self.delfilter)
self.connect(self.delfilter, SIGNAL('delkeyPressed()'),
self.on_actionRemove_triggered)
self.returnFilter = returnkeyFilter()
self.dataTreeView.installEventFilter(self.returnFilter)
#allow only numbers in yearEdit
self.yearEdit.setValidator(QRegExpValidator(QRegExp(r'\d*'), self))
#set icons
self.actionMoveDown.setIcon(QIcon.fromTheme('go-down'))
self.actionMoveUp_2.setIcon(QIcon.fromTheme('go-up'))
#TODO: clean the name of this action
self.actionRemove.setIcon(QIcon.fromTheme('edit-delete'))
self.actionAddAudiobook.setIcon(QIcon.fromTheme('address-book-new'))
self.actionAddChapter.setIcon(QIcon.fromTheme('document-new'))
self.action_About.setIcon(QIcon.fromTheme('help-about'))
self.action_help.setIcon(QIcon.fromTheme('help-browser'))
self.actionExit.setIcon(QIcon.fromTheme('application-exit'))
self.actionProcess.setIcon(QIcon.fromTheme('system-run'))
self.chapterFileButton.setIcon(QIcon.fromTheme('document-open'))
self.outfileButton.setIcon(QIcon.fromTheme('document-open'))
self.updateTree()
def okToQuit(self):
reply = QMessageBox.question(self,"M4Baker - really quit?", \
"Really quit?",QMessageBox.Yes|QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
return False
elif reply == QMessageBox.Yes:
return True
def closeEvent(self, event):
if not self.okToQuit():
event.ignore()
@pyqtSignature("")
def on_actionAddAudiobook_triggered(self):
"""
Slot documentation goes here.
"""
current = self.dataTreeView.currentIndex()
formats = ["*%s" % format for format in supportedInputFiles]
fnames = QFileDialog.getOpenFileNames(
self,
"Choose audio files to create audiobook from",
self.currentDir,
'audio files (%s)' % " ".join(formats))
if fnames:
#fnames = [unicode(element) for element in fnames]
self.currentDir = fnames[-1].section(os.sep,0,-2)
newbook = audiobook([chapter(element) for element in fnames])
self.model.addAudiobooks(newbook, current)
self.updateTree()
@pyqtSignature("")
def on_actionMoveDown_triggered(self):
"""
Slot documentation goes here.
"""
indexes = self.dataTreeView.selectionModel().selectedIndexes()
#clean indexes list from double entries
cleanIndexes = []
for index in indexes:
if index.column() == 0:
cleanIndexes.append(index)
indexes = cleanIndexes
self.model.move(indexes, 'down')
@pyqtSignature("")
def on_actionRemove_triggered(self):
"""
Slot documentation goes here.
"""
current = self.dataTreeView.currentIndex()
indexes = self.dataTreeView.selectionModel().selectedIndexes()
#clean indexes list from double entries
cleanIndexes = []
for index in indexes:
if index.column() == 0:
cleanIndexes.append(index)
indexes = cleanIndexes
self.model.remove(indexes)
self.updateTree()
@pyqtSignature("")
def on_actionAddChapter_triggered(self):
"""
Slot documentation goes here.
"""
formats = ["*%s" % format for format in supportedInputFiles]
fnames = QFileDialog.getOpenFileNames(
self,
"Choose audio files to append to audiobook",
self.currentDir,
'audio files (%s)' % " ".join(formats))
if fnames:
self.currentDir = fnames[-1].section(os.sep,0,-2)
#fnames = [unicode(element) for element in fnames]
chaplist = [chapter(element) for element in fnames]
current = self.dataTreeView.currentIndex()
self.model.addChapters(chaplist, current)
self.updateTree()
#TODO: maybe it is smarter to add the chapter after current item?
@pyqtSignature("")
def on_actionSortByFilename_triggered(self):
"""
Slot documentation goes here.
"""
current = self.dataTreeV |
Mathih13/IS-105_2016_Gruppe-5 | Uke-15-ICA-9/ICA-8-Python/Model/river.py | Python | mit | 16,055 | 0.015967 | # -*- coding: utf-8 -*-
# OBS! Dårlig kodingstil! For eksempel, kommentarer skal høre til funksjoner og være mellom '''disse'''
from sm import SM
import sys
class River(SM):
river_db = [] # En klønete måte å definere database på, bør være i egen klasse og kanskje ikke en liste?
# Blir kalt hver gang klassen blir instansiert
def __init__(self, initialValue):
self.startState = initialValue
self.river_db = self.startState
def crossriver(self):
# Meget primitiv implementasjon av crossriver, her må flere detaljer inn!
if ('man isat boat' in self.river_db):
if 'boat isat left' in self.river_db:
self.remove('boat isat left')
self.add('boat isat right')
elif 'boat isat right' in self.river_db:
self.remove('boat isat right')
self.add('boat isat left')
else:
print "Man is not in boat. Please get in."
self.updateWorld()
def putIn(self, item):
if ([item +' isat left'] in self.river_db):
self.remove([item+' isat left'])
else:
self.remove(item+' isat right')
self.add([item+' isat boat'])
self.updateWorld()
def takeOut(self, item):
self.remove(item+' isat boat')
if 'boat isat left' in self.river_db:
self.add(item+' isat left')
if 'boat isat right' in self.river_db:
self.add(item+' isat right')
self.updateWorld()
def getIn(self):
# Check where man is, remove the correct one(existing one)
if 'man isat left' in self.river_db:
self.remove('man isat left')
elif 'man isat right' in self.river_db:
self.remove('man isat right')
self.add('man isat boat') # Add man to boat.
self.updateWorld()
def getout(self):
# Check if the man is in the boat at all
if 'man isat boat' in self.river_db:
self.remove('man isat boat') # Remove man from boat
# Check if man is exiting on left or right side.
if 'boat isat left' in self.river_db:
self.add('man isat left')
elif 'boat isat right' in self.river_db:
self.add('man isat right')
else:
print 'Cannot get out. The man is not in the boat.' # If man is not in the boat.
self.updateWorld()
def manCheck(self):
# Run a check to see where the man is currently and apply the ' man' to the relevant string.
self.boatman = ''
self.landmanleft = ''
self.landmanright = ''
if ('man isat boat' in self.river_db):
self.boatman = ' man'
if ('man isat left' in self.river_db):
self.landmanleft = ' man'
if ('man isat right') in self.river_db:
self.landmanright = ' man'
def interface(self):
# Her implementeres logikken for "vakker" utskrift
# ...
print "** Here is the state of the river-world:"
self.s1 = "** [chicken fox grain" +self.landmanleft+" ---\\ \\_" + self.boatman+"_/ _____________/---"+self.landmanright+"]"
self.s2 = "** [fox grain" +self.landmanleft+" ---\\ \\_chicken" + self.boatman+"_/ _____________/---"+self.landmanright+"]"
self.s3 = "** [fox grain"+self.landmanleft+"---\\ \ _________________\_ chicken"+self.boatman+" _//---"+self.landmanright+"]"
self.s4 = "** [fox grain"+self.landmanleft+"---\\ \ _________________\_"+self.boatman+"_//---chicken"+self.landmanright+"]"
self.s5 = "** [fox grain"+self.landmanleft+"---\\ \ _________________\_"+self.boatman+"_//---chicken"+self.landmanright+"]"
self.s6 = "** [fox grain"+self.landmanleft+"---\\ \\_"+self.boatman+"_/ ________________ /---chicken"+self.landmanright+"]"
self.s7 = "** [fox grain"+self.landmanleft+"---\\ \\_"+self.boatman+"_/ ________________ /---chicken"+self.landmanright+"]"
self.s8 = "** [fox "+self.landmanleft+"---\\ \\_grain"+self.boatman+"_/ ________________ /---chicken"+self.landmanright+"]"
self.s9 = "** [fox "+self.landmanleft+"---\\ \________________ \_grain"+self.boatman+"_/ /---chicken"+self.landmanright+"]"
self.s10 = "** [fox"+self.landmanleft+"---\\ \________________ \_"+self.boatman+"_/ /---grain chicken"+self.landmanright+"]"
self.s11 = "** [fox"+self.landmanleft+"---\\ \________________ \_"+self.boatman+"_/ /---grain chicken"+self.landmanright+"]"
self.s12 = "** [fox"+self.landmanleft+"---\\ \________________ \_chicken"+self.boatman+"_/ /---grain "+self.landmanright+"]"
self.s13 = "** [fox "+self.landmanleft+"---\\ \\_chicken"+self.boatman+"_/ ________________ /---grain"+self.landmanright+"]"
self.s14 = "** [fox chicken"+self.landmanleft+"---\\ \\_"+self.boatman+"_/ ________________ /---grain"+self.landmanright+"]"
self.s15 = "** [fox chicken"+self.landmanleft+"---\\ \\_"+self.boatman+"_/ ________________ /---grain"+self.landmanright+"]"
self.s16 = "** [chicken"+self.landmanleft+"---\\ \\_fox"+self.boatman+"_/ ________________ /---grain"+self.landmanright+"]"
self.s17 = "** [chicken"+self.landmanleft+"---\\ \________________ \_fox"+self.boatman+"_/ /---grain "+self.landmanright+"]"
self.s18 = "** [chicken"+self.landmanleft+"---\\ \________________ \_"+self.boatman+"_/ /---fox grain "+self.landmanright+"]"
self.s19 = "** [chicken"+self.landmanleft+"---\\ \________________ \_"+self.boatman+"_/ /---fox grain "+self.landmanright+"]"
self.s20 = "** [chicken"+self.landmanleft+"---\\ \\_"+self.boatman+"_/ ________________ /---fox grain"+self.landmanright+"]"
self.s21 = "** [chicken"+self.landmanleft+"---\\ \\_"+self.boatman+"_/ ________________ /---fox grain"+self.landmanright+"]"
self.s22 = "** ["+self.landmanleft+"---\\ \\_chicken"+self.boatman+"_/ ________________ /---fox grain"+self.landmanright+"]"
self.s23 = "** ["+self.landmanleft+"---\\ \________________ \_chicken"+self.boatman+"_/ /---fox grain "+self.landmanright+"]"
self.s24 = "** ["+self.landmanleft+"---\\ \________________ \_"+self.boatman+"_/ /---chicken fox grain "+self.landmanright+"]"
self.s25 = "Congratulations! The farmer can now sell his goods at the market!"
#
self.f1 = "** [chicken fox " +self.landmanleft+" ---\\ \\_grain" + self.boatman+"_/ _____________/---"+self.landmanright+"]"
self.f2 = "** [fox chicken"+self.landmanleft+"---\\ \ _________________\_ grain"+self.boatman+" _//---"+self.landmanright+"]"
#All at right
self.allAtRight = "** ["+self.landmanleft+"---\\ \\_"+self.boatman+"_/ ________________ /---chicken fox grain"+self.landmanright+"]"
# .... slik kan alle tilstander "tegnes"
# Bruk betingelse og finn ut tilstanden fra database (db, som er en liste av lister)
# For eksempel, hvis alt er på venstre siden av elven, skriv ut allAtLeft "bilde"
# Dette er ikke en korrekt kode, - man bør sjekke på flere tilstandsvariabler
# eller implementere datastrukturer som genererer "bilder" automatisk, basert på innholdet
# i datab | asen
def statusCheck(self):
| #All At Left
if 'man isat left' in self.river_db and 'boat isat left' in self.river_db and 'fox isat left' in self.river_db and 'chicken isat left' in self.river_db and 'grain isat left' in self.river_db:
print self.s1
return "s1"
#All at left, chicken in boat
elif 'man isat left' in self.river_db and 'boat isat left' in self.river_db and 'chicken isat boat' in self.river_db and 'fox isat left' in |
uSpike/raspberry-api-server | raspberry_api/server/version.py | Python | mit | 531 | 0.00565 |
from flask_restplus import Res | ource, Namespace, reqparse, fields, marshal_with
from .gpio import GPIO
api = Namespace('version')
version_model = | api.model('version', {
'product': fields.String,
'rev': fields.String
})
version_list = api.model('version_list', {
'host': fields.Nested(version_model),
})
@api.route('/')
class Version(Resource):
@api.marshal_with(version_list)
def get(self):
return {
'host': {'product': 'raspberry_pi', 'rev': GPIO.RPI_INFO['P1_REVISION']},
}
|
treyhunner/django-simple-history | simple_history/tests/tests/test_signals.py | Python | bsd-3-clause | 1,806 | 0 | from datetime import datetime
from django.test import TestCase
from simple_history.signals import (
post_create_historical_record,
pre_create_historical_record,
)
from ..models import Poll
today = datetime(2021, 1, 1, 10, 0)
cl | ass PrePostCr | eateHistoricalRecordSignalTest(TestCase):
def setUp(self):
self.signal_was_called = False
self.signal_instance = None
self.signal_history_instance = None
self.signal_sender = None
def test_pre_create_historical_record_signal(self):
def handler(sender, instance, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = kwargs["history_instance"]
self.signal_sender = sender
pre_create_historical_record.connect(handler)
p = Poll(question="what's up?", pub_date=today)
p.save()
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().__class__)
def test_post_create_historical_record_signal(self):
def handler(sender, instance, history_instance, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = history_instance
self.signal_sender = sender
post_create_historical_record.connect(handler)
p = Poll(question="what's up?", pub_date=today)
p.save()
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().__class__)
|
zackmdavis/swift | test/unit/account/test_server.py | Python | apache-2.0 | 94,681 | 0.000011 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the L | icense is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import mock
import unittest
from tempfile import mkdtemp
from shutil import rmtree
from StringIO import StringIO
from time import gmtime
from test.unit import FakeLogger
import itertools
import random
import simplejson
import xml.dom.minidom
fro | m swift import __version__ as swift_version
from swift.common.swob import Request
from swift.common import constraints
from swift.account.server import AccountController
from swift.common.utils import normalize_timestamp, replication, public
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies
from swift.common.storage_policy import StoragePolicy, POLICIES
@patch_policies
class TestAccountController(unittest.TestCase):
"""Test swift.account.server.AccountController"""
def setUp(self):
"""Set up for testing swift.account.server.AccountController"""
self.testdir_base = mkdtemp()
self.testdir = os.path.join(self.testdir_base, 'account_server')
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.account.server.AccountController"""
try:
rmtree(self.testdir_base)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def test_OPTIONS(self):
server_handler = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEquals(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
self.assertTrue(
verb in resp.headers['Allow'].split(', '))
self.assertEquals(len(resp.headers['Allow'].split(', ')), 7)
self.assertEquals(resp.headers['Server'],
(server_handler.server_type + '/' + swift_version))
def test_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
def test_DELETE_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_not_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
# We now allow deleting non-empty accounts
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_now_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c1',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '2',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_HEAD_not_found(self):
# Test the case in which account does not exist (can be recreated)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
# Test the case in which account was deleted but not yet reaped
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
|
deepmind/optax | setup.py | Python | apache-2.0 | 2,830 | 0.001413 | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_namespace_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open(os.path.join(_CURRENT_DIR, 'optax', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `optax/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='optax',
version=_get_version(),
url='https://github.com/deepmind/optax',
license='Apache 2.0',
author='DeepMind',
description=('A gradient processing and optimisation library in JAX.'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='optax-dev@google.com',
keywords='reinforcement-learning python machine learning',
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
zip_safe=False, # Required for full installation.
python_requires='>=3.7',
classifiers=[
'Development Status | :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: D | evelopers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
chelseawinfree/symantecssl | tests/unit/utils.py | Python | apache-2.0 | 623 | 0 | from __future__ import absolute_import, division, print_function
from lxml import etree
import os
def open_xml_file(filename, mode):
"""Opens an XML file for use.
:param filename: XML file to create fi | le from
:param mode: file mode for open
:return:
"""
base = os.path.dirname(__file__) + '/xml_test_files/'
return open(os.path.join(base, filename), mode)
def create_node_from_file(filename):
"""Creates an xml node from a given XML file.
:param filename: XML file to create node from
| :return: node
"""
node = etree.parse(open_xml_file(filename, 'r'))
return node
|
sburnett/seattle | repy/tests/ut_repytests_testinit.py | Python | mit | 67 | 0.059701 | #pragma out
#pragma repy
if callfunc=='initialize':
prin | t 'OK! | '
|
zwy1135/z_idea | graph.py | Python | mit | 4,756 | 0.034192 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 04 17:51:20 2014
@author: wy
"""
import numpy as np
def sigmoid(x):
x = np.sum(x)
return 1./(np.e**-x + 1)
class Graph(object):
def __init__(self,vs = [],es = []):
self.vs = set()
self.es = set()
for v in vs:
self.add_vertex(v)
for e in es:
self.add_edge(e)
def add_vertex(self,v):
self.vs.add(v)
def remove_vertex(self,v):
self.vs.remove(v)
for v1 in self.vs:
v1.removeEdge(v)
def add_edge(self,e):
self.es.add(e)
v,w = e.getVertex()
v.setEdge(w,e)
w.setEdge(v,e)
def remove_edge(self,e):
try:
self.es.remove(e)
except:
pass
v,w = e.getVertex()
v.removeEdge(w)
w.removeEdge(v)
class DirectedGraph(Graph):
def add_edge(self,e,s):
self.es.add(e)
v,w = e.getVertex()
if v==s:
v.setEdge(w,e)
else:
w.setEdge(v,w)
class Vertex(object):
def __init__(self,label = '',func = sigmoid):
self.label = label
self.func = func
self.value = 0.0
self.edges = {}
self.isupdated = False
self.__str__ == self.__repr__
def __repr__(self):
return "Vertex %s"%self.label
def setEdge(self,w,e):
self.edges[w] = e
def removeEdge(self,w):
try:
del self.edges[w]
except:
pass
#print 'Nothing to delete.'
def getEdges(self):
return self.edges
def active(self):
if self.func == None or self.isupdated:
return self.value
pairs = self.edges.items()
para = [p[0].active()*p[1].getValue() for p in pairs]
self.value = self.func(para)
self.isupdated = True
return self.value
def update(self,value = None):
if not value == None:
self.value = value
else:
self.active()
class Edge(object):
def __init__(self,v1,v2,value = 1):
self.edge = tuple([v1,v2])
self.value = value
self.__str__ == self.__repr__
def __repr__(self):
return "Edge(%s,%s)"%(repr(self.edge[0]),repr(self.edge[1]))
def getVertex(self):
return self.edge
def setValue(self,value):
self.value = value
def getValue(self):
return self.value
class SmallWorldGraph(Graph):
def __init__(self,num,k,p):
Graph.__init__(self)
#add vertex
for i in range(num):
self.add_vertex(Vertex(str(i)))
#构造正则图
vs = list(self.vs)
for i in range(num):
for j in range(1,k + 1):
idx = i+j
if idx >= num:
idx -= num
self.add_edge(Edge(vs[i],vs[idx]))
#随机连边
removelist = []
for e in self.es:
if np.random.ran | dom()<p:
#print '<'
removelist.append(e)
| for e in removelist:
self.remove_edge(e)
v1,v2 = np.random.choice(vs,2,replace = False)
self.add_edge(Edge(v1,v2))
def findPathLength(s,t):
fringe = [s]
length = 0
visited = set()
while(len(fringe)):
length += 1
new_fringe = set()
for v in fringe:
for v1 in v.edges:
if v1 == t:
return length
if v1 in visited:
continue
new_fringe.add(v1)
visited.add(v)
fringe = new_fringe
return None
def findAverLength(graph):
total = 0.0
count = 0
notfound = 0
vs = list(graph.vs)
for i in range(len(vs)):
for j in range(1,len(vs)):
l = findPathLength(vs[i],vs[j])
if l == None:
notfound +=1
continue
total += l
count += 1
return total/count,count,total,notfound
if __name__=="__main__":
sw = SmallWorldGraph(30,3,0.5)
vs = sw.vs
del sw
print vs
|
DC23/cookiecutter-dcpypackage | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}.py | Python | mit | 662 | 0.006042 | # -*- coding: utf-8 -*-
# Ensure backwards compatibility with Python 2
from __future__ import (
absolute_import,
division,
print_function,
unicode_l | iterals)
from builtins import *
def {{cookiecutter.cli_entry_point_funct | ion_name }}():
""" Command-line entry point for {{ cookiecutter.repo_name }} """
print('{{ cookiecutter.repo_name }} placeholder CLI entry point')
def {{cookiecutter.gui_entry_point_function_name }}():
""" GUI entry point for {{ cookiecutter.repo_name }} """
print('{{ cookiecutter.repo_name }} placeholder GUI entry point')
if __name__ == 'main':
{{ cookiecutter.cli_entry_point_function_name }}()
|
depet/scikit-learn | examples/svm/plot_separating_hyperplane.py | Python | bsd-3-clause | 1,252 | 0.002396 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, | 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors | _[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.axis('tight')
pl.show()
|
hkrist/ratatoskr | ratatoskr/__init__.py | Python | mit | 843 | 0 | import utils
import operation_registry
import operatio | n_wrappers.base_wrappers as base_wrappers
import types
import exceptions
from protectron import protectron
@utils.doublewrap
def register_operation(func, operation_wrapper=base_wrappers.LocalOperation):
if isinstance(operation_wrapper, types.ClassType):
operation_wrapper_instance = operation_wrapper()
else:
operation_wrapper_instance = operation_wrapper
operation_wrapper_instance.load_wrapped_operation(func)
operation_registry_cls = operation_reg | istry.OperationRegistry
operation_registry_cls.register_operation(operation_wrapper_instance)
@utils.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def dispatch_event(event):
return operation_registry.OperationRegistry.call(event)
|
armab/st2contrib | packs/rackspace/actions/create_dns_zone.py | Python | apache-2.0 | 452 | 0 | from lib.action import PyraxBaseAction
from lib.formatters import to_dns_zone_dict
__all__ = [
'CreateDNSZoneAction'
]
class CreateDNSZoneAction(PyraxBaseAction):
def run(self, name, email_address, ttl=None, comment=None):
cdns = self.pyrax.cloud_dns
zone = cdns. | create(name=name, emailAddres | s=email_address, ttl=ttl,
comment=comment)
result = to_dns_zone_dict(zone)
return result
|
python-daisychain/daisychain | test/test_executor.py | Python | mit | 9,094 | 0.004948 | import daisychain.steps.input
from daisychain.executor import Executor, Execution, ExecutorAborted, ConsoleInput, CheckStatusException
from . import test_step
from mock import patch
import py3compat
if py3compat.PY2:
input_function = 'daisychain.steps.input.input'
else:
import builtins
input_function = 'builtins.input'
def test_init():
e = Executor()
assert e.scan_interval == 0.0
assert e.execution is None
assert e.user_input_class is ConsoleInput
assert e.on_failure is Executor.RAISE
e = Executor(on_failure=Executor.PROMPT, scan_interval=1.0)
assert e.scan_interval == 1.0
assert e.execution is None
assert e.user_input_class is ConsoleInput
assert e.on_failure is Executor.PROMPT
try:
e = Executor(on_failure='NOT_A_KNOWN_FAILURE_TYPE')
except ValueError:
pass
else:
assert False, "Should have thrown a Value Error for an unknown failure mode"
def test_attach_self_as_executor():
e = Executor(name='test_executor')
assert e.scan_interval == 0.0
assert e.execution is None
assert e.user_input_class is ConsoleInput
assert e.on_failure is Executor.RAISE
dep = test_step.MockStep(name='mock_step')
e.dependencies.add(dep)
e.execution = Execution(executor=e)
assert dep.executor is e
assert dep.root_log_id == e.root_log_id
def test_prompt_user_for_step():
with patch(input_function) as mock_raw_input:
dep = test_step.MockStep(name='mock_step', run_exception=RuntimeError('Exception while running step'))
e = Executor(name='test_executor')
global times_called
times_called = 0
def raw_input_output(*args, **kwargs):
global times_called
times_called += 1
responses = ['y','','r','']
prompt = kwargs.get('prompt', args[0])
assert 'mock_step' in prompt
assert 'Does this test work (y)/(n)?' in prompt
return responses[times_called - 1]
mock_raw_input.side_effect = raw_input_output
assert e.prompt_user_for_step(step=dep, prompt='Does this test work (y)/(n)?') == 'y'
assert e.prompt_user_for_step(step=dep, prompt='Does this test work (y)/(n)?', valid_choices=['d','r']) == 'r'
assert e.prompt_user_for_step(step=dep, prompt='Does this test work (y)/(n)?', default='n') == 'n'
e.execution = Execution()
e.execution.aborted = True
try:
e.prompt_user_for_step(step=dep, prompt='Does this test work (y)/(n)?')
except ExecutorAborted:
pass
else:
assert False, 'Should have raised an ExecutorAborted exception if it was previously aborted'
def test_execute():
dep = test_step.MockStep(name='mock_step')
e = Executor(name='test_executor', dependencies=[dep])
e.execute()
assert dep.finished
dep_named = test_step.MockStep(name='mock_step_named')
dep = test_step.MockStep(name='mock_step', named_reference=dep_named)
assert dep.named_reference is dep_named
e = Executor(name='test_executor', dependencies=[dep])
e.execute()
assert dep_named.finished
assert dep.finished
dep_dep = test_step.MockStep(name='mock_step_dep')
dep = test_step.MockStep(name='mock_step', dependencies=[dep_dep])
assert dep.dependencies == {dep_dep}
e = Executor(name='test_executor', dependencies=[dep])
assert e.dependencies == {dep}
e.execute()
assert dep_named.finished
assert dep.finished
dep = test_step.MockStep(name='mock_step', run_exception=RuntimeError('Exception while running step'))
e = Executor(name='test_executor', dependencies=[dep])
try:
e.execute()
except RuntimeError:
assert dep.failed
else:
assert False, "Should have thrown the error the step raised"
def test_execute_check_status_failure_in_step():
dep = test_step.MockStep(name='mock_step', check_status_exception=TypeError("Exception while checking status"))
e = Executor(name='test_executor', dependencies=[dep])
try:
e.execute()
except CheckStatusException:
assert dep.failed
else:
assert False, "Should have thrown a CheckStatusException on failure"
dep = test_step.MockStep(name='mock_step', check_status_exception=TypeError("Exception while checking status"))
dep2 = test_step.MockStep(name='mock_failing_step_parent', dependencies=[dep])
e = Executor(name='test_executor', dependencies=[dep2])
try:
e.execute()
except CheckStatusException:
assert dep2.validated
assert dep.failed
else:
assert False, "Should have thrown a CheckStatusException on failure"
dep = test_step.MockStep(name='mock_step')
def raise_error():
raise RuntimeError("Exception while forwarding callback")
dep.status.check = raise_error
dep2 = test_step.MockStep(name='mock_failing_step_parent', dependencies=[dep])
e = Executor(name='test_executor', dependencies=[dep2])
try:
e.execute()
except CheckStatusException:
assert dep2.validated
assert dep.failed
else:
assert False, "Should have thrown a CheckStatusException on failure"
def test_execute_skip_failures():
dep = test_step.MockStep(name='mock_sibling_step', run_exception=RuntimeError("test_run_exception"))
dep2 = test_step.MockStep(name='mock_sibling_step2', run_exception=RuntimeError("test_run_exception"))
successful_dep = test_step.MockStep(name='successful_dep')
parent = test_step.MockStep(name='mock_parent_step', dependencies=[dep, dep2, successful_dep])
assert parent.dependencies == {dep, dep2, successful_dep}
successful_parent = test_step.MockStep(name='mock_successful_parent', dependencies=[successful_dep])
e = Executor(name='test_executor', on_failure=Executor.SKIP, dependencies=[parent, successful_parent])
e.execute()
assert dep.failed
assert dep2.failed
assert successful_dep.finished
assert parent.validated
assert successful_parent.finished
assert not e.execution.aborted
def test_execute_graceful_shutdown():
dep = test_step.MockStep(name='mock_sibling_step', run_exception=RuntimeError("test_run_exception"))
dep2 = test_step.MockStep(name='mock_sibling_step2')
dep2.run = lambda: dep2.status.set_validated()
successful_dep = test_step.MockStep(name='successful_dep')
parent = test_step.MockStep(name='mock_parent_step', dependencies=[dep, dep2, successful_dep])
assert parent.dependencies == {dep, dep2, successful_dep}
successful_parent = test_step.MockStep(name='mock_successful_parent', dependencies=[successful_dep])
e = Executor(name='test_executor', on_failure=Executor.GRACEFUL_SHUTDOWN, dependencies=[parent, successful_parent])
e.execute()
assert dep.status.failed
assert dep2.status.finished or dep2.status.validated
assert successful_dep.status.finished or successful_dep.status.validated
assert parent.validated
assert successful_parent.validated
assert e.execution.aborted
def test_execute_graceful_shutdown_with_already_aborted_execution():
dep = test_step.MockStep(name='mock_sibling_step')
dep2 = test_step.MockStep(name='mock_sibling_step2', run_exception=RuntimeError("test_run_exception"))
successful_dep = test_step.MockStep(name='successful_dep')
parent = test_step.MockStep(name='mock_parent_step', dependencies=[dep, dep2, successful_dep])
assert parent.dependencies == {dep, dep2, successful_dep}
successful_parent = test_step.MockStep(name='mock_successful_parent', dependencies=[successful_dep])
e = Executor(name='test_executor', on_failure=Executor.GRACEFUL_SHUTDOWN, depend | encies=[parent, successful_parent])
e.execution = Execution(executor=e)
e.execution.aborted = True
e.execute()
assert dep.status.pending
assert dep2 | .status.pending
assert successful_dep.status.pending
assert parent.status.pending
assert successful_parent.status.pending
def test_prompting_during_execution():
with patch(input_function) as mock_raw_input:
dep = test_step.MockStep(name='mock_step |
sanghviharshit/script.kodi.lifx.ambilight | resources/lib/lifxlan/products.py | Python | gpl-2.0 | 4,906 | 0.000815 | # coding=utf-8
product_map = {1: "Original 1000",
3: "Color 650",
10: "White 800 (Low Voltage)",
11: "White 800 (High Voltage)",
18: "White 900 BR30 (Low Voltage)",
20: "Color 1000 BR30",
22: "Color 1000",
27: "LIFX A19",
28: "LIFX BR30",
29: "LIFX+ A19",
30: "LIFX+ BR30",
31: "LIFX Z",
32: "LIFX Z 2",
36: "LIFX Downlight",
37: "LIFX Downlight",
43: "LIFX A19",
44: "LIFX BR30",
45: "LIFX+ A19",
46: "LIFX+ BR30",
49: "LIFX Mini",
50: "LIFX Mini White",
51: "LIFX Mini Day and Dusk",
52: "LIFX GU10"
}
# Identifies which products are lights.
# Currently all LIFX products that speak the LAN protocol are lights.
# However, the protocol was written to allow addition of other kinds
# of devices, so it's important to be able to differentiate.
light_products = [1, 3, 10, 11, 18, 20, 22, 27, 28, 29, 30, 31, 32, 36, 37, 43, 44, 45, 46, 49, 50, 51, 52]
features_map = {1: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
3: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
10: {"color": False,
"temperature": True,
"infrared": False,
"multizone": False},
11: {"color": False,
"temperature": True,
"infrared": False,
"multizone": False},
18: {"color": False,
"temperature": True,
"infrared": False,
"multizone": False},
20: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
22: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
27: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
28: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
29: {"color": True,
"temperature": True,
"infrared": True,
"multizone": False},
30: {"color": True,
"temperature": True,
"infrared": True,
"multizone": False},
31: {"color": Tru | e,
"temperature": True,
"infrared": False,
"multizone": True},
32: {"color": True,
"temperature": True,
"infrared": False,
"multizone": True},
36: {"color": True,
"temperature": True,
"infrared": | False,
"multizone": False},
37: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
43: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
44: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
45: {"color": True,
"temperature": True,
"infrared": True,
"multizone": False},
46: {"color": True,
"temperature": True,
"infrared": True,
"multizone": False},
49: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False},
50: {"color": False,
"temperature": False,
"infrared": False,
"multizone": False},
51: {"color": False,
"temperature": True,
"infrared": False,
"multizone": False},
52: {"color": True,
"temperature": True,
"infrared": False,
"multizone": False}
}
|
osuripple/pep.py | events/setAwayMessageEvent.py | Python | agpl-3.0 | 743 | 0.020188 | from common.log import logUtils as log
from constants import clientPackets
from constants import serverPackets
def handle(userToken, packetData):
# get token data
username = userToken.username
# Read packet data
packetData = clientPackets.setAwayMessage(packetData)
# Set token away message
userToken.awayMessage = packetData["awayMe | ssage"]
# Send private message from fokabot
if packetData["awayMessage"] == "":
fokaMessage = "Your away message has been reset"
else:
fokaMessage = "Your away message is now: {}".f | ormat(packetData["awayMessage"])
userToken.enqueue(serverPackets.sendMessage("FokaBot", username, fokaMessage))
log.info("{} has changed their away message to: {}".format(username, packetData["awayMessage"]))
|
mikkokeskinen/tunnistamo | yletunnus/urls.py | Python | mit | 171 | 0 | fr | om allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import YleTunnusProvider
urlpatterns = default_urlpatterns(YleTun | nusProvider)
|
vinutah/apps | tools/vulfi/master/scripts/driver.py | Python | gpl-3.0 | 20,628 | 0.046878 | import os,sys,math,pdb,filecmp,logging,argparse,subprocess
from argparse import RawTextHelpFormatter
import time
import numpy as np
import imp
retry_count=3
# Check if OpenCV library is available
# Abort further execution if image handling is required
cv2_flag = True
try:
imp.find_module("cv2")
except:
print "WARNING: Couldn't find OpenCV module: cv2"
print "WARNING: All image related operation will fail!!"
cv2_flag = False
else:
import cv2
def isFloat(data):
try:
float(data)
except ValueError:
return False
return True
def processFICSV(arglist):
ficsv = {}
if os.path.exists(str(arglist.ficsv)):
fcsv = open(str(arglist.ficsv),'r')
content = fcsv.readlines()
for i in range(1,len(content)):
line = content[i]
fields = line.split(";")
fields[len(fields)-1]=fields[len(fields)-1].replace('\n','')
key = fields[0]
del fields[0]
if key not in ficsv.keys():
value=[]
value.append(fields)
ficsv.update({key:value})
else:
ficsv[key].append(fields)
fcsv.close()
return ficsv
def compareTxtfiles(file1,file2):
if filecmp.cmp(file1,file2):
print "Both files are identical!"
return [True,0.0]
else:
print "Both files are not identical!"
return [False,1.0]
def genImgfiles(file1,file2,file3):
img1 = cv2.imread(file1)
img2 = cv2.imread(file2)
if len(img1)==0 or len(img2)==0:
print "Error:invalid image file"
sys.exit(-1)
if len(img1) != len(img2):
print "Error:image resolution mismatch"
sys.exit(-1)
if img1.size != img2.size:
print "Error:Image resolution mismatch"
sys.exit(-1)
if img1.size == 0 or img2.size==0:
print "Error:Invalid image resolution"
sys.exit(-1)
img3 = img1.copy()
x = img1.shape[1]
y = img1.shape[0]
for i in range(y):
for j in range(x):
img3[i][j][0] = float(img1[i][j][0])-float(img2[i][j][0])
img3[i][j][1] = float(img1[i][j][1])-float(img2[i][j][1])
img3[i][j][2] = float(img1[i][j][2])-float(img2[i][j][2])
cv2.imwrite(file3,img3)
return
def compareNumfiles(file1,file2,threshold):
if not os.path.exists(file1) or not os.path.exists(file1):
print "Error:one of the file does not exist.."
sys.exit(-1)
f1 = open(file1,"r")
f2 = open(file2,"r")
content1 = f1.readlines()
content2 = f2.readlines()
if len(content1) != len(content2):
return [False,-1.0] # sdc - number of values are unequal
if len(content1)==0 or len(content2)==0:
return [True,-1.0] # not a sdc - one of the file has no content
l2norm = 0.0
linf=0.0
for i in range(len(content1)):
if not isFloat(content1[i].split('\n')[0]) or not isFloat(content2[i].split('\n')[0]):
continue
val1 = float(content1[i].split('\n')[0])
val2 = float(content2[i].split('\n')[0])
l2norm = l2norm + ((val1-val2)*(val1-val2))
diff = abs(val1-val2)
if diff > linf:
linf=diff
l2norm = math.sqrt(l2norm)
if linf > threshold:
print "L2 Norm is: " + str(l2norm)
print "L-infinity Norm is: " + str(linf)
return [False,linf]
elif linf != linf or linf == float('Inf') or linf == -float('Inf'):
return [False,-1.0]
f1.close()
f2.close()
return [True,linf]
def compareImgfiles(file1,file2,threshold):
maxfile1=0
mse=0.0
psnr=0.0
img1 = cv2.imread(file1)
img2 = cv2.imread(file2)
if img1.size != img2.size:
print "Warning:Image resolution mismatch"
return [False,-1.0]
if img1.size == 0 or img2.size==0:
print "Warning:Invalid image resolution"
return [False,-1.0]
x = img1.shape[1]
y = img1.shape[0]
for i in range(y):
for j in range(x):
if max(img1[i][j]) > m | axfile1:
maxfile1=max(img1[i][j])
mse = np.sum((img1.astype("float") - img2.astype("float")) ** 2)
mse = mse / float(img1.size)
print "Image resolution: " + str(x)+"x"+str(y)
print "MSE: "+str(mse)
print "MAX_I: "+str(maxfile1)
if mse > 0:
maxsqr = maxfile1**2
ratio = maxsqr / mse
if ratio > 0.0:
psnr = math.log10(ratio) * 10.0
print "PSNR: "+str(psnr)
if psnr >= threshold:
pr | int "Info: psnr is above threshold!"
return [True,psnr]
else:
print "Info: psnr is below threshold!"
return [False,psnr]
else:
print "Warning: ratio of maxi and mse should always be gt 0"
return [True,-1.0]
print "Info: The two images are identical!"
return [True,0.0]
def printg(txt):
line = '\033[1;42m'
line = line + txt
line = line + '\033[1;m'
print line
return
def writeOverheadResult(arglist,exectime1,exectime2):
key = str(time.time())
fld2 = arglist.cmd1
fld3 = arglist.cmd1
fld4 = str(exectime1)
fld5 = str(exectime2)
outline = key + ","
outline = outline + fld2 + ","
outline = outline + fld3 + ","
outline = outline + fld4 + ","
outline = outline + fld5 + "\n"
# write result
if os.path.exists(arglist.rslt):
#write result
frslt = open(arglist.rslt,'a')
frslt.write(outline)
frslt.close()
else:
#write header and result
frslt = open(arglist.rslt,'w')
frslt.write("exec_key,exe1_name,exe2_name,\
exe1_time,exe2_time\n")
frslt.write(outline)
frslt.close()
return
# field 1 - key
# field 2 - instructions
# field 3 - original value
# field 4 - corrupted value
# field 5 - corrupted bit position
# field 6 - sdc
# field 7 - benign
# field 8 - crash
# field 9 - error
def writeResult(arglist,result,diffval,fsCount,detectCount):
key = ''
fld2 = ''
fld3 = ''
fld4 = ''
fld5 = ''
fld6 = str(result[0])
fld7 = str(result[1])
fld8 = str(result[2])
fld9 = str(result[3])
fld10 = str(diffval)
fld11 = str(fsCount)
fld12 = str(detectCount)
# read execution key
if os.path.exists("fi.out"):
fi = open("fi.out",'r')
contentfi = fi.readlines()
for i in range(len(contentfi)):
if "timestamp" in contentfi[i]:
key = str(contentfi[i].split(":")[1])
key = key.replace("\n","")
fi.close()
else:
key = 'Not Available'
# read fi data from fi.csv and combine it with the result
ficsv = processFICSV(arglist)
if key in ficsv.keys():
value = ficsv[key]
if len(value)>=1:
fld2 = (value[0][0]).replace(",","#") # instr name
fld3 = value[0][1] # original val
fld4 = value[0][2] # corrupted val
fld5 = value[0][3] # corrupted bit pos
elif len(value)>=2:
for j in range(1,len(value)):
fld2 = fld2 + "\n" + value[j][0] # instr name
fld3 = fld3 + "\n" + value[j][1] # original val
fld4 = fld4 + "\n" + value[j][2] # corrupted val
fld5 = fld5 + "\n" + value[j][3] # corrupted bit pos
outline = key + ","
outline = outline + fld2 + ","
outline = outline + fld3 + ","
outline = outline + fld4 + ","
outline = outline + fld5 + ","
outline = outline + fld6 + ","
outline = outline + fld7 + ","
outline = outline + fld8 + ","
outline = outline + fld9 + ","
outline = outline + fld10 + ","
outline = outline + fld11 + ","
outline = outline + fld12 + "\n"
# write result
if os.path.exists(arglist.rslt):
#write result
frslt = open(arglist.rslt,'a')
frslt.write(outline)
frslt.close()
else:
#write header and result
frslt = open(arglist.rslt,'w')
frslt.write("exec_key,instr_list,original_val,corrupted_val,affected_bit_pos,sdc,benign,crash,error,value_diff,dyn_instr_count,detection_count\n")
frslt.write(outline)
frslt.close()
return
def printResult(arglist,result):
# total execution count
printg("Total number of executions: " + str(arglist.exec_count))
# silent data corruptions
printg("Number of executions with sdc: " + str(result[0]))
# benign excutions
printg("Number of benign executions: " + str(result[1]))
# This should ideally not happen
printg("Number of executions which potentially crashed: " + str(result[2]))
# This should always be 0
printg("Number of errorneous executions: " + str(result[3]))
return
def analyzeResult(arglist,retcode):
# result[0/1/2/3] - sdc,benign,crash,error
result = [0,0,0,0]
val = True
# potential error
if not os.path.exists(arglist.out1) or os.stat(arglist.out1).st_size == 0:
result[3]=result[3]+1
return [result,1.0]
# potential crash
if retcode != 0 or not os.path.exists(arglist.out2) or os.stat(arglist.out2).st_size == 0:
result[2]=result[2]+1
return [result,1.0]
# check for sdc
if arglist.fcp_mode == 'bex':
val = compareTxtfiles(arglist.out1,arglist.out2)
elif arglist.fcp_mode == |
acsone/website | website_sale_collapse_categories/__init__.py | Python | agpl-3.0 | 289 | 0 | # -*- encoding: utf-8 -*-
############################################## | ################################
# For copyright and license notices, see __ope | nerp__.py file in root directory
##############################################################################
from . import controllers
|
seleznev/firefox-complete-theme-build-system | tests/test_addonconf.py | Python | mpl-2.0 | 1,050 | 0.006667 | import sys
import unittest
sys.path.insert(0, "../src/build")
import addonconf
class AddonConfModuleTestCase(unittest.TestCase):
def test_load(self):
# act
config = addonconf.load("configs/config.json")
# assert
self.assertEqual(config, None, "Wrong return value for not exists config")
def test_load2(self) | :
# act
config = addonconf.load("configs/config.json.1")
# assert
self.assertEqual(config, None, "Wrong return value for unvalide config")
def test_load3(self):
# arrange
correct_config = {'version': '0.1', 'xpi': {'theme': 'firefox-theme-test.xpi', 'package': 'firefox-test-@VERSION@.xpi', 'extension': 'firefox-extension-t | est.xpi'}, 'max-version': '31.0a1', 'directory-structure': {'shared-dir': 'chrome'}, 'min-version': '29.0'}
# act
config = addonconf.load("configs/config.json.2")
# assert
self.assertEqual(config, correct_config, "Uncorrect load config")
if __name__ == '__main__':
unittest.main()
|
jeremiak/regulations-site | regulations/tests/node_types_tests.py | Python | cc0-1.0 | 3,720 | 0.000269 | #vim: set encoding=utf-8
from unittest import TestCase
from regulations.generator.node_types import *
class NodeTypesTest(TestCase):
def test_change_appendix(self):
node_parts_before = ['243', 'A', '30(a)']
node_parts_after = to_markup_id(node_parts_before)
node_string = "-".join(node_parts_after)
self.assertEqual('243-A-30a', node_string)
def test_type_from_l | abel(self):
self.assertEqual(REGTEXT, type_from_label(['250', '5', 'A']))
self.assertEqual(APPENDIX, type_from_label(['250', 'A2']))
self.assertEqual(APPENDIX, type_from_label(['250', 'A']))
self.assertEqual(APPENDIX, type_from_label(['250', 'A', '3(b)']))
self.assertEqual(REGTEXT, type_from_label(['250']))
self.assertEqual(REGTEXT, type_from_label(['250', '5']))
self.assertEqual(REGTEXT, type_from_label(['250' | , '5', 'a', 'i', 'C']))
self.assertEqual(EMPTYPART, type_from_label(['250', 'Subpart']))
self.assertEqual(SUBPART, type_from_label(['250', 'Subpart', 'C']))
self.assertEqual(INTERP, type_from_label(['250', 'Interp']))
self.assertEqual(INTERP, type_from_label(['250', 'A', 'Interp']))
self.assertEqual(INTERP, type_from_label(['250', '5', 'Interp']))
self.assertEqual(INTERP, type_from_label(['250', '5', 'b', 'Interp']))
self.assertEqual(INTERP,
type_from_label(['250', '5', 'b', 'Interp', '1']))
self.assertEqual(INTERP,
type_from_label(['250', '5', 'Interp', '5', 'r']))
def test_transform_part_none(self):
part = '30'
self.assertEqual('30', transform_part(part))
def test_label_to_text(self):
self.assertEqual('2323.4', label_to_text(['2323', '4']))
self.assertEqual('2323.5(r)(3)',
label_to_text(['2323', '5', 'r', '3']))
self.assertEqual('4', label_to_text(['2323', '4'], False))
self.assertEqual('5(r)(3)',
label_to_text(['2323', '5', 'r', '3'], False))
self.assertEqual(u'§ 2323.1',
label_to_text(['2323', '1'], True, True))
self.assertEqual(u'§ 1', label_to_text(['2323', '1'], False, True))
self.assertEqual(
'Appendix A to Part 2323', label_to_text(['2323', 'A']))
self.assertEqual('Appendix A-4', label_to_text(['2323', 'A', '4']))
self.assertEqual('Appendix A-4(b)(2)',
label_to_text(['2323', 'A', '4', 'b', '2']))
self.assertEqual('Comment for 2323.5',
label_to_text(['2323', '5', 'Interp']))
self.assertEqual('Comment for 2323.7(b)-1.v',
label_to_text(['2323', '7', 'b', 'Interp', '1', 'v']))
self.assertEqual('Comment for Appendix Z to Part 2323',
label_to_text(['2323', 'Z', 'Interp']))
self.assertEqual('Regulation 204', label_to_text(['204']))
self.assertEqual('Supplement I to Part 204',
label_to_text(['204', 'Interp']))
self.assertEqual('Interpretations for Regulation Text of Part 204',
label_to_text(['204', 'Subpart', 'Interp']))
self.assertEqual('Interpretations for Subpart C of Part 204',
label_to_text(['204', 'Subpart', 'C', 'Interp']))
self.assertEqual('Interpretations for Appendices of Part 204',
label_to_text(['204', 'Appendices', 'Interp']))
self.assertEqual('This Section',
label_to_text(['204', 'Interp', 'h1']))
self.assertEqual(
'Appendix M2 to Part 204', label_to_text(['204', 'M2']))
|
intel-analytics/BigDL | python/nano/src/bigdl/nano/quantization/neural_compressor/quantization.py | Python | apache-2.0 | 7,996 | 0.003252 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from neural_compressor.conf.config import Quantization_Conf
from neural_compressor.experimental import Quantization, common
from neural_compressor.experimental.common import Metric
from .metric import METRICS
class QuantizationINC(Quantization):
def __init__(self,
framework: str,
conf='',
approach='post_training_static_quant',
tuning_strategy='bayesian',
accuracy_criterion: dict = None,
timeout=0,
max_trials=1,
inputs=None,
outputs=None
):
"""
Create a Intel Neural Compressor Quantization object. To understand INC quantization,
please refer to https://github.com/intel/neural-compressor/blob/master/docs/Quantization.md.
:param framework: 'tensorflow', 'pytorch', 'pytorch_fx', 'pytorch_ipex', 'onnxrt_integer',
'onnxrt_qlinear' or 'mxnet'; allow new framework backend extension.
Default: 'pytorch_fx'. Consistent with Intel Neural Compressor
Quantization.
:param conf: A path to conf yaml file for quantization.
Default: None, using default config.
:param approach: 'post_training_static_quant', 'post_training_dynamic_quant',
'quant_aware_training'.
Default: 'post_training_static_quant'.
:param tuning_strategy: 'bayesian', 'basic', 'mse', 'sigopt'. Default: 'bayesian'.
:param accuracy_criterion: Tolerable accuracy drop.
accuracy_criterion = {'relative': 0.1, 'higher_is_better':True}
allows relative accuracy loss: 1%. accuracy_criterion = {
'absolute': 0.99, 'higher_is_better':False} means accuracy
< 0.99 must be satisfied.
:param timeout: Tuning timeout (seconds). Default: 0, which means early stop.
combine with max_trials field to decide when to exit.
:param max_trials: Max tune times. Default: 1.
Combine with timeout field to decide when to exit.
:param inputs: For tensorflow to specify names of inputs. e.g. inputs=['img',]
:param outputs: For tensorflow to specify names of outputs. e.g. outputs=['logits',]
"""
qconf = Quantization_Conf(conf)
cfg = qconf.usr_cfg
# Override default config
cfg.model.framework = framework
cfg.quantization.approach = approach
cfg.tuning.strategy.name = tuning_strategy
if accuracy_criterion:
cfg.tuning.accuracy_criterion = accuracy_criterion
cfg.tuning.exit_policy.timeout = timeout
cfg.tuning.exit_policy.max_trials = max_trials
cfg.model.inputs = inputs
cfg.model.outputs = outputs
super().__init__(qconf)
def post_training_quantize(self, model, calib_dataloader=None, val_dataloader=None,
metric=None):
self.check(calib_dataloader, val_dataloader, metric)
self.model = common.Model(model)
def func(data):
# TODO: only x, y are supported here for onnx quantization
import torch
x, y = zip(*data)
if isinstance(x[0], torch.Tensor):
x = torch.stack(x, dim=0).numpy()
if isinstance(y[0], torch.Tensor):
y = torch.stack(y, dim=0).numpy()
return x, y
if calib_dataloader:
if "pytorch" in self.cfg.model.framework or "tensorflow" in self.cfg.model.framework:
self.calib_dataloader = calib_dataloader
if "onnx" in self.cfg.model.framework:
import torch
assert isinstance(calib_dataloader, torch.utils.data.DataLoader), \
"Only torch dataloader is supported for onnx quantization."
# add a collate_fn to transform torch dataloader to a numpy dataloader
calib_dataloader.collate_fn = func
self.calib_dataloader = calib_dataloader
if val_dataloader:
if "pytorch" in self.cfg.model.framework or "tensorflow" in self.cfg.model.framework:
self.eval_dataloader = val_dataloader
if "onnx" in self.cfg.model.framework:
import torch
assert isinstance(val_dataloader, torch.utils.data.DataLoader), \
"Only torch dataloader is supported for onnx quantization."
# add a collate_fn to transform torch dataloader to a numpy dataloader
val_dataloader.collate_fn = func
self.eval_dataloader = val_dataloader
if metric:
framework = self.cfg.model.framework
if 'pytorch' in framework:
framework_metric = METRICS['pytorch']
elif 'onnx' in framework:
framework_metric = METRICS['onnx']
else:
framework_metric = METRICS[framework]
class MyMetric(framework_metric):
def __init__(self):
"""
This local class is to resolve dumping issue in tensorflow.
In tensorflow, INC will try to dump the metric to yaml which
somehow causes unexpected error. So we moved metric assignment
to the new local class to avoid that.
"""
self.metric = metric
self.metric = Metric(
MyMetric,
name=f"{framework}_{type(metric).__name__}_"
f"{framework_metric.get_next_metric_id()}"
)
quantized = self()
# unset the collate_fn and set back to default_collate
# TODO: use users' original collate function
if "onnx" in self.cfg.mod | el.framework:
from torch.utils.data.dataloader import default_collate
if calib_dataloader:
calib_data | loader.collate_fn = default_collate
if val_dataloader:
val_dataloader.collate_fn = default_collate
if quantized:
return quantized
else:
raise RuntimeError("Found no quantized model satisfying accuracy criterion.")
def check(self, calib_dataloader, val_dataloader, metric):
"""
Call before self.__call__() to check if the object is well-initialized
for quantization.
"""
if self.cfg.quantization.approach == 'post_training_static_quant':
assert calib_dataloader, \
"calib_calib_dataloader must not be None when approach is " \
"post-training static quantization."
if self.cfg.quantization.approach == 'post_training_dynamic_quant':
assert calib_dataloader is None, \
"calib_calib_dataloader must be None when approach is " \
"post-training dynamic quantization."
if metric and not val_dataloader:
raise RuntimeError("val_dataloader must be specified when metric is not None.")
|
rfmcpherson/killerbee | killerbee/GoodFETatmel128.py | Python | bsd-3-clause | 9,329 | 0.016615 | # GoodFETclient to interface zigduino/atmel128 radio
# forked by bx from code by neighbor Travis Goodspeed
from GoodFETAVR import GoodFETAVR
import sys, binascii, os, array, time, glob, struct
fmt = ("B", "<H", None, "<L")
class GoodFETatmel128rfa1(GoodFETAVR):
ATMELRADIOAPP = 0x53
autocrc = 0
verbose = False
connected = 0
enable_AACK = False
def serInit(self, port=None, timeout=2, attemptlimit=None):
if port==None:
port=os.environ.get("GOODFET");
self.pyserInit(port, timeout, attemptlimit)
def pyserInit(self, port, timeout, attemptlimit):
"""Open the serial port"""
if self.connected == 0:
if (not (attemptlimit == None)) and (attemptlimit <= 1):
# it always takes at least 2 tries
attemptlimit == 2
# Make timeout None to wait forever, 0 for non-blocking mode.
import serial;
if os.name=='nt' and sys.version.find('64 bit')!=-1:
print "WARNING: PySerial requires a 32-bit Python build in Windows.";
if port is None and os.environ.get("GOODFET")!=None:
glob_list = glob.glob(os.environ.get("GOODFET"));
if len(glob_list) > 0:
port = glob_list[0];
else:
port = os.environ.get("GOODFET");
if port is None:
glob_list = glob.glob("/dev/tty.usbserial*");
if len(glob_list) > 0:
port = glob_list[0];
if port is None:
glob_list = glob.glob("/dev/ttyUSB*");
if len(glob_list) > 0:
port = glob_list[0];
if port is None:
glob_list = glob.glob("/dev/ttyU0");
if len(glob_list) > 0:
port = glob_list[0];
if port is None and os.name=='nt':
from scanwin32 import winScan;
scan=winScan();
for order,comport,desc,hwid in sorted(scan.comports()):
try:
if hwid.index('FTDI')==0:
port=comport;
#print "Using FTDI port %s" % port
except:
#Do nothing.
a=1;
baud=115200;
self.serialport = serial.Serial(
port,
baud,
parity = serial.PARITY_NONE,
timeout=timeout
)
self.verb=0;
self.data=""
attempts=0;
self.connected=0;
while self.connected==0:
self.serialport.setDTR(False)
while self.verb!=0x7F or self.data!="http://goodfet.sf.net/":
if attemptlimit is not None and attempts >= attemptlimit:
return
attempts=attempts+1;
self.readcmd(); #Read the first command.
if self.verbose:
print "Got %02x,%02x:'%s'" % (self.app,self.verb,self.data);
#Here we have a connection, but maybe not a good one.
#print "We have a connection."
for foo in range(1,30):
time.sleep(1)
if not self.monitorecho():
self.connected = 0
if self.verbose:
print "Comm error on try %i." % (foo)
else:
self.connected = 1
break
if self.verbose:
print "Connected after %02i attempts." % attempts;
self.serialport.timeout = 12;
def serClose(self):
self.connected = 0
self.serialport.close()
def writecmd(self, app, verb, count=0, data=[]):
"""Write a command and some data to the GoodFET."""
self.serialport.write(chr(app));
self.serialport.write(chr(verb));
if self.verbose:
print "Tx: ( 0x%02x, 0x%02x, %d )" % ( app, verb, count )
if count > 0:
if(isinstance(data,list)):
old = data
data = []
for i in range(0,count):
data += chr(old[i]);
outstr=''.join(data);
#little endian 16-bit length
count = len(outstr)
self.serialport.write(chr(count&0xFF));
self.serialport.write(chr(count>>8));
if count > 0:
if self.verbose:
print "sending: %s" %outstr.encode("hex")
self.serialport.write(outstr);
if not self.besilent:
out = self.readcmd()
if out and self.verbose:
print "read: " + out.encode("hex")
return out
else:
return None
    def readcmd(self):
        """Read a reply from the GoodFET.

        Parses one frame (app, verb, little-endian 16-bit count, payload)
        into ``self.app``, ``self.verb``, ``self.count`` and ``self.data``.
        Returns the payload string, "" for debug frames, or None on a
        read timeout (empty first byte).
        """
        app = self.serialport.read(1)
        if len(app) < 1:
            # Timeout/short read: reset the frame state and bail out.
            if self.verbose:
                print "Rx: None"
            self.app = 0
            self.verb = 0
            self.count = 0
            self.data = ""
            return
        self.app=ord(app);
        v = self.serialport.read(1);
        if v:
            self.verb = ord(v)
        else:
            self.verb = 0
        c1 = self.serialport.read(1)
        c2 = self.serialport.read(1)
        if (c1 and c2):
            self.count= ord(c1) + (ord(c2)<<8)
        else:
            self.count = 0
        if self.verbose:
            print "Rx: ( 0x%02x, 0x%02x, %i )" % ( self.app, self.verb, self.count )
        #Debugging string; print, but wait.
        # app 0xFF frames carry debug output, not command replies.
        if self.app==0xFF:
            if self.verb==0xFF:
                print "# DEBUG %s" % self.serialport.read(self.count)
            elif self.verb==0xFE:
                # ``fmt`` is presumably a module-level table of struct
                # format strings indexed by payload size — TODO confirm.
                print "# DEBUG 0x%x" % struct.unpack(fmt[self.count-1], self.serialport.read(self.count))[0]
            elif self.verb==0xFD:
                #Do nothing, just wait so there's no timeout.
                print "# NOP.";
            return ""
        else:
            self.data=self.serialport.read(self.count);
            return self.data;
def RF_setchannel(self, chan):
if (chan < 11) or (chan > 26):
| print "Channel out of range"
else:
self.poke(0x8, chan)
    def peek(self,reg,bytes=1):
        """Read a radio register via the ATMELRADIOAPP, returning its value.

        reg   -- register address
        bytes -- requested width; only 1 byte is currently supported
        Returns the register byte, or -1 if the firmware sent no data.
        """
        #Automatically calibrate the len.
        if bytes != 1:
            print "Warning, currently cannot poke more than 1 byte"
            bytes = 1
        # NOTE(review): ``bytes%255`` looks like it was meant to be
        # ``bytes&0xFF`` (or %256) for a little-endian length — confirm.
        data = [reg, 0, bytes%255, bytes>>8] #+ ([0]*bytes)
        self.data = None
        self.writecmd(self.ATMELRADIOAPP,0x02,len(data),data);
        toret=0;
        #print self.data.encode("hex")
        if self.data:
            #for i in range(0,bytes):
            # toret=toret|(ord(self.data[i+1])<<(8*i));
            #return toret;
            # right now only works with a byte of data
            return ord(self.data)
        else:
            return -1
def poke(self,reg,val,bytes=1): # todo, support >1 byte
"""Write an Register."""
data = [reg, 0] #+ ([0]*bytes)
data=[reg, 0]
if bytes != 1:
print "Warning, currently cannot poke more than 1 byte"
bytes = 1
for i in range(0,bytes):
data=data+[(val>>(8*i))&0xFF];
self.writecmd(self.ATMELRADIOAPP,0x03,len(data),data);
newval = self.peek(reg,bytes)
if newval!=val:
print "Warning, failed to set r%02x=%02x, got %02x." %(
reg,
val,
newval);
return;
    def setup(self):
        """Initialize the radio; thin alias for RF_setup()."""
        self.RF_setup()
    def RF_setup(self):
        """Ask the GoodFET firmware to initialize the Atmel radio (verb 0x10)."""
        self.writecmd(self.ATMELRADIOAPP, 0x10, 0, None)
def RF_rxpacket(self):
"""Get a packet from the radio. Returns None if none is waiting."""
#doto: check if packet has arrived, flush if not new
self.writecmd(self.ATMELRADIOAPP, 0x80, 0, None)
data=self.data;
self.packetlen = len(data)
if (self.packe |
andpp/cherrymusic | backport/collections/_backported.py | Python | gpl-3.0 | 4,139 | 0.000483 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from U | serDict import DictMixin
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order (backport for Python < 2.7).

    Ordering is tracked in a circular doubly linked list of
    ``[key, prev, next]`` cells (``self.__map``) anchored by a sentinel
    node (``self.__end``); the inherited dict gives O(1) value lookups.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Only initialize the linked list once, even if __init__ is
            # called again (e.g. via copy/unpickle).
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        if key not in self:
            # New key: link a fresh cell just before the sentinel (tail).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink this key's cell from the list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk forward from the sentinel: insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk backward from the sentinel: reverse insertion order.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO when *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()   # .next() is Python 2 only
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Pickle support: the linked list is self-referential, so strip it
        # out of the instance dict and rebuild from the item list instead.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # DictMixin derives the rest of the mapping API from the primitives above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # order-insensitive against a plain dict.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
|
quanvm009/codev7 | openerp/tools/translate.py | Python | agpl-3.0 | 45,384 | 0.003944 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.pooler as pooler
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from os.path import join
from datetime import datetime
from lxml import etree
import config
import misc
from misc import UpdateableStr
from misc import SKIPPED_ELEMENT_TYPES
import osutil
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
    """Look up one translation in ir_translation.

    Filters by language and type, plus name and/or source when given.
    Returns the translated value or False when nothing matches.
    (Prefer ir.translation._get_source when a pool is available.)
    """
    query = 'select value from ir_translation where lang=%s and type=%s'
    params = [lang, source_type]
    if name:
        query += ' and name=%s'
        params.append(str(name))
    if source:
        query += ' and src=%s'
        params.append(source)
    # Match the historic behavior: no query is issued when neither a name
    # nor a source is supplied.
    if name or source:
        cr.execute(query, tuple(params))
    res_trans = cr.fetchone()
    res = res_trans and res_trans[0] or False
    return res
class GettextAlias(object):
    def _get_db(self):
        """Return a Connection for the current thread's database, or None.

        The dispatcher stores the database name on the worker thread;
        without it there is no way to know which DB to translate against.
        """
        # find current DB based on thread/worker db name (see netsvc)
        db_name = getattr(threading.currentThread(), 'dbname', None)
        if db_name:
            return sql_db.db_connect(db_name)
    def _get_cr(self, frame, allow_create=True):
        """Find a database cursor usable from the caller's frame.

        Tries, in order: a local ``cr``/``cursor`` variable, a ``cr``
        attribute on the caller's ``self``, and finally (when
        *allow_create* is true) a fresh cursor on the thread's database.

        Returns ``(cursor, is_new_cr)``; ``is_new_cr`` tells the caller
        it owns the cursor and must close it.
        """
        is_new_cr = False
        cr = frame.f_locals.get('cr', frame.f_locals.get('cursor'))
        if not cr:
            s = frame.f_locals.get('self', {})
            cr = getattr(s, 'cr', None)
        if not cr and allow_create:
            db = self._get_db()
            if db is not None:
                cr = db.cursor()
                is_new_cr = True
        return cr, is_new_cr
def _get_uid(self, frame):
return frame.f_locals.get('uid') or frame.f_locals.get('user')
    def _get_lang(self, frame):
        """Guess the translation language from the caller's frame.

        Tries, in order: an explicit ``context`` local, a context found in
        ``kwargs`` or as the trailing dict of ``args``, the caller's
        ``self.localcontext``, and finally the user's own language read
        from ``res.users`` via a cursor found in the frame.
        Returns the language code or None.
        """
        lang = None
        ctx = frame.f_locals.get('context')
        if not ctx:
            kwargs = frame.f_locals.get('kwargs')
            if kwargs is None:
                # ORM convention: the context often rides as the last
                # positional argument.
                args = frame.f_locals.get('args')
                if args and isinstance(args, (list, tuple)) \
                        and isinstance(args[-1], dict):
                    ctx = args[-1]
            elif isinstance(kwargs, dict):
                ctx = kwargs.get('context')
        if ctx:
            lang = ctx.get('lang')
        s = frame.f_locals.get('self', {})
        if not lang:
            c = getattr(s, 'localcontext', None)
            if c:
                lang = c.get('lang')
        if not lang:
            # Last resort: attempt to guess the language of the user
            # Pitfall: some operations are performed in sudo mode, and we
            #          don't know the original uid, so the language may
            #          be wrong when the admin language differs.
            pool = getattr(s, 'pool', None)
            (cr, dummy) = self._get_cr(frame, allow_create=False)
            uid = self._get_uid(frame)
            if pool and cr and uid:
                lang = pool.get('res.users').context_get(cr, uid)['lang']
        return lang
def __call__(self, source):
res = source
cr = None
is_new_cr = False
try:
frame = inspect.currentframe()
if frame is None:
return source
frame = frame.f_back
if not frame:
return source
lang = self._get_lang(frame)
if lang:
cr, is_new_cr = self._get_cr(frame)
if cr:
# Try to use ir.translation to benefit from global cache if possible
pool = pooler.get_pool(cr.dbname)
res = pool.get('ir.translation')._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
else:
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong |
vileopratama/vitech | src/openerp/service/wsgi_server.py | Python | mit | 8,202 | 0.003292 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
WSGI stack, common code.
"""
import httplib
import urllib
import xmlrpclib
import StringIO
import errno
import logging
import platform
import socket
import sys
import threading
import traceback
import werkzeug.serving
import werkzeug.contrib.fixers
import openerp
import openerp.tools.config as config
_logger = logging.getLogger(__name__)
# XML-RPC fault codes. Some care must be taken when changing these: the
# constants are also defined client-side and must remain in sync.
# User code must use the exceptions defined in ``openerp.exceptions`` (not
# create directly ``xmlrpclib.Fault`` objects).
RPC_FAULT_CODE_CLIENT_ERROR = 1 # indistinguishable from app. error.
RPC_FAULT_CODE_APPLICATION_ERROR = 1
RPC_FAULT_CODE_WARNING = 2
RPC_FAULT_CODE_ACCESS_DENIED = 3
RPC_FAULT_CODE_ACCESS_ERROR = 4
def xmlrpc_return(start_response, service, method, params, string_faultcode=False):
    """
    Helper to call a service's method with some params, using a wsgi-supplied
    ``start_response`` callback.

    :param start_response: WSGI start_response callable
    :param service: RPC service name dispatched via openerp.http
    :param method: method name on that service
    :param params: decoded XML-RPC parameter tuple
    :param string_faultcode: serialize faults with string codes (legacy
        /xmlrpc/ route) instead of integer codes (/xmlrpc/2/ route)

    This is the place to look at to see the mapping between core exceptions
    and XML-RPC fault codes.
    """
    # Map OpenERP core exceptions to XML-RPC fault codes. Specific exceptions
    # defined in ``openerp.exceptions`` are mapped to specific fault codes;
    # all the other exceptions are mapped to the generic
    # RPC_FAULT_CODE_APPLICATION_ERROR value.
    # This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for
    # exception handling.
    try:
        result = openerp.http.dispatch_rpc(service, method, params)
        response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None)
    except Exception, e:
        if string_faultcode:
            response = xmlrpc_handle_exception_string(e)
        else:
            response = xmlrpc_handle_exception_int(e)
    start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))])
    return [response]
def xmlrpc_handle_exception_int(e):
    """Serialize exception *e* as an XML-RPC fault with an integer code.

    Specific ``openerp.exceptions`` classes map to the RPC_FAULT_CODE_*
    constants; anything else becomes a generic application error whose
    fault string carries the formatted traceback.
    """
    if isinstance(e, openerp.exceptions.UserError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.RedirectWarning):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.MissingError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance (e, openerp.exceptions.AccessError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessDenied):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.DeferredException):
        info = e.traceback
        # Which one is the best ?
        formatted_info = "".join(traceback.format_exception(*info))
        #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    else:
        if hasattr(e, 'message') and e.message == 'AccessDenied': # legacy
            fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
            response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
        #InternalError
        else:
            info = sys.exc_info()
            # Which one is the best ?
            formatted_info = "".join(traceback.format_exception(*info))
            #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
            fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
            # NOTE(review): allow_none=None here vs allow_none=False above —
            # both are falsy so behavior matches, but probably a typo.
            response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
    return response
def xmlrpc_handle_exception_string(e):
    """Serialize exception *e* as an XML-RPC fault with a string code.

    Legacy counterpart of xmlrpc_handle_exception_int() for the historic
    /xmlrpc/<service> route, which encodes the fault code as a
    human-readable string instead of an int.
    """
    if isinstance(e, openerp.exceptions.UserError):
        fault = xmlrpclib.Fault('warning -- %s\n\n%s' % (e.name, e.value), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.RedirectWarning):
        fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '')
        # Fix: this branch previously never assigned ``response``, so a
        # RedirectWarning raised UnboundLocalError at the final return.
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.MissingError):
        fault = xmlrpclib.Fault('warning -- MissingError\n\n' + str(e), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessError):
        fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '')
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.AccessDenied):
        fault = xmlrpclib.Fault('AccessDenied', str(e))
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    elif isinstance(e, openerp.exceptions.DeferredException):
        info = e.traceback
        formatted_info = "".join(traceback.format_exception(*info))
        fault = xmlrpclib.Fault(openerp.tools.ustr(e.message), formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
    #InternalError
    else:
        info = sys.exc_info()
        formatted_info = "".join(traceback.format_exception(*info))
        fault = xmlrpclib.Fault(openerp.tools.exception_to_unicode(e), formatted_info)
        response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
    return response
def wsgi_xmlrpc(environ, start_response):
    """ Two routes are available for XML-RPC
    /xmlrpc/<service> route returns faultCode as strings. This is a historic
    violation of the protocol kept for compatibility.
    /xmlrpc/2/<service> is a new route that returns faultCode as int and is
    therefore fully compliant.

    Returns a WSGI response iterable for matching POST requests, or None
    (implicitly) so the next handler can try.
    """
    if environ['REQUEST_METHOD'] == 'POST' and environ['PATH_INFO'].startswith('/xmlrpc/'):
        length = int(environ['CONTENT_LENGTH'])
        data = environ['wsgi.input'].read(length)
        # Distinguish between the 2 faultCode modes
        string_faultcode = True
        if environ['PATH_INFO'].startswith('/xmlrpc/2/'):
            service = environ['PATH_INFO'][len('/xmlrpc/2/'):]
            string_faultcode = False
        else:
            service = environ['PATH_INFO'][len('/xmlrpc/'):]
        params, method = xmlrpclib.loads(data)
        return xmlrpc_return(start_response, service, method, params, string_faultcode)
def application_unproxied(environ, start_response):
    """ WSGI entry point.

    Clears per-thread db/uid trackers, then tries each registered handler
    in order until one produces a response; 404s when none does.
    """
    # cleanup db/uid trackers - they're set at HTTP dispatch in
    # web.session.OpenERPSession.send() and at RPC dispatch in
    # openerp.service.web_services.objects_proxy.dispatch().
    # /!\ The cleanup cannot be done at the end of this `application`
    # method because werkzeug still produces relevant logging afterwards
    if hasattr(threading.current_thread(), 'uid'):
        del threading.current_thread().uid
    if hasattr(threading.current_thread(), 'dbname'):
        del threading.current_thread().dbname
    with openerp.api.Environment.manage():
        # Try all handlers until one returns some result (i.e. not None).
        for handler in [wsgi_xmlrpc, openerp.http.root]:
            result = handler(environ, start_response)
            if result is None:
                continue
            return result
        # We never returned from the loop.
        response = 'No handler found.\n'
        start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', str(len(response)))])
        return [response]
def application(environ, start_response):
    """Top-level WSGI callable.

    When running in proxy mode behind a reverse proxy (detected via the
    X-Forwarded-Host header), wrap the app in werkzeug's ProxyFix so the
    forwarded headers are honored; otherwise dispatch directly.
    """
    behind_proxy = config['proxy_mode'] and 'HTTP_X_FORWARDED_HOST' in environ
    if not behind_proxy:
        return application_unproxied(environ, start_response)
    wrapped = werkzeug.contrib.fixers.ProxyFix(application_unproxied)
    return wrapped(environ, start_response)
nemonik/Intellect | intellect/grammar/PolicyParser.py | Python | bsd-3-clause | 180,891 | 0.013804 | # $ANTLR 3.1.3 Mar 17, 2009 19:23:44 /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g 2013-03-25 15:29:47
import sys
from antlr3 import *
from antlr3.compat import set, frozenset
from intellect.Node import *
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
SLASHEQUAL=38
BACKQUOTE=80
EXPONENT=84
STAR=64
CIRCUMFLEXEQUAL=42
LETTER=82
TRIAPOS=85
GREATEREQUAL=52
COMPLEX=77
ASSIGNEQUAL=24
NOT=21
EOF=-1
NOTEQUAL=55
LEADING_WS=90
MINUSEQUAL=36
VBAR=58
RPAREN=10
IMPORT=7
NAME=12
GREATER=50
INSERT=31
DOUBLESTAREQUAL=45
LESS=49
COMMENT=91
RBRACK=71
RULE=15
LCURLY=72
INT=74
DELETE=29
RIGHTSHIFT=27
DOUBLESLASHEQUAL=46
WS=89
VBAREQUAL=41
OR=47
LONGINT=75
FORGET=28
FROM=8
PERCENTEQUAL=39
LESSEQUAL=53
DOLLAR=79
MODIFY=32
DOUBLESLASH=67
LBRACK=70
CONTINUED_LINE=88
OBJECTBINDING=23
DOUBLESTAR=69
HALT=33
ESC=87
ATTRIBUTE=25
DEDENT=5
FLOAT=76
RIGHTSHIFTEQUAL=44
AND=48
LEARN=30
INDENT=4
LPAREN=9
PLUSEQUAL=35
AS=13
SLASH=65
THEN=20
IN=56
COMMA=11
IS=57
AMPER=60
EQUAL=51
TILDE=68
LEFTSHIFTEQUAL=43
LEFTSHIFT=61
PLUS=62
EXISTS=22
DIGIT=83
DOT=14
AGENDAGROUP=18
PERCENT=66
MINUS=63
SEMI=78
PRINT=26
COLON=16
TRIQUOTE=86
AMPEREQUAL=40
NEWLINE=6
WHEN=19
RCURLY=73
ASSIGN=34
GLOBAL=81
STAREQUAL=37
CIRCUMFLEX=59
STRING=17
ALT_NOTEQUAL=54
# token names
tokenNames = [
"<invalid>", "<EOR>", "<DOWN>", "<UP>",
"INDENT", "DEDENT", "NEWLINE", "IMPORT", "FROM", "LPAREN", "RPAREN",
"COMMA", "NAME", "AS", "DOT", "RULE", "COLON", "STRING", "AGENDAGROUP",
"WHEN", "THEN", "NOT", "EXISTS", "OBJECTBINDING", "ASSIGNEQUAL", "ATTRIBUTE",
"PRINT", "RIGHTSHIFT", "FORGET", "DELETE", "LEARN", "INSERT", "MODIFY",
"HALT", "ASSIGN", "PLUSEQUAL", "MINUSEQUAL", "STAREQUAL", "SLASHEQUAL",
"PERCENTEQUAL", "AMPEREQUAL", "VBAREQUAL", "CIRCUMFLEXEQUAL", "LEFTSHIFTEQUAL",
"RIGHTSHIFTEQUAL", "DOUBLESTAREQUAL", "DOUBLESLASHEQUAL", "OR", "AND",
"LESS", "GREATER", "EQUAL", "GREATEREQUAL", "LESSEQUAL", "ALT_NOTEQUAL",
"NOTEQUAL", "IN", "IS", "VBAR", "CIRCUMFLEX", "AMPER", "LEFTSHIFT",
"PLUS", "MINUS", "STAR", "SLASH", "PERCENT", "DOUBLESLASH", "TILDE",
"DOUBLESTAR", "LBRACK", "RBRACK", "LCURLY", "RCURLY", "INT", "LONGINT",
"FLOAT", "COMPLEX", "SEMI", "DOLLAR", "BACKQUOTE", "GLOBAL", "LETTER",
"DIGIT", "EXPONENT", "TRIAPOS", "TRIQUOTE", "ESC", "CONTINUED_LINE",
"WS", "LEADING_WS", "COMMENT"
]
class PolicyParser(Parser):
grammarFileName = "/Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g"
antlr_version = version_str_to_tuple("3.1.3 Mar 17, 2009 19:23:44")
antlr_version_str = "3.1.3 Mar 17, 2009 19:23:44"
tokenNames = tokenNames
    def __init__(self, input, state=None, *args, **kwargs):
        """ANTLR-generated constructor: wires up the parser's DFA tables.

        Machine-generated from Policy.g — do not hand-edit the DFA setup;
        regenerate with ANTLR instead.
        """
        if state is None:
            state = RecognizerSharedState()
        super(PolicyParser, self).__init__(input, state, *args, **kwargs)
        self.dfa34 = self.DFA34(
            self, 34,
            eot = self.DFA34_eot,
            eof = self.DFA34_eof,
            min = self.DFA34_min,
            max = self.DFA34_max,
            accept = self.DFA34_accept,
            special = self.DFA34_special,
            transition = self.DFA34_transition
            )
        self.dfa54 = self.DFA54(
            self, 54,
            eot = self.DFA54_eot,
            eof = self.DFA54_eof,
            min = self.DFA54_min,
            max = self.DFA54_max,
            accept = self.DFA54_accept,
            special = self.DFA54_special,
            transition = self.DFA54_transition
            )
        self.dfa59 = self.DFA59(
            self, 59,
            eot = self.DFA59_eot,
            eof = self.DFA59_eof,
            min = self.DFA59_min,
            max = self.DFA59_max,
            accept = self.DFA59_accept,
            special = self.DFA59_special,
            transition = self.DFA59_transition
            )
        self.dfa61 = self.DFA61(
            self, 61,
            eot = self.DFA61_eot,
            eof = self.DFA61_eof,
            min = self.DFA61_min,
            max = self.DFA61_max,
            accept = self.DFA61_accept,
            special = self.DFA61_special,
            transition = self.DFA61_transition
            )
# $ANTLR start "file"
# /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:61:1: file returns [object] : ( ( NEWLINE | statement )+ | EOF );
    def file(self, ):
        """ANTLR-generated rule: file : ( NEWLINE | statement )+ | EOF ;

        Builds and returns a File AST node whose children are the parsed
        statements. Machine-generated from Policy.g — regenerate rather
        than hand-edit.
        """
        object = None
        statement1 = None
        object = File()
        try:
            try:
                # /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:63:3: ( ( NEWLINE | statement )+ | EOF )
                alt2 = 2
                LA2_0 = self.input.LA(1)
                if ((NEWLINE <= LA2_0 <= LPAREN) or LA2_0 == NAME or LA2_0 == RULE or LA2_0 == STRING or LA2_0 == NOT or LA2_0 == OBJECTBINDING or (PLUS <= LA2_0 <= MINUS) or LA2_0 == TILDE or LA2_0 == LBRACK or LA2_0 == LCURLY or (INT <= LA2_0 <= COMPLEX)) :
                    alt2 = 1
                elif (LA2_0 == EOF) :
                    alt2 = 2
                else:
                    nvae = NoViableAltException("", 2, 0, self.input)
                    raise nvae
                if alt2 == 1:
                    # /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:63:5: ( NEWLINE | statement )+
                    pass
                    # /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:63:5: ( NEWLINE | statement )+
                    cnt1 = 0
                    while True: #loop1
                        alt1 = 3
                        LA1_0 = self.input.LA(1)
                        if (LA1_0 == NEWLINE) :
                            alt1 = 1
                        elif ((IMPORT <= LA1_0 <= LPAREN) or LA1_0 == NAME or LA1_0 == RULE or LA1_0 == STRING or LA1_0 == NOT or LA1_0 == OBJECTBINDING or (PLUS <= LA1_0 <= MINUS) or LA1_0 == TILDE or LA1_0 == LBRACK or LA1_0 == LCURLY or (INT <= LA1_0 <= COMPLEX)) :
                            alt1 = 2
                        if alt1 == 1:
                            # /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:63:7: NEWLINE
                            pass
                            self.match(self.input, NEWLINE, self.FOLLOW_NEWLINE_in_file78)
                        elif alt1 == 2:
                            # /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:63:17: statement
                            pass
                            self._state.following.append(self.FOLLOW_statement_in_file82)
                            statement1 = self.statement()
                            self._state.following.pop()
                            #action start
                            object.append_child( statement1 )
                            #action end
                        else:
                            if cnt1 >= 1:
                                break #loop1
                            eee = EarlyExitException(1, self.input)
                            raise eee
                        cnt1 += 1
                elif alt2 == 2:
                    # /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:64:5: EOF
                    pass
                    self.match(self.input, EOF, self.FOLLOW_EOF_in_file93)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
        finally:
            pass
        return object
# $ANTLR end "file"
# $ANTLR start "statement"
# /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:67:1: statement returns [object] : ( importStmt | attributeStmt | ruleStmt );
def statement(self, ):
object = None
importStmt2 = None
attributeStmt3 = None
ruleStmt4 = None
try:
try:
# /Users/walsh/Development/workspace/Intellect/intellect/grammar/Policy.g:68:3: ( importStmt | attributeStmt | ruleStmt )
alt3 = 3
LA3 = self.input |
feredean/cs313 | notes/test_recursivity-nope.py | Python | mit | 976 | 0.052254 |
def find_words(letters):
""" find_words from scrabble transformation to
recursive """
results = set()
for a in letters:
if a in WORDS: results.add(a)
if a not in PREFIXES: continue
for b in removed(letters, a):
w = a + b
if w in WORDS: results.add(w)
if w not in PREFIXES: continue
for c in removed(letters, w | ):
w = a + b + c
if w i | n WORDS: results.add(w)
if w not in PREFIXES: continue
for d in removed(letters, w)
w = a + b + c + d
if w in WORDS: results.add(w)
if w not in PREFIXES: continue
for e in removed(letters, w)
w = a + b + c + d + e
if w in WORDS: results.add(w)
if w not in PREFIXES: continue
for f in removed(letters, w)
w = a + b + c + d + e + f
if w in WORDS: results.add(w)
if w not in PREFIXES: continue
for g in removed(letters, w)
w = a + b + c + d + e + g
if w in WORDS: results.add(w)
if not in PREFIXES: contiune |
ytyng/django-elasticindex | elasticindex/managers.py | Python | bsd-3-clause | 9,274 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import logging
import time
from collections import OrderedDict
from contextlib import contextmanager
import six
from django.u | tils.functional import cached_property
from .client import get_es_client
logger = logging.getLogger('elasticindex')
class ElasticQuerySet(object):
def __init__(self, model_cls, body=None, **kwargs):
self.model_cls = model_cls
self.body = body or {"query": { | "match_all": {}}}
self.kwargs = kwargs or {}
self.latest_total_count = None
self.latest_raw_result = None
self.query_finished = False
def __len__(self):
return len(self.result_list)
def __iter__(self):
return iter(self.result_list)
def __bool__(self):
return bool(self.result_list)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self.query_finished:
return self.result_list[k]
if isinstance(k, slice):
qs = self
offset = 0
if k.start is not None:
offset = int(k.start)
qs = qs.offset(offset)
if k.stop is not None:
limit = int(k.stop) - offset
qs = qs.limit(limit)
return list(qs)[::k.step] if k.step else qs
qs = self.limit(1).offset(k)
return list(qs)[0]
def _clone(self):
"""
:rtype: ElasticQuerySet
"""
qs = self.__class__(
self.model_cls, copy.deepcopy(self.body),
**copy.deepcopy(self.kwargs))
return qs
@cached_property
def result_list(self):
self.query_finished = True
return list(self.get_result())
def get_result(self):
"""
elasticsearch の search をそのまま実行
:rtype: generator
"""
with self.log_query():
result = self.es_client.search(
index=self.model_cls.INDEX,
doc_type=self.model_cls.DOC_TYPE,
body=self.body, **self.kwargs)
self.latest_total_count = result['hits']['total']
self.latest_raw_result = result
for hit in result['hits']['hits']:
yield self.model_cls(hit)
@cached_property
def es_client(self):
"""
:rtype: Elasticsearch
"""
return get_es_client()
def get_by_id(self, id):
"""
Elasticsearch のIDで1件取得
:param id:
:return:
"""
result = self.es_client.get(
self.model_cls.INDEX, id, doc_type=self.model_cls.DOC_TYPE)
self.latest_raw_result = result
if not result['found']:
raise self.model_cls.DoesNotExist(id)
return self.model_cls(result)
def delete_by_id(self, id, **kwargs):
"""
Elasticsearch のIDで1件削除
:param id: elasticsearch document id
"""
result = self.es_client.delete(
self.model_cls.INDEX, self.model_cls.DOC_TYPE, id, **kwargs)
self.latest_raw_result = result
return result
def all(self):
"""
:rtype: ElasticQuerySet
"""
return self._clone()
def limit(self, limit):
"""
:rtype: ElasticQuerySet
"""
o = self._clone()
if limit is None:
if 'size' in o.body:
del o.body['size']
else:
o.body['size'] = limit
return o
def offset(self, offset):
"""
:rtype: ElasticQuerySet
"""
o = self._clone()
if offset is None:
if 'from' in o.body:
del o.body['from']
else:
o.body['from'] = offset
return o
def query(self, filter_query_dict):
"""
:param filter_query_dict:
- {"match": {"product_id": 192}}
- {"match_all": {}} # default
- {"multi_match": {
"query": query_word,
"fields": [
"upc", "title^3", "description", "authors",
"publishers", "tags", "keywords"]
}}
- {"bool": {
"must": [
{"match": {"is_used": True}},
{"range": {"stock": {"gt": 0}}}
]}}
:rtype: ElasticQuerySet
"""
o = self._clone()
o.body['query'] = filter_query_dict
return o
def set_body(self, body_dict):
"""
replace query body
"""
o = self._clone()
o.body = body_dict
return o
def get(self, filter_query_dict):
"""
1件取得
複数件あってもエラーは出さず、黙って1件だけ返す
"""
qs = self.query(filter_query_dict).limit(1)
if not qs:
raise self.model_cls.DoesNotExist(filter_query_dict)
return qs[0]
def count(self):
"""
件数取得
"""
if self.query_finished:
return len(self.result_list)
body = self.body.copy()
if 'sort' in body:
del body['sort']
with self.log_query(label='count', body=body):
result = self.es_client.count(
index=self.model_cls.INDEX,
doc_type=self.model_cls.DOC_TYPE,
body=body, **self.kwargs
)
self.latest_raw_result = result
return result['count']
def order_by(self, order_query_list):
"""
sort パラメータをつける
:type order_query_list: list, dict, string
- "mz_score"
- {"mz_score": "desc"}
"""
o = self._clone()
o.body['sort'] = order_query_list
return o
@property
def log_query(self):
"""
クエリをロギングするコンテクストマネージャ
elasticsearch や elasticsearch.trace のロガーを
DEBUG レベルで設定するともっと詳しく出る (結果が全部出る)
"""
@contextmanager
def _context(label='', body=None):
start_time = time.time()
yield
elapsed_time = time.time() - start_time
logger.debug('{}time:{}ms, body:{}'.format(
'{}: '.format(label) if label else '',
int(elapsed_time * 100), body or self.body))
return _context
    def bulk(self, body):
        """Send a raw bulk request against this model's index/doc type.

        :param body: bulk actions in the format expected by the
            Elasticsearch ``bulk`` API.
        """
        return self.es_client.bulk(
            body, index=self.model_cls.INDEX,
            doc_type=self.model_cls.DOC_TYPE)
class ElasticDocumentManager(object):
    """Descriptor that hands out a fresh queryset on every access.

    Subclassing ElasticQuerySet directly would also work, but a shared
    instance could accumulate dirty state; behaving like a class property
    and building a brand-new ElasticQuerySet per access avoids that.
    """
    def __init__(self, model_cls, body=None, **kwargs):
        # NOTE(review): ``body`` is accepted but never stored -- confirm
        # whether any caller relies on it before removing the parameter.
        self.model_cls = model_cls
        self.kwargs = kwargs
    def __get__(self, cls, owner):
        # Descriptor protocol: attribute access on the model class (or an
        # instance) yields a new queryset each time.
        return ElasticQuerySet(self.model_cls)
class ElasticIndexManager(object):
def __init__(self, model_cls):
self.model_cls = model_cls
@cached_property
def mappings_properties(self):
return OrderedDict(
[
(f_name, f.mapping)
for f_name, f
in self.model_cls._cached_fields().items()
])
    @cached_property
    def mappings(self):
        """A dict usable as-is for the index ``mappings`` definition."""
        return {
            self.model_cls.DOC_TYPE: {
                "properties": self.mappings_properties
            }
        }
    def delete(self):
        """Delete the whole index.

        A missing index (HTTP 404) is silently ignored, so this is safe
        to call even when the index was never created.
        """
        es = get_es_client()
        es.indices.delete(self.model_cls.INDEX, ignore=[404, ])
@cached_property
def create_body_params(self):
body = {"mappings": self.mappings}
index_setting = getattr(self.model_cls, 'INDEX_SETTINGS', None)
if index_setting:
body["settings"] = index_setting
|
hagabbar/pycbc_copy | pycbc/waveform/pycbc_phenomC_tmplt.py | Python | gpl-3.0 | 14,998 | 0.008668 | # Copyright (C) 2012 Prayush Kumar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import lal
import numpy
from numpy import sqrt, log, float128
from pycuda.elementwise import ElementwiseKernel
from pycbc.libutils import pkg_config_header_strings
from pycbc.types import FrequencySeries, zeros, Array, complex64
preamble = """
#include <lal/LALConstants.h>
"""
phenomC_text = """
/* ********* Main paper : Phys Rev D82, 064016 (2010) ********* */
const double f = (double) (i + kmin ) * delta_f;
const double fd = (double) m_sec * f;
const double v = (double) cbrt(piM*f);
const double v2 = v * v;
const double v3 = v * v * v;
const double v4 = v2 * v2;
const double v5 = v2 * v3;
const double v6 = v3 * v3;
const double v7 = v3 * v4;
const double w = (double) cbrt( m_sec * f );
const double w3 = (double) w * w * w;
/* ******************************************************* */
/* *********************** Phasing *********************** */
/* This is defined in Eq 5.1 - 5.9, 3.13 of the main paper */
/* ******************************************************* */
double phSPA = 1. + pfa2 * v2 + pfa3 * v3 + pfa4 * v4 +
(1. + log(v3)) * pfa5 * v5 + (pfa6 + pfa6log * log(v3))*v6 +
pfa7 * v7;
phSPA *= (pfaN / v5);
phSPA -= (LAL_PI/4.0);
double phPM = (a1/(w3 * w * w)) + (a2/w3) + (a3/w) + a4 + (a5 * w * w) +(a6 * w3);
phPM /= eta;
double phRD = b1 + b2*fd;
double wPlusf1 = 0.5*(1. + tanh( (4*(fd - Mf1)/d1) ));
double wMinusf1 = 0.5*(1. - tanh( (4*(fd - Mf1)/d1) ));
double wPlusf2 = 0.5*(1. + tanh( (4*(fd - Mf2)/d2) ));
double wMinusf2 = 0.5*(1. - tanh( (4*(fd - Mf2)/d2) ));
double phasing = (phSPA * ((double) wMinusf1)) + (phPM * ((double) wPlusf1 * wMinusf2)) +
(phRD * ((double) wPlusf2));
/* ******************************************************* */
/* ********************** Amplitude **************** */
/* *** This is defined in Eq 5.11 - 5.13, 3.10, 3.6 ****** */
/* ******************************************************* */
    double xdot = 1. + xdota2 * v2 + xdota3 * v3 + xdota4 * v4 + xdota5 * v5 +
          (xdota6 + xdota6log * log(v2)) * v6 + xdota7 * v7;
    xdot *= (xdotaN * v5 * v5);
double omgdot = 0.0, ampfac = 0.0;
double ampSPA = 0.0, ampSPAre = 0.0, ampSPAim = 0.0;
/* If xdot becomes negative, take ampSPA = 0.0 */
/* This is valid because it becomes negative much after ISCO */
if( xdot > 0.0 )
{
omgdot = 1.5 * v * xdot;
ampfac = sqrt( LAL_PI / omgdot );
ampSPAre = ampfac * AN * v2 * (1. + A2 * v2 + A3 * v3 + A4 * v4 +
A5 * v5 + (A6 + A6log * log(v2)) * v6);
ampSPAim = ampfac * AN * v2 * (A5imag * v5 + A6imag * v6);
ampSPA = sqrt( ampSPAre * ampSPAre + ampSPAim * ampSPAim );
}
double ampPM = ampSPA + (g1 * pow(fd, 5./6.));
const double sig = Mfrd * del2 / Q;
double sig2 = sig * sig;
double L = sig2 / ((fd - Mfrd) * (fd - Mfrd) + sig2/4.);
double ampRD = del1 * L * pow( fd, -7./6.);
double wPlusf0 = 0.5*(1. + tanh( (4*(fd - Mf0)/d0) ));
double wMinusf0 = 0.5*(1. - tanh( (4*(fd - Mf0)/d0) ));
double amplitude = (ampPM * ((double) wMinusf0)) + (ampRD * ((double) wPlusf0));
amplitude /= distance;
/* ************** htilde **************** */
htilde[i]._M_re = amplitude * cos( phasing );
htilde[i]._M_im = -1.0 * amplitude * sin( phasing );
"""
phenomC_kernel = ElementwiseKernel("""pycuda::complex<double> *htilde, int kmin, double delta_f,
double eta, double Xi, double distance,
double m_sec, double piM, double Mfrd,
double pfaN, double pfa2, double pfa3, double pfa4,
double pfa5, double pfa6, double pfa6log, double pfa7,
double a1, double a2, double a3, double a4,
double a5, double a6, double b1, double b2,
double Mf1, double Mf2, double Mf0,
double d1, double d2, double d0,
double xdota2, double xdota3, double xdota4,
double xdota5, double xdota6, double xdota6log,
double xdota7, double xdotaN, double AN,
double A2, double A3, double A4, double A5,
double A5imag, double A6, double A6log, double A6imag,
double g1, double del1, double del2, double Q""",
phenomC_text, "phenomC_kernel",
preamble=preamble, options=pkg_config_header_strings(['lal']))
def FinalSpin( Xi, eta ):
    """Compute the dimensionless spin of the black hole formed by the merger.

    Implements the fit of Eq. (5)-(6) of arXiv:0710.3345.

    :param Xi: effective (mass-weighted) aligned spin of the binary
    :param eta: symmetric mass ratio, m1*m2 / (m1 + m2)**2
    :return: final dimensionless spin
    :raises ValueError: if the fit predicts an unphysical spin > 1.0
    """
    # Fit coefficients from arXiv:0710.3345.
    s4 = -0.129
    s5 = -0.384
    t0 = -2.686
    t2 = -3.454
    t3 = 2.353
    etaXi = eta * Xi
    eta2 = eta * eta
    finspin = (Xi + s4*Xi*etaXi + s5*etaXi*eta + t0*etaXi
               + 2.*(3.**0.5)*eta + t2*eta2 + t3*eta2*eta)
    if finspin > 1.0:
        raise ValueError("Value of final spin > 1.0. Aborting")
    return finspin
def fRD( a, M):
    """Calculate the ringdown frequency (Hz) of the final Kerr black hole.

    Uses the fit of Eq. 5.5 of the main paper (Phys Rev D82, 064016).

    :param a: dimensionless spin of the final black hole
    :param M: total mass in solar masses
    """
    # c**3 / (2*pi*G*M) converts the dimensionless fit into a frequency in Hz.
    f = (lal.C_SI**3.0 / (2.0*lal.PI*lal.G_SI*M*lal.MSUN_SI)) * (1.5251 - 1.1568*(1.0-a)**0.1292)
    return f
def Qa( a ):
    """Return the ringdown quality factor for final spin ``a``.

    Fit from Eq. 5.6 of the main paper (Phys Rev D82, 064016).
    """
    one_minus_a = 1.0 - a
    return 0.7 + 1.4187 * one_minus_a ** -0.4990
#Functions to calculate the Tanh window, defined in Eq 5.8 of the main paper
def imrphenomc_tmplt(**kwds):
""" Return an IMRPhenomC waveform using CUDA to generate the phase and amplitude
Main Paper: arXiv:1005.3306
"""
# Pull out the input arguments
f_min = float128(kwds['f_lower'])
f_max = float128(kwds['f_final'])
delta_f = float128(kwds['delta_f'])
distance = float128(kwds['distance'])
mass1 = float128(kwds['mass1'])
mass2 = float128(kwds['mass2'])
spin1z = float128(kwds['spin1z'])
spin2z = float128(kwds['spin2z'])
if 'out' in kwds:
out = kwds['out']
else:
out = None
# Calculate binary parameters
M = mass1 + mass2
eta = mass1 * mass2 / (M * M)
Xi = (mass1 * spin1z / M) + (mass2 * spin2z / M)
Xisum = 2.*Xi
Xiprod = Xi*Xi
Xi2 = Xi*Xi
m_sec = M * lal.MTSUN_SI;
piM = lal.PI * m_sec;
## The units of distance given as input is taken to pe Mpc. Converting to SI
distance *= (1.0e6 * lal.PC_SI / (2. * sqrt(5. / (64.*lal.PI)) * M * lal.MRSUN_SI * M * lal.MTSUN_SI))
# Check if the value of f_max is correctly given, else replace with the fCut
# used in the PhenomB code in lalsimulation. The various coefficients come
# from Eq.(4.18) of http://arxiv.org/pdf/0710.2335 and
# Table I of http://arxiv.org/pdf/0712.0343
if not f_max:
f_max = (1.7086 * eta * eta - 0.26592 * eta + 0.28236) / piM
# Transform the eta, chi to Lambda parameters, using Eq 5.14, Table II of Main
# paper.
z101 = -2.417e-03
z102 = -1.093e-03
z111 = -1.917e-02
z110 = 7.267e-02
z120 = -2.504e-01
z201 = 5.962e-01
z202 = -5.600e-02
z211 = 1.520e-01
z210 = -2.970e+00
z220 = 1.3 |
david-caro/jenkins-job-builder | jenkins_jobs/modules/triggers.py | Python | apache-2.0 | 68,792 | 0 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Triggers define what causes a Jenkins job to start building.
**Component**: triggers
:Macro: trigger
:Entry Point: jenkins_jobs.triggers
Example::
job:
name: test_job
triggers:
- timed: '@daily'
"""
from collections import OrderedDict
import logging
import re
import xml.etree.ElementTree as XML
import six
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
logger = logging.getLogger(str(__name__))
def gerrit_handle_legacy_configuration(data):
hyphenizer = re.compile("[A-Z]")
def hyphenize(attr):
"""Convert strings like triggerOn to trigger-on.
"""
return hyphenizer.sub(lambda x: "-%s" % x.group(0).lower(),
attr)
def convert_dict(d, old_keys):
for old_key in old_keys:
if old_key in d:
new_key = hyphenize(old_key)
logger.warn("'%s' is deprecated and will be removed after "
"1.0.0, please use '%s' instead", old_key, new_key)
d[new_key] = d[old_key]
del d[old_key]
convert_dict(data, [
'triggerOnPatchsetUploadedEvent',
'triggerOnChangeAbandonedEvent',
'triggerOnChangeMergedEvent',
'triggerOnChangeRestoredEvent',
'triggerOnCommentAddedEvent',
'triggerOnDraftPublishedEvent',
'triggerOnRefUpdatedEvent',
'triggerApprovalCategory',
'triggerApprovalValue',
'overrideVotes',
'gerritBuildSuccessfulVerifiedValue',
'gerritBuildFailedVerifiedValue',
'failureMessage',
'skipVote',
])
for project in data['projects']:
convert_dict(project, [
'projectCompareType',
'projectPattern',
'branchCompareType',
'branchPattern',
])
    old_format_events = OrderedDict(
        (key, should_register) for key, should_register in six.iteritems(data)
        if key.startswith('trigger-on-'))
trigger_on = data.setdefault('trigger-on', [])
if old_format_events:
logger.warn("The events: %s; which you used is/are deprecated. "
"Please use 'trigger-on' instead.",
', '.join(old_format_events))
if old_format_events and trigger_on:
raise JenkinsJobsException(
'Both, the new format (trigger-on) and old format (trigger-on-*) '
'gerrit events format found. Please use either the new or the old '
'format of trigger events definition.')
trigger_on.extend(event_name[len('trigger-on-'):]
for event_name, should_register
in six.iteritems(old_format_events) if should_register)
for idx, event in enumerate(trigger_on):
if event == 'comment-added-event':
trigger_on[idx] = events = OrderedDict()
events['comment-added-event'] = OrderedDict((
('approval-category', data['trigger-approval-category']),
('approval-value', data['trigger-approval-value'])
))
def build_gerrit_triggers(xml_parent, data):
    """Append the <triggerOnEvents> element for a Gerrit trigger.

    Simple events map directly to a plugin tag; 'patchset-created-event',
    'comment-added-event' and 'comment-added-contains-event' carry extra
    options and are emitted with sub-elements.

    :param xml_parent: XML element the trigger config is appended to
    :param data: yaml payload of the trigger definition
    :raises JenkinsJobsException: for an unknown event name
    """
    available_simple_triggers = {
        'change-abandoned-event': 'PluginChangeAbandonedEvent',
        'change-merged-event': 'PluginChangeMergedEvent',
        'change-restored-event': 'PluginChangeRestoredEvent',
        'draft-published-event': 'PluginDraftPublishedEvent',
        'patchset-uploaded-event': 'PluginPatchsetCreatedEvent',
        'patchset-created-event': 'PluginPatchsetCreatedEvent',
        'ref-updated-event': 'PluginRefUpdatedEvent',
    }
    tag_namespace = 'com.sonyericsson.hudson.plugins.gerrit.trigger.' \
                    'hudsontrigger.events'
    trigger_on_events = XML.SubElement(xml_parent, 'triggerOnEvents')
    for event in data.get('trigger-on', []):
        if isinstance(event, six.string_types):
            tag_name = available_simple_triggers.get(event)
            if event == 'patchset-uploaded-event':
                logger.warn("'%s' is deprecated. Use 'patchset-created-event' "
                            "format instead.", event)
            if not tag_name:
                # BUG FIX: dict.keys() is a view on Python 3 and cannot be
                # concatenated to a list; make it a list first.
                known = ', '.join(list(available_simple_triggers.keys())
                                  + ['comment-added-event',
                                     'comment-added-contains-event'])
                msg = ("The event '%s' under 'trigger-on' is not one of the "
                       "known: %s.") % (event, known)
                raise JenkinsJobsException(msg)
            XML.SubElement(trigger_on_events,
                           '%s.%s' % (tag_namespace, tag_name))
        else:
            if 'patchset-created-event' in event.keys():
                pce = event['patchset-created-event']
                pc = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace, 'PluginPatchsetCreatedEvent'))
                XML.SubElement(pc, 'excludeDrafts').text = str(
                    pce.get('exclude-drafts', False)).lower()
                XML.SubElement(pc, 'excludeTrivialRebase').text = str(
                    pce.get('exclude-trivial-rebase', False)).lower()
                XML.SubElement(pc, 'excludeNoCodeChange').text = str(
                    pce.get('exclude-no-code-change', False)).lower()
            if 'comment-added-event' in event.keys():
                comment_added_event = event['comment-added-event']
                cadded = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace, 'PluginCommentAddedEvent'))
                XML.SubElement(cadded, 'verdictCategory').text = \
                    comment_added_event['approval-category']
                XML.SubElement(
                    cadded,
                    'commentAddedTriggerApprovalValue').text = \
                    str(comment_added_event['approval-value'])
            if 'comment-added-contains-event' in event.keys():
                comment_added_event = event['comment-added-contains-event']
                caddedc = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace,
                               'PluginCommentAddedContainsEvent'))
                XML.SubElement(caddedc, 'commentAddedCommentContains').text = \
                    comment_added_event['comment-contains-value']
def build_gerrit_skip_votes(xml_parent, data):
    """Append the <skipVote> element describing which build outcomes
    should not cast a Gerrit vote.

    :param xml_parent: XML element the <skipVote> node is appended to
    :param data: yaml payload; its optional 'skip-vote' dict maps outcome
        names (successful/failed/unstable/notbuilt) to booleans
    """
    outcome_tags = [('successful', 'onSuccessful'),
                    ('failed', 'onFailed'),
                    ('unstable', 'onUnstable'),
                    ('notbuilt', 'onNotBuilt')]
    skip_vote_node = XML.SubElement(xml_parent, 'skipVote')
    settings = data.get('skip-vote', {})
    for result_kind, tag_name in outcome_tags:
        flag = 'true' if settings.get(result_kind, False) else 'false'
        XML.SubElement(skip_vote_node, tag_name).text = flag
def gerrit(parser, xml_parent, data):
"""yaml: gerrit
Trigger on a Gerrit event.
Requires the Jenkins :jenkins-wiki:`Gerrit Trigger Plugin <Gerrit+Trigger>`
version >= 2.6.0.
:arg list trigger-on: Events to react on. Please use either the new
**trigger-on**, or the old **trigger-on-*** events definitions. You
cannot use both at once.
.. _trigger_on:
:Trigger on:
* **patchset-creat |
tectronics/admiral-jiscmrd | test/FileShare/tests/TestConfig.py | Python | mit | 984 | 0.022358 | # $Id: $
#
# Test configuration parameters
#
class TestConfig:
    """Static test-environment configuration (hosts, mounts, credentials).

    Alternative hostnames are kept commented out for quick switching
    between deployment targets.
    """
    #hostname = "zoo-admiral-behav.zoo.ox.ac.uk"
    #hostname = "zoo-admiral-silk.zoo.ox.ac.uk"
    #hostname = "zoo-admiral-devel.zoo.ox.ac.uk"
    hostname = "zoo-admiral-ibrg.zoo.ox.ac.uk"
    #hostname = "zakynthos.zoo.ox.ac.uk"
    cifssharename = "data"
    # Repaired: these identifiers/strings were split by stray " | "
    # extraction artifacts ("mountadmira | l", "userDpas | s").
    cifsmountpoint = "mountadmiral"
    webdavmountpoint = "mountadmiralwebdav"
    webdavbaseurl = "http://"+hostname+"/data/"
    readmefile = "ADMIRAL.README"
    readmetext = "This directory is the root of the ADMIRAL shared file system.\n"
    # Test account credentials (test fixtures only -- not real secrets).
    userAname = "TestUser1"
    userApass = "user1"
    userBname = "TestUser2"
    userBpass = "user2"
    userDname = "TestUserD"
    userDpass = "userd"
    userRGleadername = "TestLeader"
    userRGleaderpass = "leader"
    collabname = "TestCollab"
    collabpass = "collab"
|
Flavoured/CodinGamePuzzles | Easy/TheDescent/TheDescent.py | Python | unlicense | 455 | 0.002198 | import sys
import math
# Game loop: each turn read 8 mountain heights and output the index of the
# tallest one (CodinGame "The Descent").  The stray " | " sequences that had
# split "tallest_index" were dataset-extraction artifacts and are removed.
while True:
    tallest_index = -1
    tallest_height = None
    for i in range(8):
        mountain_h = int(input())  # height of mountain i this turn
        # The first height always wins; afterwards only a strictly taller
        # mountain replaces the current best (ties keep the earlier index).
        if tallest_height is None or mountain_h > tallest_height:
            tallest_index = i
            tallest_height = mountain_h
    print(tallest_index)
autotest/virt-test | virttest/libvirt_xml/vm_xml.py | Python | gpl-2.0 | 84,182 | 0.000154 | """
Module simplifying manipulation of XML described at
http://libvirt.org/formatdomain.html
"""
import logging
from autotest.client.shared import error
from virttest import xml_utils
from virttest.libvirt_xml import base, accessors, xcepts
from virttest.libvirt_xml.devices import librarian
class VMXMLDevices(list):
    """
    List of device instances from classes handed out by librarian.get()
    """
    @staticmethod
    def __type_check__(other):
        # Validate that ``other`` looks like a device instance: it must be
        # subscriptable with a 'device_tag' key, and that tag must be one
        # the device librarian knows how to build.
        try:
            # Raise error if object isn't dict-like or doesn't have key
            device_tag = other['device_tag']
            # Check that we have support for this type
            librarian.get(device_tag)
        except (AttributeError, TypeError, xcepts.LibvirtXMLError):
            # Required to always raise TypeError for list API in VMXML class
            raise TypeError("Unsupported item type: %s" % str(type(other)))
    def __setitem__(self, key, value):
        # Reject non-device values before delegating to list.__setitem__.
        self.__type_check__(value)
        super(VMXMLDevices, self).__setitem__(key, value)
        return self
    def append(self, value):
        # Type-checked append; returns self to allow chaining.
        self.__type_check__(value)
        super(VMXMLDevices, self).append(value)
        return self
    def extend(self, iterable):
        # Make sure __type_check__ happens
        for item in iterable:
            self.append(item)
        return self
    def by_device_tag(self, tag):
        # Return a new VMXMLDevices containing only the devices whose
        # device_tag equals ``tag``.
        result = VMXMLDevices()
        for device in self:
            if device.device_tag == tag:
                result.append(device)
        return result
class VMXMLBase(base.LibvirtXMLBase):
"""
Accessor methods for VMXML class properties (items in __slots__)
Properties:
hypervisor_type: string, hypervisor type name
get: return domain's type attribute value
set: change domain type attribute value
del: raise xcepts.LibvirtXMLError
vm_name: string, name of the vm
get: return text value of name tag
set: set text value of name tag
del: raise xcepts.LibvirtXMLError
uuid: string, uuid string for vm
get: return text value of uuid tag
set: set text value for (new) uuid tag (unvalidated)
del: remove uuid tag
vcpu, max_mem, current_mem, iothreads: integers
get: returns integer
set: set integer
del: removes tag
dumpcore: string, control guest OS memory dump
get: return text value
set: set 'on' or 'off' for guest OS memory dump
del: removes tag
numa_memory: dictionary
get: return dictionary of numatune/memory attributes
set: set numatune/memory attributes from dictionary
del: remove numatune/memory tag
numa_memnode: list dict of memnode attributes cellid, mode and nodeset
get: return list of dictionary with numatune/memnode attributes
set: set multiple numatune/memnode attributes from dictionary list
del: remove numatune/memnode tag
on_poweroff: string, action to take when the guest requests a poweroff
get: returns text value of on_poweroff tag
set: set test of on_poweroff tag
del: remove on_poweroff tag
on_reboot: string, action to take when the guest requests a reboot
get: returns text value of on_reboot tag
set: set test of on_reboot tag
del: remove on_reboot tag
on_crash: string, action to take when the guest crashes
get: returns text value of on_crash tag
set: set test of on_crash tag
del: remove on_crash tag
devices: VMXMLDevices (list-like)
get: returns VMXMLDevices instance for all devices
set: Define all devices from VMXMLDevices instance
del: remove all devices
cputune: VMCPUTuneXML
get: return VMCPUTuneXML instance for the domain.
set: Define cputune tag from a VMCPUTuneXML instance.
del: remove cputune tag
cpu: VMCPUXML
get: return VMCPUXML instance for the domain.
set: Define cpu tag from a VMCPUXML instance.
del: remove cpu tag
current_vcpu: string, 'current' attribute of vcpu tag
get: return a string for 'current' attribute of vcpu
set: change 'current' attribute of vcpu
del: remove 'current' attribute of vcpu
placement: string, 'placement' attribute of vcpu tag
get: return a string for 'placement' attribute of vcpu
set: change 'placement' attribute of vcpu
del: remove 'placement' attribute of vcpu
cpuset: string, 'cpuset' attribute of vcpu tag
get: return a string for 'cpuset' attribute of vcpu
set: change 'cpuset' attribute of vcpu
del: remove 'cpuset' attribute of vcpu
emulatorpin: string, cpuset value (see man virsh: cpulist)
get: return text value of cputune/emulatorpin attributes
set: set cputune/emulatorpin attributes from string
del: remove cputune/emulatorpin tag
features: VMFeaturesXML
get: return VMFeaturesXML instances for the domain.
set: define features tag from a VMFeaturesXML instances.
del: remove features tag
mem_backing: VMMemBackingXML
get: return VMMemBackingXML instances for the domain.
set: define memoryBacking tag from a VMMemBackingXML instances.
del: remove memoryBacking tag
max_mem_unit: string, 'unit' attribute of memory
get: return text value of memory unit attribute
set: set memory unit attribute
del: remove memory unit attribute
current_mem_unit: string, 'unit' attribute of memory
get: return text value of current_memory unit attribute
set: set current_memory unit attribute
del: remove current_memory unit attribute
memtune: VMMemTuneXML
get: return VMMemTuneXML instance for the domain.
set: Define memtune tag from a VMCPUTuneXML instance.
del: remove memtune tag
"""
# Additional names of attributes and dictionary-keys instances may contain
__slots__ = ('hypervisor_type', 'vm_name', 'uuid', 'vcpu', 'max_mem',
'current_mem', 'dumpcore', 'numa_memory', 'numa_memnode',
'devices', 'seclabel', 'cputune', 'placement', 'cpuset',
'current_vcpu', 'os', 'cpu', 'pm', 'on_poweroff', 'on_reboot',
'on_crash', 'features', 'mb', 'max_mem_unit',
'current_mem_unit', 'memtune', 'max_mem_rt', 'max_mem_rt_unit',
'max_mem_rt_slots', 'iothreads')
__uncompareable__ = base.LibvirtXMLBase.__uncompareable__
__schema_name__ = "domain"
def __init__(self, virsh_instance=base.virsh):
accessors.XMLAttribute(property_name="hypervisor_type",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='domain',
attribute='type')
        accessors.XMLElementText(property_name="vm_name",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='name')
accessors.XMLElementText(property_name="uuid",
libvirtxml=self,
                                 forbidden=None,
parent_xpath='/',
tag_name='uuid')
accessors.XMLElementInt(property_name="iothreads",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='iothreads')
accessors.XMLElementInt(property_name="vcpu",
libvirtxml=self,
|
xavi783/u-tad | Modulo4/ejercicio3/reducer.py | Python | gpl-3.0 | 915 | 0.020765 | #! /usr/bin/python
import sys
sys.path.append('../')
from toolbox.hreaders import token_readers as reader
from toolbox.hreducers import list_reducer as reducer
SOLO_FACTURA = False
def reduction(x, y):
    """Return whichever CSV record (``"key,value"``) has the larger
    integer second field; ties keep the first argument.
    """
    v1 = x.split(',')
    v2 = y.split(',')
    return x if int(v1[1]) >= int(v2[1]) else y
_reader = reader.Token_reader("\t",1)
_reducer = reducer.List_reducer(reduction) #x: previous reduction result, y: next element
if SOLO_FACTURA:
for line in sys.stdin:
key, value = _reader.read_all(line)
K,V = _reducer.reduce(key,value)
if K:
print '{}\t{}'.format(V.split(',')[0],V.split(',')[1])
V = _reducer.out.split(',')
    print '{}\t{}'.format(V[0],V[1])
else:
for line in sys.stdin:
key, value = _reader.read_all(line)
K,V = _reducer.reduce(key,value)
if K:
print '{}\t{}'.format(K,V)
print '{}\t{}'.format(key,V) |
JelteF/Flask-Migrate | tests/app.py | Python | mit | 518 | 0.001931 | #!/bin/env python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class User(db.Model):
    # Minimal demo model used to exercise Flask-Migrate migrations.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128))
if __name__ == '__main__':
manager.run()
|
OneTraTown/douban_group | doubangroup/spiders/rotate_useragent.py | Python | gpl-3.0 | 2,788 | 0.001435 | from scrapy.utils.log import configure_logging
import random
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
class RotateUserAgentMiddleware(UserAgentMiddleware):
def __init__(self,user_agent=''):
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.ua_useragent)
if ua:
configure_logging({'INFO':'Current_useragent'+ua})
request.headers.setdefault('User-Agent',ua)
ua_useragent = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
"(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
"(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
"(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
"(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
"(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
"(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
|
AdamantLife/alcustoms | alcustoms/sql/tests/sqltest__depricated__.py | Python | gpl-3.0 | 8,816 | 0.010776 |
#########################################################################
""" SQL TOOLS """
#########################################################################
"""
SQL Tools
Tools provided for parsing sql statements using regex. For each function, a precompiled re object using
re.VERBOSE and re.IGNORECASE are provided; the python-compliant regex string used to compile the re object
is available using the suffix "_regex". e.g.- CreateTable is the precompiled re object for parsing the major
components of a CREATE TABLE command; the SQL string used to compile CreateTable can be referenced via
CreateTable_regex.
"""
SchemaName_regex = """
(?P<schema>(?P<schemaname>.+)\.)? ## schemaname"."
(?P<tablename>[^\(]+)\s* ## tablename
"""
SchemaName = re.compile(SchemaName_regex, re.VERBOSE | re.IGNORECASE)
SelectStatement_regex = """(?P<asselect>AS\s+\((?P<select>[\s\S]*)\))"""
SelectStatement = re.compile(SelectStatement_regex,re.IGNORECASE)
CreateTable_regex = f"""
CREATE\s+ ## "Create"
(?P<temptag>TEMP\s+|TEMPORARY\s+)? ## Temporary Tag (TEMP or TEMPORARY
(TABLE|VIEW)\s+ ## "Table" or "View"
(?P<ifnotexisttag>IF\s+NOT\s+EXISTS\s+)? ## "IF NOT EXISTS"
{SchemaName_regex} ## "schema.?name"
(
{SelectStatement_regex} ## "AS ("expression")"
| ## OR
(?P<columns>\((?P<columndefs>[\s\S]*)\)\s*) ## "("definitions")"
(?P<norowid>WITHOUT\s+ROWID)? ## "WITHOUT ROWID"
);? ## End Statement semicolon?
"""
CreateTable = re.compile(CreateTable_regex,re.VERBOSE | re.IGNORECASE)
ColumnDefinition_regex = """
(?P<column_definition> ## Column Definition
(?P<columnname> ## Ways to define column name
\".+\" ## Any characters within quotes
| \w+ ## As a continuous string of letters
)\s*
(?P<column_type> ## Column Type
(?: ## Negative Look-Ahead Container
(?! ## NL-A Regex
PRIMARY\ KEY ## Stop at Constraint Keywords
| UNIQUE
| CHECK\W
| FOREIGN\ KEY
| REFERENCES
| NOT\ NULL
)
\S)* ## NL-A Cont. captures One Character at a Time
)\s*
(?P<column_constraint> ## Column Constraints, if any
(?: ## Viable Column Constraints
PRIMARY\ KEY
| NOT\ NULL
| UNIQUE
| CHECK
| DEFAULT
| REFERENCES
| COLLATE
)
.*?
)?\s* | ## Capture All Data (Handling it separately)
(?:,|$) ## Declaration ends with comma or end-of-string
)
"""
ColumnDefinition = re.compile(ColumnDefinition_regex, re.VERBOSE | re.IGNORECASE)
TableDefinition_regex = f"""
(?P<multiline_comment>/\*\s*(?P<mc | _text>[\s\S]*?)\s*\*/) ## /* Multiline Comment */
| (?P<comment>--(?P<commenttext>.*)) ## -- Regular Comment
| (?P<table_constraint> ## Table Constraints
(?P<tablecon_type> ## Viable Table Constraints
PRIMARY\ KEY
| UNIQUE
| CHECK
| FOREIGN\ KEY
)\W
(?: ## Beginning Capturing
(?! ## Negative Lookahead (Stop if you match the following)
$ ## End of string
| UNIQUE\W ## Table Constraint Keywords
| CHECK\W
| FOREIGN\ KEY\W
)
.)+ ## Capture one character at a time
)
| {ColumnDefinition_regex}
"""
TableDefinition = re.compile(TableDefinition_regex, re.VERBOSE | re.IGNORECASE)
ConstraintDefinition_regex = """
(?P<constraint_type> ## Match Constraint Type From List
PRIMARY\ KEY
| NOT\ NULL
| UNIQUE
| CHECK
| DEFAULT
| REFERENCES
| COLLATE
| FOREIGN\ KEY
)\s*
(?P<cc_info> ## Get Additional Info
(
(?! ## Match info up until next constraint Keyword
PRIMARY\ KEY
| NOT\ NULL
| UNIQUE
| CHECK
| DEFAULT
| REFERENCES
| COLLATE
| FOREIGN\ KEY
| ON\ CONFLICT) ## Be sure to skip ON CONFLICT
.)*
)?\s*
(?P<on_conflict>ON\ CONFLICT\ ## Check for ON CONFLICT clause
(?P<conflict_clause> ## Identify Type
ROLLBACK
| ABORT
| FAIL
| IGNORE
| REPLACE
)
)?
"""
ConstraintDefinition = re.compile(ConstraintDefinition_regex, re.VERBOSE | re.IGNORECASE)
ClauseColumnIdentifier_regex = """
(?P<multiple>\((?P<columns>.*)\))
|
(?P<single>.+)
"""
ClauseColumnIdentifier = re.compile(ClauseColumnIdentifier_regex, re.VERBOSE | re.IGNORECASE)
def _parse_definition(self):
    """ Parses the Table's definition as part of the instantiation of the class """
    # Top-level parse of the CREATE TABLE/VIEW statement; the named groups
    # come from the CreateTable regex defined above.
    r = self._regex_result = CreateTable.search(self.definition)
    if not self._regex_result:
        raise AttributeError("Invalid Table Definition")
    if r.group("schema"):
        self._schema = r.group("schemaname")
    self._name = r.group("tablename").strip()
    # Optional flags: TEMP/TEMPORARY, IF NOT EXISTS, WITHOUT ROWID.
    if r.group("temptag"):
        self._istemporary = True
    if r.group("ifnotexisttag"):
        self._ifnotexists = True
    if r.group("norowid"):
        self._norowid = True
    if r.group("columns"):
        columndefs = r.group("columndefs")
        ## Iterate over each matched element
        for line in TableDefinition.finditer(columndefs):
            ## Table Constraints
            if line.group("table_constraint"):
                self._tableconstraints.append(TableConstraint.parse(line.group("table_constraint"))[0])
            ## Column Definitions
            elif line.group("column_definition"):
                column = Column.parse_regex(line)
                column.table = self
                self._columns[str(column.name)] = column
            ## We're just dropping comments for right now. If anything else: raise error
            elif not line.group("multiline_comment") and not line.group("comment"):
                raise ValueError(f"Could not parse line: {line.group(0)}")
        # Back-link each table constraint onto the columns it references.
        for constraint in self.tableconstraints:
            for column in constraint.columns:
                if column in self.columns:
                    idx = self.columns.index(column)
                    self.columns[idx].tableconstraints.append(constraint)
    elif r.group("asselect"):
        # CREATE ... AS (SELECT ...) form is not implemented yet.
        raise RuntimeError("TODO")
    else:
        raise ValueError("Could Not Determine Table Type")
#########################################################################
""" Depricated SQLTests """
#########################################################################
""" <<GROUP INDICES>>
0 |id INT PRIMARY KEY,
1 2 |value FLOAT, --This is a comment
3 |quantity NOT NULL,
4 5 |name TEXT UNIQUE, "many-on-one-line" Text,
6 |thecheck REAL CHECK(thecheck is not null),
7 |complexcheck NUMERIC CHECK (thecheck IS (id > 10)),
8 |thedefault DEFAULT +1,
9 |anotherdefault DEFAULT foobar,
10 |scientificdefault DEFAULT 123.321e+987,
11 |quoteddefault DEFAULT " |
dlebauer/plantcv | lib/plantcv/logical_xor.py | Python | gpl-2.0 | 460 | 0.023913 | ### Join images (XOR)
import cv2
from . import print_image
def logical_xor(img1, img2, device, debug=False):
    """Join two grayscale images with the bitwise XOR operator.

    img1, img2 = image objects, grayscale
    device     = device number, used to count steps in the pipeline
    debug      = if True, write the merged image to disk for inspection
    Returns (device, merged) where merged is the XOR-combined image.
    """
    device += 1
    merged = cv2.bitwise_xor(img1, img2)
    if debug:
        print_image(merged, '{0}_xor_joined.png'.format(device))
    return device, merged
ctrlaltdel/neutrinator | vendor/urllib3/util/ssl_.py | Python | gpl-3.0 | 13,786 | 0.000435 | from __future__ import absolute_import
import errno
import warnings
import hmac
import re
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
from ..packages import six
from ..packages.rfc3986 import abnf_regexp
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False
# Maps the length of a digest to a possible hash function producing this digest
HASHFUNC_MAP = {
32: md5,
40: sha1,
64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
_const_compare_digest = getattr(hmac, 'compare_digest',
_const_compare_digest_backport)
# Borrow rfc3986's regular expressions for IPv4
# and IPv6 addresses for use in is_ipaddress()
_IP_ADDRESS_REGEX = re.compile(
r'^(?:%s|%s|%s)$' % (
abnf_regexp.IPv4_RE,
abnf_regexp.IPv6_RE,
abnf_regexp.IPv6_ADDRZ_RFC4007_RE
)
)
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_REQUIRED
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try: # Platform-specific: Python 3.6
from ssl import PROTOCOL_TLS
PROTOCOL_SSLv23 = PROTOCOL_TLS
except ImportError:
try:
from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
PROTOCOL_SSLv23 = PROTOCOL_TLS
except ImportError:
PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
# security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs, DSS, and other
# insecure ciphers for security reasons.
# - NOTE: TLS 1.3 cipher suites are managed through a different interface
# not exposed by CPython (yet!) and are enabled by default if they're available.
DEFAULT_CIPHERS = ':'.join([
'ECDHE+AESGCM',
'ECDHE+CHACHA20',
'DHE+AESGCM',
'DHE+CHACHA20',
'ECDH+AESGCM',
'DH+AESGCM',
'ECDH+AES',
'DH+AES',
'RSA+AESGCM',
'RSA+AES',
'!aNULL',
'!eNULL',
'!MD5',
'!DSS',
])
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    # Fallback for old Pythons whose ssl module lacks SSLContext: emulate
    # just enough of the SSLContext API for urllib3 by remembering the
    # configuration and replaying it through ssl.wrap_socket().
    class SSLContext(object):  # Platform-specific: Python 2
        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            # Stash the client certificate/key; applied later in wrap_socket().
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, cafile=None, capath=None):
            self.ca_certs = cafile
            if capath is not None:
                # ssl.wrap_socket() only accepts a single CA bundle file.
                raise SSLError("CA directories not supported in older Pythons")

        def set_ciphers(self, cipher_suite):
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None, server_side=False):
            # server_hostname is accepted for API compatibility but is ignored
            # by this shim (ssl.wrap_socket has no SNI support).
            warnings.warn(
                'A true SSLContext object is not available. This prevents '
                'urllib3 from configuring SSL appropriately and may cause '
                'certain SSL connections to fail. You can upgrade to a newer '
                'version of Python to solve this. For more information, see '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings',
                InsecurePlatformWarning
            )
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
                'server_side': server_side,
            }
            return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError: if the fingerprint length is unknown or does not match.
    """
    # Normalise: strip colon separators and lowercase the hex digits.
    expected = fingerprint.replace(':', '').lower()
    # The digest length identifies the hash function (md5/sha1/sha256).
    hashfunc = HASHFUNC_MAP.get(len(expected))
    if not hashfunc:
        raise SSLError(
            'Fingerprint of invalid length: {0}'.format(expected))
    # We need encode() here for py32; works on py2 and p33.
    expected_bytes = unhexlify(expected.encode())
    cert_digest = hashfunc(cert).digest()
    # Constant-time comparison to avoid leaking timing information.
    if not _const_compare_digest(cert_digest, expected_bytes):
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(expected, hexlify(cert_digest)))
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_REQUIRED`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        # The secure default is CERT_REQUIRED (older docstrings wrongly
        # claimed CERT_NONE; the code has always returned CERT_REQUIRED).
        return CERT_REQUIRED

    if isinstance(candidate, str):
        res = getattr(ssl, candidate, None)
        if res is None:
            # Allow the abbreviated form, e.g. "REQUIRED" -> ssl.CERT_REQUIRED.
            res = getattr(ssl, 'CERT_' + candidate)
        return res

    return candidate
def resolve_ssl_version(candidate):
    """
    Resolve *candidate* to an ``ssl`` module protocol constant, mirroring
    :func:`resolve_cert_reqs`: ``None`` maps to :data:`PROTOCOL_TLS`, a
    string is looked up in :mod:`ssl` (with or without its ``PROTOCOL_``
    prefix), and anything else is returned unchanged.
    """
    if candidate is None:
        return PROTOCOL_TLS

    if isinstance(candidate, str):
        resolved = getattr(ssl, candidate, None)
        if resolved is not None:
            return resolved
        # Allow the short form, e.g. "TLSv1" -> ssl.PROTOCOL_TLSv1.
        return getattr(ssl, 'PROTOCOL_' + candidate)

    return candidate
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param cipher |
mercadopago/px-android | scripts/rename_resources.py | Python | mit | 559 | 0.039356 | import os
import sys
import fnmatch
directory | = os.path.dirname(os.path.r | ealpath(sys.argv[0])) #get the directory of your script
for subdir, dirs, files in os.walk(directory):
print(files)
for filename in files:
if fnmatch.fnmatch(filename,'mpsdk_*') > 0:
subdirectoryPath = os.path.relpath(subdir, directory) #get the path to your subdirectory
filePath = os.path.join(subdirectoryPath, filename) #get the path to your file
newFilePath = filePath.replace("mpsdk_","px_") #create the new name
os.rename(filePath, newFilePath) #rename your file
|
sssllliang/edx-analytics-pipeline | edx/analytics/tasks/tests/acceptance/test_user_activity.py | Python | agpl-3.0 | 5,050 | 0.00297 | """Ensure we can compute activity for a set of events"""
import datetime
from edx.analytics.tasks.tests.acceptance import AcceptanceTestCase
class UserActivityAcceptanceTest(AcceptanceTestCase):
    """Ensure we can compute activity for a set of events"""

    # Fixture tracking log uploaded before the tasks run.
    INPUT_FILE = 'user_activity_tracking.log'
    # Analysis window end (exclusive).
    END_DATE = datetime.date(2014, 7, 1)
    NUM_WEEKS = 6
    # Both course-id formats appear in the fixture: legacy slash-separated
    # and the newer "course-v1:" form.
    COURSE_ID = u'edX/Open_DemoX/edx_demo_course'
    COURSE_ID2 = u'course-v1:edX+DemoX+Test_2014'
    NUM_REDUCERS = 1

    def test_user_activity(self):
        """Run the weekly, daily and monthly activity tasks and verify each table."""
        self.maxDiff = None
        self.upload_tracking_log(self.INPUT_FILE, self.END_DATE)

        # --- Weekly aggregation -------------------------------------------
        self.task.launch([
            'CourseActivityWeeklyTask',
            '--source', self.test_src,
            '--end-date', self.END_DATE.isoformat(),
            '--weeks', str(self.NUM_WEEKS),
            '--credentials', self.export_db.credentials_file_url,
            '--n-reduce-tasks', str(self.NUM_REDUCERS),
        ])

        with self.export_db.cursor() as cursor:
            cursor.execute('SELECT course_id, interval_start, interval_end, label, count FROM course_activity ORDER BY course_id, interval_end, label')
            results = cursor.fetchall()

        # pylint: disable=line-too-long
        self.assertItemsEqual(list(results), [
            (self.COURSE_ID2, datetime.datetime(2014, 5, 19, 0, 0), datetime.datetime(2014, 5, 26, 0, 0), 'ACTIVE', 1),
            (self.COURSE_ID2, datetime.datetime(2014, 5, 19, 0, 0), datetime.datetime(2014, 5, 26, 0, 0), 'PLAYED_VIDEO', 1),
            (self.COURSE_ID2, datetime.datetime(2014, 6, 16, 0, 0), datetime.datetime(2014, 6, 23, 0, 0), 'ACTIVE', 4),
            (self.COURSE_ID2, datetime.datetime(2014, 6, 16, 0, 0), datetime.datetime(2014, 6, 23, 0, 0), 'ATTEMPTED_PROBLEM', 1),
            (self.COURSE_ID2, datetime.datetime(2014, 6, 16, 0, 0), datetime.datetime(2014, 6, 23, 0, 0), 'PLAYED_VIDEO', 3),
            (self.COURSE_ID, datetime.datetime(2014, 6, 9, 0, 0), datetime.datetime(2014, 6, 16, 0, 0), 'ACTIVE', 1),
            (self.COURSE_ID, datetime.datetime(2014, 6, 9, 0, 0), datetime.datetime(2014, 6, 16, 0, 0), 'PLAYED_VIDEO', 1),
            (self.COURSE_ID, datetime.datetime(2014, 6, 16, 0, 0), datetime.datetime(2014, 6, 23, 0, 0), 'ACTIVE', 4),
            (self.COURSE_ID, datetime.datetime(2014, 6, 16, 0, 0), datetime.datetime(2014, 6, 23, 0, 0), 'ATTEMPTED_PROBLEM', 2),
            (self.COURSE_ID, datetime.datetime(2014, 6, 16, 0, 0), datetime.datetime(2014, 6, 23, 0, 0), 'PLAYED_VIDEO', 3),
        ])

        # --- Daily aggregation --------------------------------------------
        self.task.launch([
            'CourseActivityDailyTask',
            '--source', self.test_src,
            '--interval', '2014-05-25-' + self.END_DATE.isoformat(),
            '--credentials', self.export_db.credentials_file_url,
            '--n-reduce-tasks', str(self.NUM_REDUCERS),
        ])

        with self.export_db.cursor() as cursor:
            cursor.execute('SELECT course_id, date, label, count FROM course_activity_daily ORDER BY course_id, date, label')
            results = cursor.fetchall()

        self.assertItemsEqual(list(results), [
            (self.COURSE_ID2, datetime.date(2014, 5, 25), 'ACTIVE', 1),
            (self.COURSE_ID2, datetime.date(2014, 5, 25), 'PLAYED_VIDEO', 1),
            (self.COURSE_ID2, datetime.date(2014, 6, 19), 'ACTIVE', 4),
            (self.COURSE_ID2, datetime.date(2014, 6, 19), 'ATTEMPTED_PROBLEM', 1),
            (self.COURSE_ID2, datetime.date(2014, 6, 19), 'PLAYED_VIDEO', 3),
            (self.COURSE_ID, datetime.date(2014, 6, 12), 'ACTIVE', 1),
            (self.COURSE_ID, datetime.date(2014, 6, 12), 'PLAYED_VIDEO', 1),
            (self.COURSE_ID, datetime.date(2014, 6, 19), 'ACTIVE', 4),
            (self.COURSE_ID, datetime.date(2014, 6, 19), 'ATTEMPTED_PROBLEM', 2),
            (self.COURSE_ID, datetime.date(2014, 6, 19), 'PLAYED_VIDEO', 3),
        ])

        # --- Monthly aggregation ------------------------------------------
        self.task.launch([
            'CourseActivityMonthlyTask',
            '--source', self.test_src,
            '--end-date', self.END_DATE.isoformat(),
            '--months', str(2),
            '--credentials', self.export_db.credentials_file_url,
            '--n-reduce-tasks', str(self.NUM_REDUCERS),
        ])

        with self.export_db.cursor() as cursor:
            cursor.execute('SELECT course_id, year, month, label, count FROM course_activity_monthly ORDER BY course_id, year, month, label')
            results = cursor.fetchall()

        self.assertItemsEqual(list(results), [
            (self.COURSE_ID2, 2014, 5, 'ACTIVE', 1),
            (self.COURSE_ID2, 2014, 5, 'PLAYED_VIDEO', 1),
            (self.COURSE_ID2, 2014, 6, 'ACTIVE', 4),
            (self.COURSE_ID2, 2014, 6, 'ATTEMPTED_PROBLEM', 1),
            (self.COURSE_ID2, 2014, 6, 'PLAYED_VIDEO', 3),
            (self.COURSE_ID, 2014, 6, 'ACTIVE', 4),
            (self.COURSE_ID, 2014, 6, 'ATTEMPTED_PROBLEM', 2),
            (self.COURSE_ID, 2014, 6, 'PLAYED_VIDEO', 3),
        ])
|
howthebodyworks/pelican-plugins | liquid_tags/notebook.py | Python | agpl-3.0 | 10,596 | 0.002926 | """
Notebook Tag
------------
This is a liquid-style tag to include a static html rendering of an IPython
notebook in a blog post.
Syntax
------
{% notebook filename.ipynb [ cells[start:end] language[language] ]%}
The file should be specified relative to the ``notebooks`` subdirectory of the
content directory. Optionally, this subdirectory can be specified in the
config file:
NOTEBOOK_DIR = 'notebooks'
The cells[start:end] statement is optional, and can be used to specify which
block of cells from the notebook to include.
The language statement is obvious and can be used to specify whether ipython2
or ipython3 syntax highlighting should be used.
Requirements
------------
- The plugin requires IPython version 1.0 or above. It no longer supports the
standalone nbconvert package, which has been deprecated.
Details
-------
Because the notebook relies on some rather extensive custom CSS, the use of
this plugin requires additional CSS to be inserted into the blog theme.
After typing "make html" when using the notebook tag, a file called
``_nb_header.html`` will be produced in the main directory. The content
of the file should be included in the header of the theme. An easy way
to accomplish this is to add the following lines within the header template
of the theme you use:
{% if EXTRA_HEADER %}
{{ EXTRA_HEADER }}
{% endif %}
and in your ``pelicanconf.py`` file, include the line:
EXTRA_HEADER = open('_nb_header.html').read().decode('utf-8')
this will insert the appropriate CSS. All efforts have been made to ensure
that this CSS will not override formats within the blog theme, but there may
still be some conflicts.
"""
import warnings
import re
import os
from functools import partial
from io impor | t open
from .mdx_liquid_tags import LiquidTags
import | IPython
IPYTHON_VERSION = IPython.version_info[0]
try:
import nbformat
except:
pass
if not IPYTHON_VERSION >= 1:
raise ValueError("IPython version 1.0+ required for notebook tag")
if IPYTHON_VERSION > 1:
warnings.warn("Pelican plugin is not designed to work with IPython "
"versions greater than 1.x. CSS styles have changed in "
"later releases.")
try:
from nbconvert.filters.highlight import _pygments_highlight
except ImportError:
try:
from IPython.nbconvert.filters.highlight import _pygments_highlight
except ImportError:
# IPython < 2.0
from IPython.nbconvert.filters.highlight import _pygment_highlight as _pygments_highlight
from pygments.formatters import HtmlFormatter
try:
from nbconvert.exporters import HTMLExporter
except ImportError:
from IPython.nbconvert.exporters import HTMLExporter
try:
from traitlets.config import Config
except ImportError:
from IPython.config import Config
try:
from nbconvert.preprocessors import Preprocessor
except ImportError:
try:
from IPython.nbconvert.preprocessors import Preprocessor
except ImportError:
# IPython < 2.0
from IPython.nbconvert.transformers import Transformer as Preprocessor
try:
from traitlets import Integer
except ImportError:
from IPython.utils.traitlets import Integer
from copy import deepcopy
#----------------------------------------------------------------------
# Some code that will be added to the header:
# Some of the following javascript/css include is adapted from
# IPython/nbconvert/templates/fullhtml.tpl, while some are custom tags
# specifically designed to make the results look good within the
# pelican-octopress theme.
JS_INCLUDE = r"""
<style type="text/css">
/* Overrides of notebook CSS for static HTML export */
div.entry-content {
overflow: visible;
padding: 8px;
}
.input_area {
padding: 0.2em;
}
a.heading-anchor {
white-space: normal;
}
.rendered_html
code {
font-size: .8em;
}
pre.ipynb {
color: black;
background: #f7f7f7;
border: none;
box-shadow: none;
margin-bottom: 0;
padding: 0;
margin: 0px;
font-size: 13px;
}
/* remove the prompt div from text cells */
div.text_cell .prompt {
display: none;
}
/* remove horizontal padding from text cells, */
/* so it aligns with outer body text */
div.text_cell_render {
padding: 0.5em 0em;
}
img.anim_icon{padding:0; border:0; vertical-align:middle; -webkit-box-shadow:none; -box-shadow:none}
div.collapseheader {
width=100%;
background-color:#d3d3d3;
padding: 2px;
cursor: pointer;
font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;
}
</style>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
tex2jax: {
inlineMath: [['$','$'], ['\\(','\\)']],
processEscapes: true,
displayMath: [['$$','$$'], ["\\[","\\]"]]
}
});
</script>
<script type="text/javascript" async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/MathJax.js?config=TeX-MML-AM_CHTML">
</script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script type="text/javascript">
jQuery(document).ready(function($) {
$("div.collapseheader").click(function () {
$header = $(this).children("span").first();
$codearea = $(this).children(".input_area");
console.log($(this).children());
$codearea.slideToggle(500, function () {
$header.text(function () {
return $codearea.is(":visible") ? "Collapse Code" : "Expand Code";
});
});
});
});
</script>
"""
CSS_WRAPPER = """
<style type="text/css">
{0}
</style>
"""
#----------------------------------------------------------------------
# Create a custom preprocessor
class SliceIndex(Integer):
    """An integer trait that accepts None"""
    default_value = None

    def validate(self, obj, value):
        # None is explicitly allowed (meaning "open slice end"); anything
        # else must pass the normal Integer validation.
        if value is None:
            return value
        return super(SliceIndex, self).validate(obj, value)
class SubCell(Preprocessor):
    """A transformer to select a slice of the cells of a notebook"""

    start = SliceIndex(0, config=True,
                       help="first cell of notebook to be converted")
    end = SliceIndex(None, config=True,
                     help="last cell of notebook to be converted")

    def preprocess(self, nb, resources):
        """Return a deep copy of *nb* keeping only cells[start:end]."""
        clipped = deepcopy(nb)
        if IPYTHON_VERSION < 3:
            # IPython < 3 nests cells inside worksheets.
            for worksheet in clipped.worksheets:
                worksheet.cells = worksheet.cells[self.start:self.end]
        else:
            clipped.cells = clipped.cells[self.start:self.end]
        return clipped, resources

    call = preprocess  # IPython < 2.0
#----------------------------------------------------------------------
# Custom highlighter:
# instead of using class='highlight', use class='highlight-ipynb'
def custom_highlighter(source, language='ipython', metadata=None):
    """Highlight *source* with pygments, emitting class='highlight-ipynb'
    (instead of the default 'highlight') and pre class='ipynb' so the
    notebook CSS does not clash with the blog theme's own highlighting.
    """
    highlighted = _pygments_highlight(
        source,
        HtmlFormatter(cssclass='highlight-ipynb'),
        language or 'ipython')
    return highlighted.replace('<pre>', '<pre class="ipynb">')
#----------------------------------------------------------------------
# Below is the pelican plugin code.
#
SYNTAX = "{% notebook /path/to/notebook.ipynb [ cells[start:end] ] [ language[language] ] %}"
FORMAT = re.compile(r"""^(\s+)?(?P<src>\S+)(\s+)?((cells\[)(?P<start>-?[0-9]*):(?P<end>-?[0-9]*)(\]))?(\s+)?((language\[)(?P<language>-?[a-z0-9\+\-]*)(\]))?(\s+)?$""")
@LiquidTags.register('notebook')
def notebook(preprocessor, tag, markup):
match = FORMAT.search(markup)
if match:
argdict = match.groupdict()
src = argdict['src']
start = argdict['start']
end = argdict['end']
language = argdict['language']
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
if start:
start = int(start)
else:
start = 0
if end:
end = int(end)
else:
end = None
language_applied_highlighter = partial(custom_highlighter, language=language)
nb_dir = preprocessor.configs.getConfig('NOTEBOOK_DIR')
nb_path = os.path.join('content', nb_dir, src)
if not os.path.ex |
edisonlz/fruit | web_project/base/site-packages/django/http/cookie.py | Python | apache-2.0 | 3,531 | 0.001982 | from __future__ import absolute_import, unicode_literals
from django.utils.encoding import force_str
from django.utils import six
from django.utils.six.moves import http_cookies
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = http_cookies.SimpleCookie()
try:
    # Probe whether the stdlib cookie parser tolerates ':' inside names.
    _tc.load(str('foo:bar=1'))
    _cookie_allows_colon_in_names = True
except http_cookies.CookieError:
    _cookie_allows_colon_in_names = False

if _cookie_encodes_correctly and _cookie_allows_colon_in_names:
    # The stdlib implementation is good enough; use it directly.
    SimpleCookie = http_cookies.SimpleCookie
else:
    Morsel = http_cookies.Morsel

    # Subclass that patches only the behaviours the probes above found
    # lacking; each override is defined conditionally so a given Python
    # only pays for the fixes it actually needs.
    class SimpleCookie(http_cookies.SimpleCookie):
        if not _cookie_encodes_correctly:
            def value_encode(self, val):
                # Some browsers do not support quoted-string from RFC 2109,
                # including some versions of Safari and Internet Explorer.
                # These browsers split on ';', and some versions of Safari
                # are known to split on ', '. Therefore, we encode ';' and ','
                # SimpleCookie already does the hard work of encoding and decoding.
                # It uses octal sequences like '\\012' for newline etc.
                # and non-ASCII chars. We just make use of this mechanism, to
                # avoid introducing two encoding schemes which would be confusing
                # and especially awkward for javascript.
                # NB, contrary to Python docs, value_encode returns a tuple containing
                # (real val, encoded_val)
                val, encoded = super(SimpleCookie, self).value_encode(val)
                encoded = encoded.replace(";", "\\073").replace(",","\\054")
                # If encoded now contains any quoted chars, we need double quotes
                # around the whole string.
                if "\\" in encoded and not encoded.startswith('"'):
                    encoded = '"' + encoded + '"'
                return val, encoded

        if not _cookie_allows_colon_in_names:
            def load(self, rawdata):
                # Collect keys that failed to parse, then drop them instead
                # of letting the whole load raise.
                self.bad_cookies = set()
                if six.PY2 and isinstance(rawdata, six.text_type):
                    rawdata = force_str(rawdata)
                super(SimpleCookie, self).load(rawdata)
                for key in self.bad_cookies:
                    del self[key]

            # override private __set() method:
            # (needed for using our Morsel, and for laxness with CookieError)
            # The name is pre-mangled (_BaseCookie__set) so it shadows
            # BaseCookie's private __set used internally by load().
            def _BaseCookie__set(self, key, real_value, coded_value):
                key = force_str(key)
                try:
                    M = self.get(key, Morsel())
                    M.set(key, real_value, coded_value)
                    dict.__setitem__(self, key, M)
                except http_cookies.CookieError:
                    self.bad_cookies.add(key)
                    dict.__setitem__(self, key, http_cookies.Morsel())
| dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
    """Return a dict mapping cookie names to values parsed from *cookie*.

    ``cookie`` may be a raw ``Cookie:`` header string or an already-parsed
    ``BaseCookie`` instance.  Falsy input (``''``, ``None``) yields an empty
    dict, and a string that raises ``CookieError`` is treated as empty
    rather than propagating the error.
    """
    if not cookie:
        # Also covers None, which previously fell through to load() and
        # raised an unrelated AttributeError.
        return {}
    if isinstance(cookie, http_cookies.BaseCookie):
        c = cookie
    else:
        c = SimpleCookie()
        try:
            c.load(cookie)
        except http_cookies.CookieError:
            # Invalid cookie
            return {}
    return {key: morsel.value for key, morsel in c.items()}
|
sasongko26/sideka-desktop | login_indikator_kemiskinan.py | Python | gpl-2.0 | 2,727 | 0.013935 | import wx
import input_indikator_kemiskinan
import peringatan
import frm_sideka_menu
def create(parent):
    """Module-level factory: build and return the password dialog."""
    return Dialog1(parent)
[wxID_DIALOG1, wxID_DIALOG1BUTTON1, wxID_DIALOG1BUTTON2,
wxID_DIALOG1STATICLINE1, wxID_DIALOG1STATICTEXT1, wxID_DIALOG1STATICTEXT2,
wxID_DIALOG1TEXTCTRL1,
] = [wx.NewId() for _init_ctrls in range(7)]
class Dialog1(wx.Dialog):
    """Password prompt ("Otentifikasi") shown before opening the
    poverty-indicator input form.  Layout code is Boa-generated."""

    def _init_ctrls(self, prnt):
        # generated method, don't edit
        wx.Dialog.__init__(self, id=wxID_DIALOG1, name='', parent=prnt,
              pos=wx.Point(515, 304), size=wx.Size(402, 140), style=wx.CAPTION,
              title=u'Otentifikasi')
        self.SetClientSize(wx.Size(402, 140))
        self.staticText1 = wx.StaticText(id=wxID_DIALOG1STATICTEXT1,
              label=u'Password', name='staticText1', parent=self,
              pos=wx.Point(16, 64), size=wx.Size(60, 17), style=0)
        self.textCtrl1 = wx.TextCtrl(id=wxID_DIALOG1TEXTCTRL1, name='textCtrl1',
              parent=self, pos=wx.Point(96, 56), size=wx.Size(296, 25),
              style=wx.TE_PASSWORD, value='')
        self.staticText2 = wx.StaticText(id=wxID_DIALOG1STATICTEXT2,
              label=u'MASUKAN PASSWORD DAHULU', name='staticText2', parent=self,
              pos=wx.Point(104, 16), size=wx.Size(203, 17), style=0)
        self.button1 = wx.Button(id=wxID_DIALOG1BUTTON1, label=u'Lanjutkan',
              name='button1', parent=self, pos=wx.Point(208, 96),
              size=wx.Size(184, 30), style=0)
        self.button1.Bind(wx.EVT_BUTTON, self.OnButton1Button,
              id=wxID_DIALOG1BUTTON1)
        self.button2 = wx.Button(id=wxID_DIALOG1BUTTON2,
              label=u'Kembali Ke Menu', name='button2', parent=self,
              pos=wx.Point(16, 96), size=wx.Size(184, 30), style=0)
        self.button2.Bind(wx.EVT_BUTTON, self.OnButton2Button,
              id=wxID_DIALOG1BUTTON2)
        self.staticLine1 = wx.StaticLine(id=wxID_DIALOG1STATICLINE1,
              name='staticLine1', parent=self, pos=wx.Point(16, 40),
              size=wx.Size(368, 2), style=0)

    def __init__(self, parent):
        self._init_ctrls(parent)

    def OnButton1Button(self, event):
        """'Lanjutkan' (continue): check the password, then open the next window."""
        # NOTE(review): hard-coded plaintext password; consider moving it to
        # configuration or comparing against a hash.  Kept as-is here.
        oneng = 'andri'
        user_password = self.textCtrl1.GetValue()
        if user_password == oneng:
            # Correct password: open the indicator input form, close this dialog.
            self.main=input_indikator_kemiskinan.create(None)
            self.main.Show()
            self.Close()
        else:
            # Wrong password: close this dialog and show the warning window.
            self.Close()
            self.main=peringatan.create(None)
            self.main.Show()

    def OnButton2Button(self, event):
        """'Kembali Ke Menu': close this dialog and return to the main menu."""
        self.Close()
        self.main=frm_sideka_menu.create(None)
        self.main.Show()
|
Thierry46/CalcAl | gui/PatientFrame.py | Python | gpl-3.0 | 15,036 | 0.004123 | # -*- coding: utf-8 -*-
"""
************************************************************************************
Class : PatientFrame
Author : Thierry Maillard (TMD)
Date : 26/11/2016 - 1/12/2016
Role : Define Patient frame content.
Licence : GPLv3
Copyright (c) 2016 - Thierry Maillard
This file is part of CalcAl project.
CalcAl project is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CalcAl project is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CalcAl project. If not, see <http://www.gnu.org/licenses/>.
************************************************************************************
"""
import datetime
import tkinter
from tkinter.ttk import Combobox
from tkinter import messagebox
from util import CalcalExceptions
from . import CallTypWindow
from . import FrameBaseCalcAl
class PatientFrame(FrameBaseCalcAl.FrameBaseCalcAl):
""" Patient frame used to manage patient information """
def __init__(self, master, mainWindow, logoFrame, patientFrameModel):
""" Initialize Patients Frame """
super(PatientFrame, self).__init__(master, mainWindow, logoFrame)
self.patientFrameModel = patientFrameModel
self.patientFrameModel.addObserver(self)
self.listPathologies = []
##########
# Patient definition Frame
patientMainFrame = tkinter.LabelFrame(self, text=_("Patient information"), padx=10)
patientMainFrame.pack(side=tkinter.TOP)
rightFrame = tkinter.Frame(patientMainFrame)
rightFrame.pack(side=tkinter.LEFT)
patientDefinitionFrame = tkinter.Frame(rightFrame)
patientDefinitionFrame.pack(side=tkinter.TOP)
labelPatientCode = tkinter.Label(patientDefinitionFrame, text=_("Patient code") + " :")
labelPatientCode.grid(row=0, column=0, sticky=tkinter.E)
CallTypWindow.createToolTip(labelPatientCode,
_("Type new patient code or select an existant"),
self.delaymsTooltips)
self.patientCodeVar = tkinter.StringVar()
self.patientCodeVar.trace_variable("w", self.changePatient)
widthCode = int(self.configApp.get('Size', 'patientCodeComboboxWidth'))
self.patientCodeCombobox = Combobox(patientDefinitionFrame, exportselection=0,
textvariable=self.patientCodeVar,
state=tkinter.NORMAL,
width=widthCode)
self.patientCodeCombobox.grid(row=0, column=1, sticky=tkinter.W)
tkinter.Label(patientDefinitionFrame, text=_("Birth year") + " :").grid(row=1, column=0,
| sticky=tkinter.E)
self.currentYear = datetime.datetime.now().year
self.oldestPatient = int(self.configApp.get('Patient', 'oldestPatient'))
self.birthYearCombobox = Combobox(patientDefinitionFrame, exportselection=0,
state="readonly",
width=len(str(self.currentYear + self.old | estPatient)),
values=list(range(self.currentYear-self.oldestPatient,
self.currentYear+1)))
self.birthYearCombobox.bind('<<ComboboxSelected>>', self.modifyPatient)
self.birthYearCombobox.grid(row=1, column=1, sticky=tkinter.W)
tkinter.Label(patientDefinitionFrame, text=_("Gender") + " :").grid(row=2, column=0,
sticky=tkinter.E)
genderFrame = tkinter.Frame(patientDefinitionFrame)
genderFrame.grid(row=2, column=1, sticky=tkinter.EW)
self.genderVar = tkinter.StringVar()
tkinter.Radiobutton(genderFrame, text=_("M"), variable=self.genderVar, value="M",
command=self.modifyPatient).pack(side=tkinter.LEFT)
tkinter.Radiobutton(genderFrame, text=_("F"), variable=self.genderVar, value="F",
command=self.modifyPatient).pack(side=tkinter.LEFT)
tkinter.Radiobutton(genderFrame, text=_("U"), variable=self.genderVar, value="U",
command=self.modifyPatient).pack(side=tkinter.LEFT)
tkinter.Label(patientDefinitionFrame, text=_("Size") + " (cm) :").grid(row=3, column=0,
sticky=tkinter.E)
self.sizeMin = int(self.configApp.get('Patient', 'sizeMin'))
self.sizeMax = int(self.configApp.get('Patient', 'sizeMax'))
self.sizeCombobox = Combobox(patientDefinitionFrame, exportselection=0,
state="readonly", width=len(str(self.sizeMax)),
values=list(range(self.sizeMin, self.sizeMax+1)))
self.sizeCombobox.bind('<<ComboboxSelected>>', self.modifyPatient)
self.sizeCombobox.grid(row=3, column=1, sticky=tkinter.W)
# Buttons command
buttonDefinitionFrame = tkinter.Frame(rightFrame)
buttonDefinitionFrame.pack(side=tkinter.TOP)
tkinter.Button(buttonDefinitionFrame, text=_("Delete"),
command=self.deletePatient).pack(side=tkinter.LEFT)
# Notes frame
patientNoteFrame = tkinter.LabelFrame(patientMainFrame, text=_("Notes for this patient"),
padx=10)
patientNoteFrame.pack(side=tkinter.LEFT)
self.patientNotesTextEditor = tkinter.Text(patientNoteFrame,
wrap=tkinter.NONE,
height=10, width=30,
background=self.configApp.get('Colors',
'colorPatientEditor'))
self.patientNotesTextEditor.bind('<FocusOut>', self.modifyPatient)
self.patientNotesTextEditor.grid(row=2, columnspan=2)
scrollbarRightNotes = tkinter.Scrollbar(patientNoteFrame,
command=self.patientNotesTextEditor.yview)
scrollbarRightNotes.grid(row=2, column=2, sticky=tkinter.W+tkinter.N+tkinter.S)
scrollbarBottom = tkinter.Scrollbar(patientNoteFrame, orient=tkinter.HORIZONTAL,
command=self.patientNotesTextEditor.xview)
scrollbarBottom.grid(row=3, columnspan=2, sticky=tkinter.N+tkinter.E+tkinter.W)
self.patientNotesTextEditor.config(yscrollcommand=scrollbarRightNotes.set)
self.patientNotesTextEditor.config(xscrollcommand=scrollbarBottom.set)
patientListsFrame = tkinter.Frame(patientMainFrame, padx=10)
patientListsFrame.pack(side=tkinter.LEFT)
# Pathologies listbox for this patient
pathologiesListboxFrame = tkinter.LabelFrame(patientListsFrame,
text=_("Patient pathologies"))
pathologiesListboxFrame.pack(side=tkinter.TOP)
color = self.configApp.get('Colors', 'colorPathologiesList')
self.pathologiesListbox = tkinter.Listbox(pathologiesListboxFrame,
selectmode=tkinter.EXTENDED,
background=color, height=9, width=20,
exportselection=False)
self.pathologiesListbox.grid(row=0, columnspan=2)
CallTypWindow.createToolTip(self.pathologiesListbox,
_("Use Ctrl and Shift keys") + "\n" + \
_("for multiple selection"),
self.d |
schlueter/ansible-lint | lib/ansiblelint/__init__.py | Python | mit | 6,392 | 0.000626 | # Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT | NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from collections import defaultdict
import os
import ansiblelint.utils
class AnsibleL | intRule(object):
def __repr__(self):
return self.id + ": " + self.shortdesc
def verbose(self):
return self.id + ": " + self.shortdesc + "\n " + self.description
def match(self, file="", line=""):
return []
def matchlines(self, file, text):
matches = []
# arrays are 0-based, line numbers are 1-based
# so use prev_line_no as the counter
for (prev_line_no, line) in enumerate(text.split("\n")):
result = self.match(file, line)
if result:
message = None
if isinstance(result, str):
message = result
matches.append(Match(prev_line_no+1, line,
file['path'], self, message))
return matches
def matchtask(self, file="", task=None):
return []
def matchtasks(self, file, text):
matches = []
yaml = ansiblelint.utils.parse_yaml_linenumbers(text)
if yaml:
for task in ansiblelint.utils.get_action_tasks(yaml, file):
if 'skip_ansible_lint' in task.get('tags', []):
continue
if 'action' in task:
result = self.matchtask(file, task)
if result:
message = None
if isinstance(result, str):
message = result
taskstr = "Task/Handler: " + ansiblelint.utils.task_to_str(task)
matches.append(Match(task[ansiblelint.utils.LINE_NUMBER_KEY], taskstr,
file['path'], self, message))
return matches
def matchyaml(self, file, text):
matches = []
yaml = ansiblelint.utils.parse_yaml_linenumbers(text)
if yaml and hasattr(self, 'matchplay'):
for play in yaml:
result = self.matchplay(file, play)
if result:
(section, message) = result
matches.append(Match(play[ansiblelint.utils.LINE_NUMBER_KEY], section,
file['path'], self, message))
return matches
class RulesCollection(object):
def __init__(self):
self.rules = []
def register(self, obj):
self.rules.append(obj)
def __iter__(self):
return iter(self.rules)
def __len__(self):
return len(self.rules)
def extend(self, more):
self.rules.extend(more)
def run(self, playbookfile, tags=set(), skip_tags=set()):
text = ""
matches = list()
with open(playbookfile['path'], 'Ur') as f:
text = f.read()
for rule in self.rules:
if not tags or not set(rule.tags).isdisjoint(tags):
if set(rule.tags).isdisjoint(skip_tags):
matches.extend(rule.matchlines(playbookfile, text))
matches.extend(rule.matchtasks(playbookfile, text))
matches.extend(rule.matchyaml(playbookfile, text))
return matches
def __repr__(self):
return "\n".join([rule.verbose()
for rule in sorted(self.rules, key=lambda x: x.id)])
def listtags(self):
tags = defaultdict(list)
for rule in self.rules:
for tag in rule.tags:
tags[tag].append("[{0}]".format(rule.id))
results = []
for tag in sorted(tags):
results.append("{0} {1}".format(tag, tags[tag]))
return "\n".join(results)
@classmethod
def create_from_directory(cls, rulesdir):
result = cls()
result.rules = ansiblelint.utils.load_plugins(os.path.expanduser(rulesdir))
return result
class Match:
def __init__(self, linenumber, line, filename, rule, message=None):
self.linenumber = linenumber
self.line = line
self.filename = filename
self.rule = rule
self.message = message or rule.shortdesc
def __repr__(self):
formatstr = "[{0}] ({1}) matched {2}:{3} {4}"
return formatstr.format(self.rule.id, self.message,
self.filename, self.linenumber, self.line)
class Runner:
def __init__(self, rules, playbooks, tags, skip_tags):
self.rules = rules
self.playbooks = set()
for pb in playbooks:
self.playbooks.add((pb, 'playbook'))
self.tags = tags
self.skip_tags = skip_tags
def run(self):
files = list()
for playbook in self.playbooks:
files.append({'path': playbook[0], 'type': playbook[1]})
visited = set()
while (visited != self.playbooks):
for arg in self.playbooks - visited:
for file in ansiblelint.utils.find_children(arg):
self.playbooks.add((file['path'], file['type']))
files.append(file)
visited.add(arg)
matches = list()
for file in files:
matches.extend(self.rules.run(file, tags=set(self.tags),
skip_tags=set(self.skip_tags)))
return matches
|
JioCloud/python-openstackclient | openstackclient/volume/client.py | Python | apache-2.0 | 2,241 | 0 | # Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from cinderclient import extension
from cinderclient.v1.contrib import list_extensions
from cinderclient.v1 import volume_snapshots
from cinderclient.v1 import volumes
from openstackclient.common import utils
# Monkey patch for v1 cinderclient
volumes.Volume.NAME_ATTR = 'display_name'
volume_snapshots.Snapshot.NAME_ATTR = 'display_name'
LOG = logging.getLogger(__name__)
DEFAULT_VOLUME_API_VERSION = '1'
API_VERSION_OPTION = 'os_volume_api_version'
API_NAME = "volume"
API_VERSIONS = {
"1": "cinderclient.v1.client.Client"
}
def make_client(instance):
"""Returns a volume service client."""
volume_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS
)
LOG.debug('Instantiating volume client: %s', volume_client)
# Set client http_log_debug to True if verbosity level is high enough
http_log_debug = utils.get_effective_log_level() <= logging.DEBUG
extensions = [extension.Extension('list_extensions', list_extensions)]
client = volume_client(
session=instance.session,
extensions=extensions,
http_log_debug=http_log_debug,
)
return client
def build_option_parser(parser):
"""Hook to add global options"""
parser.add_argument(
'--os-vo | lume-api-version',
metavar='<volume-api-version>',
default=utils.env(
'OS_VOLUME_API_VERSION',
default=DEFAULT_VOLUME_API_VERSION),
help='Volume API version, default=' +
DEFAULT_VOLUME_API | _VERSION +
' (Env: OS_VOLUME_API_VERSION)')
return parser
|
luma/filterchain | tools/cpplint.py | Python | mit | 234,744 | 0.008703 | #!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbo | sity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories th | at start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment' |
DeadSix27/python_cross_compile_script | tools/split.py | Python | mpl-2.0 | 1,806 | 0.048173 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os,re,sys,pprint,shutil
from pathlib import Path
PACKAGES_DIR = "../packages"
def errorExit(msg):
print(msg)
sys.exit(1)
def isPathDisabled(path):
for part in path.parts:
if part.lower().startswith("_disabled"):
return True
return False
depsFolder = Path("_deps_split")
prodFolder = Path("_prods_split")
merged_deps = Path("merged_deps.py" )
merged_prods = Path("merged_prods.py")
if not os.path.isfile(merged_deps):
errorExit("Merged depends file does n | ot exist")
if not os.path.isfile(merged_prods):
errorExit("Merged products file does not exist")
if not os.path.isdir(depsFolder):
os.makedirs(depsFolder)
else:
print("Clearing old split folder:" + str(depsFolder))
shutil.rmtree(depsFolder)
os.makedirs(depsFolder)
if not os.path.isdir(prodFolder):
os.makedirs(prodFolder)
else:
print("Clearing old split folder:" + str(prodFolder))
shutil.rmtree(prodFolder)
os.makedirs(prodFolder)
things = { "merged_deps.py | " : depsFolder, "merged_prods.py" : prodFolder, }
for mergefile_name in things:
mergedFile = None
enableWrite = False
curFile = None
print("Splitting " +mergefile_name+ " into seperate files in " + str(things[mergefile_name]))
with open(mergefile_name, "r", encoding="utf-8") as f:
mergedFile = f.read().split("\n")
fileBuffer = ""
for line in mergedFile:
startR = re.search("^########START:\[(.+)\]$",line)
endR = re.search("^########END:\[(.+)\]$",line)
if endR != None:
enableWrite = False
curFile.write(fileBuffer.rstrip("\n"))
curFile.close()
if enableWrite:
fileBuffer+=line+"\n"
if startR != None:
enableWrite = True
fileBuffer = ""
curFile = open(os.path.join(things[mergefile_name],startR.groups()[0]) ,"w",encoding="utf-8")
print("Done") |
pombredanne/invenio | modules/bibformat/lib/elements/bfe_comments.py | Python | gpl-2.0 | 2,111 | 0.008527 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints comments posted for the record
"""
__revision__ = "$Id$"
from invenio.webcomment import get_first_comments_or_remarks
def format_element(bfo, nbReviews='all', nbComments='all'):
"""
Prints comments posted for the record.
@param nbReviews: The max number of reviews to print
@param nbComments: The max number of comments to print
"""
nb_reviews = nbReviews
if nb_reviews.isdigit():
nb_reviews = int(nb_reviews)
nb_comments = nbComments
if nb_comments.isdigit():
nb_comments = int(nb_comments)
(comments, reviews) = get_first_comments_or_remarks(recID=bfo.recID,
| ln=bfo.lang,
nb_comments=nb_comments,
nb_reviews=nb_reviews,
| voted=-1,
reported=-1,
user_info=bfo.user_info)
return comments + reviews
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
|
clovertrail/cloudinit-bis | tests/unittests/test_datasource/test_digitalocean.py | Python | gpl-3.0 | 11,493 | 0 | #
# Copyright (C) 2014 Neal Shrader
#
# Author: Neal Shrader <neal@digitalocean.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from cloudinit import helpers
from cloudinit import settings
from cloudinit.sources import DataSourceDigitalOcean
from cloudinit.sources.helpers import digitalocean
from ..helpers import mock, TestCase
DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
"ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
# the following JSON was taken from droplet (that's why its a string)
DO_META = json.loads("""
{
"droplet_id": "22532410",
"hostname": "utl-96268",
"vendor_data": "vendordata goes here",
"user_data": "userdata goes here",
"public_keys": "",
"auth_key": "authorization_key",
"region": "nyc3",
"interfaces": {
"private": [
{
"ipv4": {
"ip_address": "10.132.6.205",
"netmask": "255.255.0.0",
"gateway": "10.132.0.1"
},
"mac": "04:01:57:d1:9e:02",
"type | ": "private"
}
],
"public": [
{
"ipv4": {
"ip_address": "192.0.0.20",
"netmask": "255.255.255.0",
"gateway": "104.236.0.1"
},
" | ipv6": {
"ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
"cidr": 64,
"gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
},
"anchor_ipv4": {
"ip_address": "10.0.0.5",
"netmask": "255.255.0.0",
"gateway": "10.0.0.1"
},
"mac": "04:01:57:d1:9e:01",
"type": "public"
}
]
},
"floating_ip": {
"ipv4": {
"active": false
}
},
"dns": {
"nameservers": [
"2001:4860:4860::8844",
"2001:4860:4860::8888",
"8.8.8.8"
]
}
}
""")
# This has no private interface
DO_META_2 = {
"droplet_id": 27223699,
"hostname": "smtest1",
"vendor_data": "\n".join([
('"Content-Type: multipart/mixed; '
'boundary=\"===============8645434374073493512==\"'),
'MIME-Version: 1.0',
'',
'--===============8645434374073493512==',
'MIME-Version: 1.0'
'Content-Type: text/cloud-config; charset="us-ascii"'
'Content-Transfer-Encoding: 7bit'
'Content-Disposition: attachment; filename="cloud-config"'
'',
'#cloud-config',
'disable_root: false',
'manage_etc_hosts: true',
'',
'',
'--===============8645434374073493512=='
]),
"public_keys": [
"ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"
],
"auth_key": "88888888888888888888888888888888",
"region": "nyc3",
"interfaces": {
"public": [{
"ipv4": {
"ip_address": "45.55.249.133",
"netmask": "255.255.192.0",
"gateway": "45.55.192.1"
},
"anchor_ipv4": {
"ip_address": "10.17.0.5",
"netmask": "255.255.0.0",
"gateway": "10.17.0.1"
},
"mac": "ae:cc:08:7c:88:00",
"type": "public"
}]
},
"floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
"dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
"tags": None,
}
DO_META['public_keys'] = DO_SINGLE_KEY
MD_URL = 'http://169.254.169.254/metadata/v1.json'
def _mock_dmi():
return (True, DO_META.get('id'))
class TestDataSourceDigitalOcean(TestCase):
"""
Test reading the meta-data
"""
def get_ds(self, get_sysinfo=_mock_dmi):
ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
settings.CFG_BUILTIN, None, helpers.Paths({}))
ds.use_ip4LL = False
if get_sysinfo is not None:
ds._get_sysinfo = get_sysinfo
return ds
@mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo')
def test_returns_false_not_on_docean(self, m_read_sysinfo):
m_read_sysinfo.return_value = (False, None)
ds = self.get_ds(get_sysinfo=None)
self.assertEqual(False, ds.get_data())
self.assertTrue(m_read_sysinfo.called)
@mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
def test_metadata(self, mock_readmd):
mock_readmd.return_value = DO_META.copy()
ds = self.get_ds()
ret = ds.get_data()
self.assertTrue(ret)
self.assertTrue(mock_readmd.called)
self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw())
self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw())
self.assertEqual(DO_META.get('region'), ds.availability_zone)
self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id())
self.assertEqual(DO_META.get('hostname'), ds.get_hostname())
# Single key
self.assertEqual([DO_META.get('public_keys')],
ds.get_public_ssh_keys())
self.assertIsInstance(ds.get_public_ssh_keys(), list)
@mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
def test_multiple_ssh_keys(self, mock_readmd):
metadata = DO_META.copy()
metadata['public_keys'] = DO_MULTIPLE_KEYS
mock_readmd.return_value = metadata.copy()
ds = self.get_ds()
ret = ds.get_data()
self.assertTrue(ret)
self.assertTrue(mock_readmd.called)
# Multiple keys
self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys())
self.assertIsInstance(ds.get_public_ssh_keys(), list)
class TestNetworkConvert(TestCase):
def _get_networking(self):
netcfg = digitalocean.convert_network_configuration(
DO_META['interfaces'], DO_META['dns']['nameservers'])
self.assertIn('config', netcfg)
return netcfg
def test_networking_defined(self):
netcfg = self._get_networking()
self.assertIsNotNone(netcfg)
for nic_def in netcfg.get('config'):
print(json.dumps(nic_def, indent=3))
n_type = nic_def.get('type')
n_subnets = nic_def.get('type')
n_name = nic_def.get('name')
n_mac = nic_def.get('mac_address')
self.assertIsNotNone(n_type)
self.assertIsNotNone(n_subnets)
self.assertIsNotNone(n_name)
self.assertIsNotNone(n_mac)
def _get_nic_definition(self, int_type, expected_name):
"""helper function to return if_type (i.e. public) and the expected
name used by cloud-init (i.e eth0)"""
netcfg = self._get_networking()
meta_def = (DO_META.get('interfaces')).get(int_type)[0]
self.assertEqual(int_type, meta_def.get('type'))
for nic_def in netcfg.get('config'):
print(nic_def)
if nic_def.get('name') == expected_name:
return nic_def, meta_def
def _get_match_subn(self, subnets, ip_addr):
"""get the matching subnet definition based on ip address"""
for subn in subnets:
address = subn.get('address')
self.assertIsNotNone(address)
# equals won't work because of ipv6 addressing being in
# cidr notation, i.e fe00::1/64
if ip_addr in address:
print(json.dumps(subn, indent=3))
return subn
def test_public_interface_defined(self):
"""test that the public interface is defined as eth0"""
(nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
self.assertEqual('eth0', nic_def.ge |
iulian787/spack | var/spack/repos/builtin/packages/py-easybuild-framework/package.py | Python | lgpl-2.1 | 1,174 | 0.003407 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyEasybuildFramework(PythonPackage):
"""The core of EasyBuild, a software build and installation framework
for (scientific) software on HPC systems.
"""
homepage = 'https://easybuilders.github.io/easybuild'
url = 'https://pypi.io/packages/source/e/easybuild-framework/easybuild-framework-4.0.0.tar.gz'
maintainers = ['boegel']
version('4.0.0', sha256='f5c40345cc8b9b5750f53263ade6c9c3a8cd3dfab488d58f76ac61a8ca7c5a77')
version('3.1.2', sha256='a03598478574e2982587796afdb792d78b598f4c09ebf4bec1a690c06470c00d')
depends_on('python@2.6:2.8', when='@:3', type=('build', 'run'))
depends_on( | 'python@2.6:2.8,3.5:', when='@4:', type=('build', 'run'))
depends_on('py-setuptools', when='@:3', type=('build' | , 'run'))
depends_on('py-vsc-base@2.5.4:', when='@2.9:3', type='run')
# Only required for tests (python -O -m test.framework.suite)
depends_on('py-vsc-install', when='@:3', type='test')
|
rprichard/rust | src/etc/htmldocck.py | Python | apache-2.0 | 14,476 | 0.002141 | # Copyright 2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
r"""
htmldocck.py is a custom checker script for Rustdoc HTML outputs.
# How and why?
The principle is simple: This script receives a path to generated HTML
documentation and a "template" script, which has a series of check
commands like `@has` or `@matches`. Each command can be used to check if
some pattern is present or not present in the particular file or in
the particular node of HTML tree. In many cases, the template script
happens to be a source code given to rustdoc.
While it indeed is possible to test in smaller portions, it has been
hard to construct tests in this fashion and major rendering errors were
discovered much later. This script is designed for making the black-box
and regression testing of Rustdoc easy. This does not preclude the needs
for unit testing, but can be used to complement related tests by quickly
showing the expected renderings.
In order to avoid one-off dependencies for this task, this script uses
a reasonably working HTML parser and the existing XPath implementation
from Python 2's standard library. Hopefully we won't render
non-well-formed HTML.
# Commands
Commands start with an `@` followed by a command name (letters and
hyphens), and zero or more arguments separated by one or more whitespace
and optionally delimited with single or double quotes. The `@` mark
cannot be preceded by a non-whitespace character. Other lines (including
every text up to the first `@`) are ignored, but it is recommended to
avoid the use of `@` in the template file.
There are a number of supported commands:
* `@has PATH` checks for the existence of given file.
`PATH` is relative to the output directory. It can be given as `-`
which repeats the most recently used `PATH`.
* `@has PATH PATTERN` and `@matches PATH PATTERN` checks for
the occurrence of given `PATTERN` in the given file. Only one
occurrence of given pattern is enough.
For `@has`, `PATTERN` is a whitespace-normalized (every consecutive
whitespace being replaced by one single space character) string.
The entire file is also whitespace-normalized including newlines.
For `@matches`, `PATTERN` is a Python-supported regular expression.
The file remains intact but the regexp is matched with no `MULTILINE`
and `IGNORECASE` option. You can still use a prefix `(?m)` or `(?i)`
to override them, and `\A` and `\Z` for definitely matching
the beginning and end of the file.
(The same distinction goes to other variants of these commands.)
* `@has PATH XPATH PATTERN` and `@matches PATH XPATH PATTERN` checks for
the presence of given `XPATH` in the given HTML file, and also
the occurrence of given `PATTERN` in the matching node or attribute.
Only one occurrence of given pattern in the match is enough.
`PATH` should be a valid and well-formed HTML file. It does *not*
accept arbitrary HTML5; it should have matching open and close tags
and correct entity references at least.
`XPATH` is an XPath expression to match. This is fairly limited:
`tag`, `*`, `.`, `//`, `..`, `[@attr]`, `[@attr='value']`, `[tag]`,
`[POS]` (element located in given `POS`), `[last()-POS]`, `text()`
and `@attr` (both as the last segment) are supported. Some examples:
- `//pre` or `.//pre` matches any element with a name `pre`.
- `//a[@href]` matches any element with an `href` attribute.
- `//*[@class="impl"]//code` matches any element with a name `code`,
which is an ancestor of some element which `class` | attr is `impl`.
- `//h1[@class="fqn"]/span[1]/a[last()]/@class` matches a value of
`class` attribu | te in the last `a` element (can be followed by more
elements that are not `a`) inside the first `span` in the `h1` with
a class of `fqn`. Note that there cannot be no additional elements
between them due to the use of `/` instead of `//`.
Do not try to use non-absolute paths, it won't work due to the flawed
ElementTree implementation. The script rejects them.
For the text matches (i.e. paths not ending with `@attr`), any
subelements are flattened into one string; this is handy for ignoring
highlights for example. If you want to simply check the presence of
given node or attribute, use an empty string (`""`) as a `PATTERN`.
* `@count PATH XPATH COUNT' checks for the occurrence of given XPath
in the given file. The number of occurrences must match the given count.
All conditions can be negated with `!`. `@!has foo/type.NoSuch.html`
checks if the given file does not exist, for example.
"""
import sys
import os.path
import re
import shlex
from collections import namedtuple
from HTMLParser import HTMLParser
from xml.etree import cElementTree as ET
# ⇤/⇥ are not in HTML 4 but are in HTML 5
from htmlentitydefs import entitydefs
entitydefs['larrb'] = u'\u21e4'
entitydefs['rarrb'] = u'\u21e5'
# "void elements" (no closing tag) from the HTML Standard section 12.1.2
VOID_ELEMENTS = set(['area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'])
class CustomHTMLParser(HTMLParser):
"""simplified HTML parser.
this is possible because we are dealing with very regular HTML from
rustdoc; we only have to deal with i) void elements and ii) empty
attributes."""
def __init__(self, target=None):
HTMLParser.__init__(self)
self.__builder = target or ET.TreeBuilder()
def handle_starttag(self, tag, attrs):
attrs = dict((k, v or '') for k, v in attrs)
self.__builder.start(tag, attrs)
if tag in VOID_ELEMENTS:
self.__builder.end(tag)
def handle_endtag(self, tag):
self.__builder.end(tag)
def handle_startendtag(self, tag, attrs):
attrs = dict((k, v or '') for k, v in attrs)
self.__builder.start(tag, attrs)
self.__builder.end(tag)
def handle_data(self, data):
self.__builder.data(data)
def handle_entityref(self, name):
self.__builder.data(entitydefs[name])
def handle_charref(self, name):
code = int(name[1:], 16) if name.startswith(('x', 'X')) else int(name, 10)
self.__builder.data(unichr(code).encode('utf-8'))
def close(self):
HTMLParser.close(self)
return self.__builder.close()
Command = namedtuple('Command', 'negated cmd args lineno')
def concat_multi_lines(f):
"""returns a generator out of the file object, which
- removes `\\` then `\n` then a shared prefix with the previous line then
optional whitespace;
- keeps a line number (starting from 0) of the first line being
concatenated."""
lastline = None # set to the last line when the last line has a backslash
firstlineno = None
catenated = ''
for lineno, line in enumerate(f):
line = line.rstrip('\r\n')
# strip the common prefix from the current line if needed
if lastline is not None:
maxprefix = 0
for i in xrange(min(len(line), len(lastline))):
if line[i] != lastline[i]:
break
maxprefix += 1
line = line[maxprefix:].lstrip()
firstlineno = firstlineno or lineno
if line.endswith('\\'):
lastline = line[:-1]
catenated += line[:-1]
else:
yield firstlineno, catenated + line
lastline = None
firstlineno = None
catenated = ''
if lastline is not None:
raise RuntimeError('Trailing backslash in the end of file')
LINE_PATTERN = re.compile(r'''
(?<=(?<!\S)@)(?P<negated>!?)
(?P<cmd>[A-Za-z]+(?:-[A-Za-z]+)*)
(?P<args>.*)$
''', re.X)
def get_commands(template):
with open(tem |
SRabbelier/Melange | tests/new_views/test_document.py | Python | apache-2.0 | 2,777 | 0.001801 | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for program related views.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from soc.models.document import Document
from tests.timeline_utils import TimelineHelper
from tests.test_utils import DjangoTestCase
from tests.profile_utils import GSoCProfileHelper
# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic
from soc.modules.seeder.logi | c.providers.string imp | ort DocumentKeyNameProvider
class EditProgramTest(DjangoTestCase):
"""Tests program edit page.
"""
def setUp(self):
self.init()
properties = {
'prefix': 'site',
'scope': self.site,
'read_access': 'public',
'key_name': DocumentKeyNameProvider(),
}
self.document = self.seed(Document, properties)
def testShowDocument(self):
url = '/gsoc/document/show/' + self.document.key().name()
response = self.client.get(url)
self.assertGSoCTemplatesUsed(response)
def testCreateDocumentRestriction(self):
# TODO(SRabbelier): test document ACL
pass
def testCreateDocument(self):
self.data.createHost()
url = '/gsoc/document/edit/gsoc_program/%s/doc' % self.gsoc.key().name()
response = self.client.get(url)
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/document/base.html')
self.assertTemplateUsed(response, 'v2/modules/gsoc/_form.html')
# test POST
override = {
'prefix': 'gsoc_program', 'scope': self.gsoc, 'link_id': 'doc',
'key_name': DocumentKeyNameProvider(), 'modified_by': self.data.user,
'home_for': None, 'author': self.data.user, 'is_featured': None,
'write_access': 'admin', 'read_access': 'public',
}
properties = seeder_logic.seed_properties(Document, properties=override)
postdata = properties.copy()
postdata['xsrf_token'] = self.getXsrfToken(url)
response = self.client.post(url, postdata)
self.assertResponseRedirect(response, url)
key_name = properties['key_name']
document = Document.get_by_key_name(key_name)
self.assertPropertiesEqual(properties, document)
|
kljensen/viff | viff/test/__init__.py | Python | gpl-3.0 | 764 | 0 | # Copyright 2007, 2008, 2009 VIFF Development Team.
#
# This file is part of VIFF, the Virtual Ideal Functionality Framework.
#
# VIFF is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser Gene | ral Public License (LGPL) as
# published by the Free Software Foundation, either version 3 o | f the
# License, or (at your option) any later version.
#
# VIFF is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with VIFF. If not, see <http://www.gnu.org/licenses/>.
|
anupamaloke/Dell-EMC-Ansible-Modules-for-iDRAC | library/dellemc_idrac_export_tsr.py | Python | gpl-3.0 | 5,134 | 0.001364 | #! /usr/bin/python
# _*_ coding: utf-8 _*_
#
# Dell EMC OpenManage Ansible Modules
#
# Copyright © 2017 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its
# subsidiaries. Other trademarks may be trademarks of their respective owners.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dellemc_idrac_export_tsr
short_description: Export TSR logs to a network share
version_added: "2.3"
description:
- Export TSR logs to a network share (CIFS, NFS)
options:
idrac_ip:
required: True
description:
- iDRAC IP Address
type: 'str'
idrac_user:
required: True
description:
- iDRAC user name
type: 'str'
idrac_pwd:
required: True
description:
- iDRAC user password
type: 'str'
idrac_port:
required: False
description:
- iDRAC port
default: 443
type: 'int'
share_name:
required: True
description:
- CIFS or NFS Network share
share_user:
required: True
description:
- Network share user in the format 'user@domain' if user is part of a domain else 'user'
type: 'str'
share_pwd:
required: True
description:
- Network share user password
type: 'str'
requirements: ['Dell EMC OpenManage Python SDK']
author: "anupam.aloke@dell.com"
'''
EXAMPLES = '''
---
# Export TSR to a CIFS Network Share
- name: Export TSR to a CIFS network share
dellemc_idrac_export_tsr:
idrac_ip: "192.168.1.1"
idrac_user: "root"
idrac_pwd: "calvin"
share_name: "\\\\192.168.10.10\\share"
share_user: "user1"
share_pwd: "password"
# Export TSR to a NFS Network Share
- name: Export TSR to a NFS network share
dellemc_idrac_export_tsr:
idrac_ip: "192.168.1.1"
idrac_user: "root"
idrac_pwd: "calvin"
share_name: "192.168.10.10:/share"
share_user: "user1"
share_pwd: "password"
'''
RETURN = '''
---
'''
from ansible.module_utils.dellemc_idrac import iDRACConnection
from ansible.module_utils.basic import AnsibleModule
try:
from omsdk.sdkcreds import UserCredentials
from omsdk.sdkfile import FileOnShare
HAS_OMSDK = True
except ImportError:
HAS_OMSDK = False
def export_tech_support_report(idrac, module):
"""
Export Tech Support Report (TSR)
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
msg = {}
msg['changed'] = False
msg['failed'] = False
err = False
try:
tsr_file_name_format = "%ip_%Y%m%d_%H%M%S_tsr.zip"
myshare = FileOnShare(remote=module.params['share_name'],
isFolder=True)
myshare.addcreds(UserCredentials(module.params['share_user'],
module.params['share_pwd']))
tsr_file_name = myshare.new_file(tsr_file_name_format)
msg['msg'] = idrac.confi | g_mgr.export_tsr(tsr_file_name)
if "Status" in msg['msg | '] and msg['msg']['Status'] != "Success":
msg['failed'] = True
except Exception as e:
err = True
msg['msg'] = "Error: %s" % str(e)
msg['failed'] = True
return msg, err
# Main
def main():
module = AnsibleModule(
argument_spec=dict(
# iDRAC Handle
idrac=dict(required=False, type='dict'),
# iDRAC Credentials
idrac_ip=dict(required=True, type='str'),
idrac_user=dict(required=True, type='str'),
idrac_pwd=dict(required=True, type='str', no_log=True),
idrac_port=dict(required=False, default=443, type='int'),
# Network file share
share_name=dict(required=True, type='str'),
share_pwd=dict(required=True, type='str', no_log=True),
share_user=dict(required=True, type='str')
),
supports_check_mode=True)
if not HAS_OMSDK:
module.fail_json(msg="Dell EMC OpenManage Python SDK required for this module")
# Connect to iDRAC
idrac_conn = iDRACConnection(module)
idrac = idrac_conn.connect()
# Export Tech Support Report (TSR)
msg, err = export_tech_support_report(idrac, module)
# Disconnect from iDRAC
idrac_conn.disconnect()
if err:
module.fail_json(**msg)
module.exit_json(**msg)
if __name__ == '__main__':
main()
|
bigfatpanda-training/pandas-practical-python-primer | training/level-2-command-line-interfaces/bfp-reference/exercise_07/file_ops.py | Python | artistic-2.0 | 593 | 0.001686 | """
This modules provides various functions for operating on files.
Functions:
copy_files: Copy file(s) to a specified location.
"""
import subproce | ss
def copy_files(files: list, destination: str):
"""
Copy files to a given destination.
Args:
files: A list of files to copy.
destination: A str specifying the destination for copied
files.
"""
for file in files:
operation_result = subprocess.check_output(
args=['cp', '-vp', file, destination],
stderr=subprocess.STDOUT)
print(operation_res | ult) |
SteveViss/readthedocs.org | readthedocs/rtd_tests/tests/test_version_windows.py | Python | mit | 3,398 | 0.000589 | import unittest
from readthedocs.projects.version_handling import version_windows
class TestVersionWindows(unittest.TestCase):
def setUp(self):
self.versions = [
'0.1.0',
'0.2.0',
'0.2.1',
'0.3.0',
'0.3.1',
'1.1.0',
'1.2.0',
'1.3.0',
'2.1.0',
'2.2.0',
'2.3.0',
'2.3.1',
'2.3.2',
'2.3.3',
'nonsense-version',
]
def test_major(self):
major_versions = version_windows(self.versions, major=1)
self.assertEqual(major_versions, ['2.3.3'])
major_versions = version_windows(self.versions, major=2)
self.assertEqual(major_versions, ['1.3.0', '2.3.3'])
major_versions = version_windows(self.versions, major=3)
self.assertEqual(major_versions, ['0.3.1', '1.3.0', '2.3.3'])
major_versions = version_windows(self.versions, major=4)
self.assertEqual(major_versions, ['0.3.1', '1.3.0', '2.3.3'])
def test_minor(self):
minor_versions = version_windows(self.versions, minor=1)
self.assertEqual(minor_versions, ['2.3.3'])
minor_versions = version_windows(self.versions, minor=2)
self.assertEqual(minor_versions, ['2.2.0', '2.3.3'])
minor_versions = version_windows(self.versions, minor=3)
self.assertEqual(minor_versions, ['2.1.0', '2.2.0', '2.3.3'])
minor_versions = version_windows(self.versions, minor=4)
self.assertEqual(minor_versions, ['2.1.0', '2.2.0', '2.3.3'])
def test_point(self):
point_versions = version_windows(self.versions, point=1)
self.assertEqual(point_versions, ['2.3.3'])
point_versions = version_windows(self.versions, point=2)
self.assertEqual(point_versions, ['2.3.2', '2.3.3'])
point_versions = version_windows(self.versions, point=3)
self.assertEqual(point_versions, ['2.3.1', '2.3.2', '2.3.3'])
point_versions = version_windows(self.versions, point=4)
self.assertEqual(point_versions, ['2.3.0', '2.3.1', '2.3.2', '2.3.3'])
point_versions = version_windows(self.versions, point=5)
self.assertEqual(point_versions, ['2.3.0', '2.3.1', '2.3.2', '2.3.3'])
def test_sort(self):
final_versions = version_windows(self.versions,
major=2, minor=2, point=1)
self.assertEqual(final_versions, ['1.2.0', '1.3.0', '2.2.0', '2.3.3'])
self.assertTrue('2.3.0' not in final_versions)
final_versions = version_windows(self.versions,
major=1, minor=2, point=2)
# There is no 1.x in this list.
# There are two 2.x versions.
# There are two point releases if available.
self.assertEqual(final_versions, ['2.2.0', '2.3.2', '2.3.3'])
final_versio | ns = version_windows(self.versions,
major=1, minor=2, point=3)
self.assertEqual(final_versions, ['2.2.0', | '2.3.1', '2.3.2', '2.3.3'])
final_versions = version_windows(['2.3.2', '2.2.0', '2.3.0', '2.3.3', '2.3.1'],
major=1, minor=2, point=3)
self.assertEqual(final_versions, ['2.2.0', '2.3.1', '2.3.2', '2.3.3'])
if __name__ == '__main__':
unittest.main()
|
ithinksw/philo | philo/models/fields/__init__.py | Python | isc | 4,971 | 0.033394 | from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import validate_slug
from django.db import models
from django.utils import simplejson as json
from django.utils.text import capfirst
from django.utils.translatio | n import ugettext_lazy as _
from philo.forms.fields import JSONFormField
from phi | lo.utils.registry import RegistryIterator
from philo.validators import TemplateValidator, json_validator
#from philo.models.fields.entities import *
class TemplateField(models.TextField):
"""A :class:`TextField` which is validated with a :class:`.TemplateValidator`. ``allow``, ``disallow``, and ``secure`` will be passed into the validator's construction."""
def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure))
class JSONDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ?
if self.field.name not in instance.__dict__:
json_string = getattr(instance, self.field.attname)
instance.__dict__[self.field.name] = json.loads(json_string)
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value))
def __delete__(self, instance):
del(instance.__dict__[self.field.name])
setattr(instance, self.field.attname, json.dumps(None))
class JSONField(models.TextField):
"""A :class:`TextField` which stores its value on the model instance as a python object and stores its value in the database as JSON. Validated with :func:`.json_validator`."""
default_validators = [json_validator]
def get_attname(self):
return "%s_json" % self.name
def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls)
def fix_init_kwarg(self, sender, args, kwargs, **signal_kwargs):
# Anything passed in as self.name is assumed to come from a serializer and
# will be treated as a json string.
if self.name in kwargs:
value = kwargs.pop(self.name)
# Hack to handle the xml serializer's handling of "null"
if value is None:
value = 'null'
kwargs[self.attname] = value
def formfield(self, *args, **kwargs):
kwargs["form_class"] = JSONFormField
return super(JSONField, self).formfield(*args, **kwargs)
class SlugMultipleChoiceField(models.Field):
"""Stores a selection of multiple items with unique slugs in the form of a comma-separated list. Also knows how to correctly handle :class:`RegistryIterator`\ s passed in as choices."""
__metaclass__ = models.SubfieldBase
description = _("Comma-separated slug field")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if not value:
return []
if isinstance(value, list):
return value
return value.split(',')
def get_prep_value(self, value):
return ','.join(value)
def formfield(self, **kwargs):
# This is necessary because django hard-codes TypedChoiceField for things with choices.
defaults = {
'widget': forms.CheckboxSelectMultiple,
'choices': self.get_choices(include_blank=False),
'label': capfirst(self.verbose_name),
'required': not self.blank,
'help_text': self.help_text
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
form_class = forms.TypedMultipleChoiceField
return form_class(**defaults)
def validate(self, value, model_instance):
invalid_values = []
for val in value:
try:
validate_slug(val)
except ValidationError:
invalid_values.append(val)
if invalid_values:
# should really make a custom message.
raise ValidationError(self.error_messages['invalid_choice'] % invalid_values)
def _get_choices(self):
if isinstance(self._choices, RegistryIterator):
return self._choices.copy()
elif hasattr(self._choices, 'next'):
choices, self._choices = itertools.tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^philo\.models\.fields\.SlugMultipleChoiceField"])
add_introspection_rules([], ["^philo\.models\.fields\.TemplateField"])
add_introspection_rules([], ["^philo\.models\.fields\.JSONField"]) |
leighpauls/k2cro4 | tools/telemetry/telemetry/platform.py | Python | bsd-3-clause | 672 | 0.010417 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Platform(object):
"""The platform that the target browser | is running on.
Provides a limited interface to obtain stats from the platform itself, where
possible.
"""
def GetSurfaceCollector(self, trace_tag):
"""Platforms may be able to collect GL surface stats."""
class StubSurfaceCollector(object):
def __init__(self, trace_tag):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
return StubSurfaceCo | llector(trace_tag)
|
gabriel-detassigny/kodi-synology-download | resources/lib/kodiutils.py | Python | mit | 1,657 | 0.000604 | # -*- coding: utf-8 -*-
import xbmc
import xbmcaddon
import re
import sys
import logging
if sys.version_info >= (2, 7):
import json as json
else:
import simplejson as json
# read settings
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(__name__)
def notification(header, message, time=5000, icon=ADDON.getAddonInfo('icon'), sound=True):
xbmcgui.Dialog().notification(header, | message, icon, time, sound)
def show_settings():
ADDON.openSettings()
def get_setting(setting):
return ADDON.getSetting(setting).strip().decode('utf-8')
def set_setting(setting, value):
ADDON.setSetting(setting, str(value))
def get_setting_as_bool(setting):
return get_setting(setting).lower() == "true"
def get_setting_as_float(setting):
try:
return float(get_se | tting(setting))
except ValueError:
return 0
def get_setting_as_int(setting):
try:
return int(get_setting_as_float(setting))
except ValueError:
return 0
def get_string(string_id):
return ADDON.getLocalizedString(string_id).encode('utf-8', 'ignore')
def kodi_json_request(params):
data = json.dumps(params)
request = xbmc.executeJSONRPC(data)
try:
response = json.loads(request)
except UnicodeDecodeError:
response = json.loads(request.decode('utf-8', 'ignore'))
try:
if 'result' in response:
return response['result']
return None
except KeyError:
logger.warn("[%s] %s" %
(params['method'], response['error']['message']))
return None
|
OCA/account-invoicing | account_invoice_pricelist/models/account_move.py | Python | agpl-3.0 | 9,635 | 0.000623 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class AccountMove(models.Model):
_inherit = "account.move"
pricelist_id = fields.Many2one(
comodel_name="product.pricelist",
string="Pricelist",
readonly=True,
states={"draft": [("readonly", False)]},
)
@api.constrains("pricelist_id", "currency_id")
def _check_currency(self):
for sel in self.filtered(lambda a: a.pricelist_id and a.is_invoice()):
if sel.pricelist_id.currency_id != sel.currency_id:
raise UserError(
_("Pricelist and Invoice need to use the same currency.")
)
@api.onchange("partner_id", "company_id")
def _onchange_partner_id_account_invoice_pricelist(self):
if self.is_invoice():
if (
self.partner_id
and self.move_type in ("out_invoice", "out_refund")
and self.partner_id.property_product_pricelist
):
self.pricelist_id = self.partner_id.property_product_pricelist
self._set_pricelist_currency()
@api.onchange("pricelist_id")
def _set_pricelist_currency(self):
if (
self.is_invoice()
and self.pricelist_id
and self.currency_id != self.pricelist_id.currency_id
):
self.currency_id = self.pricelist_id.currency_id
def button_update_prices_from_pricelist(self):
for inv in self.filtered(lambda r: r.state == "draft"):
inv.invoice_line_ids._onchange_product_id_account_invoice_pricelist()
self.filtered(lambda r: r.state == "draft").with_context(
check_move_validity=False
)._move_autocomplete_invoice_lines_values()
self.filtered(lambda r: r.state == "draft").with_context(
check_move_validity=False
)._recompute_tax_lines()
def _reverse_move_vals(self, default_values, cancel=True):
move_vals = super(AccountMove, self)._reverse_move_vals(
default_values, cancel=cancel
)
if self.pricelist_id:
move_vals["pricelist_id"] = self.pricelist_id.id
return move_vals
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
@api.onchange("product_id", "quantity")
def _onchange_product_id_account_invoice_pricelist(self):
for sel in self:
if not sel.move_id.pricelist_id:
return
sel.with_context(check_move_validity=False).update(
{"price_unit": sel._get_price_with_pricelist()}
)
@api.onchange("product_uom_id")
def _onchange_uom_id(self):
for sel in self:
if (
sel.move_id.is_invoice()
and sel.move_id.state == "draft"
and sel.move_id.pricelist_id
):
price_unit = sel._get_computed_price_unit()
taxes = sel._get_computed_taxes()
if taxes and sel.move_id.fiscal_position_id:
price_subtotal = sel._get_price_total_and_subtotal(
price_unit=price_unit, taxes=taxes
)["price_subtotal"]
accounting_vals = sel._get_fields_onchange_subtotal(
price_subtotal=price_subtotal,
currency=self.move_id.company_currency_id,
)
amount_currency = accounting_vals["amount_currency"]
price_unit = sel._get_fields_onchange_balance(
amount_currency=amount_currency
).get("price_unit", price_unit)
sel.with_context(check_move_validity=False).update(
{"price_unit": price_unit}
)
else:
super(AccountMoveLine, self)._onchange_uom_id()
def _get_real_price_currency(self, product, rule_id, qty, uom, pricelist_id):
PricelistItem = self.env["product.pricelist.item"]
field_name = "lst_price"
currency_id = None
product_currency = product.currency_id
if rule_id:
pricelist_item = PricelistItem.browse(rule_id)
while (
pricelist_item.base == "pricelist"
and pricelist_item.base_pricelist_id
and pricelist_item.base_pricelist_id.discount_policy
== "without_discount"
):
price, rule_id = pricelist_item.base_pricelist_id.with_context(
uom=uom.id
).get_product_price_rule(product, qty, self.move_id.partner_id)
pricelist_item = PricelistItem.browse(rule_id)
if pricelist_item.base == "standard_price":
field_name = "standard_price"
product_currency = product.cost_currency_id
elif (
pricelist_item.base == "pricelist" and pricelist_item.base_pricel | ist_id
):
f | ield_name = "price"
product = product.with_context(
pricelist=pricelist_item.base_pricelist_id.id
)
product_currency = pricelist_item.base_pricelist_id.currency_id
currency_id = pricelist_item.pricelist_id.currency_id
if not currency_id:
currency_id = product_currency
cur_factor = 1.0
else:
if currency_id.id == product_currency.id:
cur_factor = 1.0
else:
cur_factor = currency_id._get_conversion_rate(
product_currency,
currency_id,
self.company_id or self.env.company,
self.move_id.invoice_date or fields.Date.today(),
)
product_uom = self.env.context.get("uom") or product.uom_id.id
if uom and uom.id != product_uom:
uom_factor = uom._compute_price(1.0, product.uom_id)
else:
uom_factor = 1.0
return product[field_name] * uom_factor * cur_factor, currency_id
def _calculate_discount(self, base_price, final_price):
discount = (base_price - final_price) / base_price * 100
if (discount < 0 and base_price > 0) or (discount > 0 and base_price < 0):
discount = 0.0
return discount
def _get_price_with_pricelist(self):
price_unit = 0.0
if self.move_id.pricelist_id and self.product_id and self.move_id.is_invoice():
if self.move_id.pricelist_id.discount_policy == "with_discount":
product = self.product_id.with_context(
lang=self.move_id.partner_id.lang,
partner=self.move_id.partner_id.id,
quantity=self.quantity,
date_order=self.move_id.invoice_date,
date=self.move_id.invoice_date,
pricelist=self.move_id.pricelist_id.id,
product_uom_id=self.product_uom_id.id,
fiscal_position=(
self.move_id.partner_id.property_account_position_id.id
),
)
tax_obj = self.env["account.tax"]
recalculated_price_unit = (
product.price * self.product_id.uom_id.factor
) / (self.product_uom_id.factor or 1.0)
price_unit = tax_obj._fix_tax_included_price_company(
recalculated_price_unit,
product.taxes_id,
self.tax_ids,
self.company_id,
)
self.with_context(check_move_validity=False).discount = 0.0
else:
product_context = dict(
self.env.context,
partner_id=self.move_id.partner_id.id,
date=self.move_id.invoice_date or fields.Date.today(),
uom=self.product_uom_id.id,
)
final_price, rule_id = self.move_id |
daniel-j/lutris | lutris/installer/__init__.py | Python | gpl-3.0 | 42 | 0 | """Install scri | pt interpreter package.""" | |
eri-trabiccolo/exaile | xlgui/panel/radio.py | Python | gpl-2.0 | 20,745 | 0.00294 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU | General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and | Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import gio
import glib
import gobject
import gtk
import xl.radio, xl.playlist
from xl import (
event,
common,
settings,
trax
)
from xl.nls import gettext as _
import xlgui.panel.playlists as playlistpanel
from xlgui.panel import menus
from xlgui import (
guiutil,
icons,
panel
)
from xlgui.widgets.common import DragTreeView
from xlgui.widgets import dialogs
class RadioException(Exception): pass
class ConnectionException(RadioException): pass
class RadioPanel(panel.Panel, playlistpanel.BasePlaylistPanelMixin):
"""
The Radio Panel
"""
__gsignals__ = {
'playlist-selected': (gobject.SIGNAL_RUN_LAST, None, (object,)),
'append-items': (gobject.SIGNAL_RUN_LAST, None, (object, bool)),
'replace-items': (gobject.SIGNAL_RUN_LAST, None, (object,)),
'queue-items': (gobject.SIGNAL_RUN_LAST, None, (object,)),
}
__gsignals__.update(playlistpanel.BasePlaylistPanelMixin._gsignals_)
ui_info = ('radio.ui', 'RadioPanelWindow')
_radiopanel = None
def __init__(self, parent, collection,
radio_manager, station_manager, name):
"""
Initializes the radio panel
"""
panel.Panel.__init__(self, parent, name)
playlistpanel.BasePlaylistPanelMixin.__init__(self)
self.collection = collection
self.manager = radio_manager
self.playlist_manager = station_manager
self.nodes = {}
self.load_nodes = {}
self.complete_reload = {}
self.loaded_nodes = []
self._setup_tree()
self._setup_widgets()
self.playlist_image = icons.MANAGER.pixbuf_from_icon_name(
'music-library', gtk.ICON_SIZE_SMALL_TOOLBAR)
# menus
self.playlist_menu = menus.RadioPanelPlaylistMenu(self)
self.track_menu = menus.TrackPanelMenu(self)
self._connect_events()
self.load_streams()
RadioPanel._radiopanel = self
def load_streams(self):
"""
Loads radio streams from plugins
"""
for name in self.playlist_manager.playlists:
pl = self.playlist_manager.get_playlist(name)
if pl is not None:
self.playlist_nodes[pl] = self.model.append(self.custom,
[self.playlist_image,
pl.name, pl])
self._load_playlist_nodes(pl)
self.tree.expand_row(self.model.get_path(self.custom), False)
for name, value in self.manager.stations.iteritems():
self.add_driver(value)
def _add_driver_cb(self, type, object, driver):
glib.idle_add(self.add_driver, driver)
def add_driver(self, driver):
"""
Adds a driver to the radio panel
"""
node = self.model.append(self.radio_root, [self.folder, str(driver), driver])
self.nodes[driver] = node
self.load_nodes[driver] = self.model.append(node, [self.refresh_image,
_('Loading streams...'), None])
self.tree.expand_row(self.model.get_path(self.radio_root), False)
if settings.get_option('gui/radio/%s_station_expanded' %
driver.name, False):
self.tree.expand_row(self.model.get_path(node), False)
def _remove_driver_cb(self, type, object, driver):
glib.idle_add(self.remove_driver, driver)
def remove_driver(self, driver):
"""
Removes a driver from the radio panel
"""
if driver in self.nodes:
self.model.remove(self.nodes[driver])
del self.nodes[driver]
def _setup_widgets(self):
"""
Sets up the various widgets required for this panel
"""
self.status = self.builder.get_object('status_label')
@guiutil.idle_add()
def _set_status(self, message, timeout=0):
self.status.set_text(message)
if timeout:
glib.timeout_add_seconds(timeout, self._set_status, '', 0)
def _connect_events(self):
"""
Connects events used in this panel
"""
self.builder.connect_signals({
'on_add_button_clicked': self._on_add_button_clicked,
})
self.tree.connect('row-expanded', self.on_row_expand)
self.tree.connect('row-collapsed', self.on_collapsed)
self.tree.connect('row-activated', self.on_row_activated)
self.tree.connect('key-release-event', self.on_key_released)
event.add_callback(self._add_driver_cb, 'station_added',
self.manager)
event.add_callback(self._remove_driver_cb, 'station_removed',
self.manager)
def _on_add_button_clicked(self, *e):
dialog = dialogs.MultiTextEntryDialog(self.parent,
_("Add Radio Station"))
dialog.add_field(_("Name:"))
url_field = dialog.add_field(_("URL:"))
clipboard = gtk.clipboard_get()
text = clipboard.wait_for_text()
if text is not None:
location = gio.File(uri=text)
if location.get_uri_scheme() is not None:
url_field.set_text(text)
result = dialog.run()
dialog.hide()
if result == gtk.RESPONSE_OK:
(name, uri) = dialog.get_values()
self._do_add_playlist(name, uri)
@common.threaded
def _do_add_playlist(self, name, uri):
from xl import playlist, trax
if playlist.is_valid_playlist(uri):
pl = playlist.import_playlist(uri)
pl.name = name
else:
pl = playlist.Playlist(name)
tracks = trax.get_tracks_from_uri(uri)
pl.extend(tracks)
self.playlist_manager.save_playlist(pl)
self._add_to_tree(pl)
@guiutil.idle_add()
def _add_to_tree(self, pl):
self.playlist_nodes[pl] = self.model.append(self.custom,
[self.playlist_image, pl.name, pl])
self._load_playlist_nodes(pl)
def _setup_tree(self):
"""
Sets up the tree that displays the radio panel
"""
box = self.builder.get_object('RadioPanel')
self.tree = playlistpanel.PlaylistDragTreeView(self, True, True)
self.tree.set_headers_visible(False)
self.targets = [('text/uri-list', 0, 0)]
# columns
text = gtk.CellRendererText()
if settings.get_option('gui/ellipsize_text_in_panels', False):
import pango
text.set_property( 'ellipsize-set', True)
text.set_property( 'ellipsize', pango.ELLIPSIZE_END)
icon = gtk.CellRendererPixbuf()
col = gtk.TreeViewColumn('radio')
col.pack_start(icon, False)
col.pack_start(text, True)
col.set_attributes(icon, pixbuf=0)
col.set_cell_data_func(text, self.cell_data_func)
self.tree.append_column(col)
self.model = gtk.TreeStore(gtk.gdk.Pixbuf, str, object)
self.tree.se |
noisy/steemprojects.com | profiles/urls.py | Python | mit | 1,096 | 0.009124 | from django.conf.urls import url
from profiles import views
# URL routes for the profiles app. Regexes are raw strings so escape
# sequences like \w and \. are not subject to DeprecationWarning (W605).
urlpatterns = [
    url(
        regex=r"^edit/$",
        view=views.ProfileEditUpdateView.as_view(),
        name="profile_edit"
    ),
    url(
        regex=r"^confirm_role/(?P<membership_id>[-\w]+)/(?P<action>verify|deny)/$",
        view=views.profile_confirm_role,
        name="profile_confirm_role",
    ),
    url(
        regex=r"^deny_account/(?P<type_name>[\w]+)/(?P<account_name>[-\.\w]+)/$",
        view=views.profile_deny_account,
        name="profile_deny_account",
    ),
    url(
        regex=r"^confirm/$",
        view=views.profile_confirm,
        name="profile_confirm",
    ),
    url(r"^$", views.profile_list, name="profile_list"),
    url(r"^(?P<github_account>[-\w]+)/$", views.profile_detail, name="profile_detail"),
    url(r"^github/(?P<github_account>[-\w]+)/$", views.profile_detail, name="github_profile_detail"),
    url(r"^steem/(?P<steem_account>[-\.\w]+)/$", views.profile_detail, name="steem_profile_detail"),
    url(r"^id/(?P<id>[-\w]+)/$", views.profile_detail, name="id_profile_detail"),
]
|
danielSbastos/gistified | migrations/versions/ace8d095a26b_.py | Python | mit | 769 | 0.006502 | """empty message
Revision ID: ace8d095a26b
Revises:
Create Date: 2017-10-01 20:39:54.984192
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ace8d095a26b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``gist`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('gist',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=120), nullable=True),
        sa.Column('body', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the ``gist`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('gist')
    # ### end Alembic commands ###
|
github/codeql | python/ql/test/query-tests/Security/CWE-326-WeakCryptoKey/weak_crypto.py | Python | mit | 2,155 | 0.003248 | from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
# NOTE(review): ``random`` is never imported in this file; this is a CodeQL
# query-test fixture and is not meant to execute — the branch only shows the
# query that both package names expose the same API.
if random():
    from Crypto.PublicKey import DSA
    from Crypto.PublicKey import RSA
else:
    from Cryptodome.PublicKey import DSA
    from Cryptodome.PublicKey import RSA
# Key-size fixtures for the weak-crypto query tests.
# NOTE(review): 3076 (rather than the conventional 3072) looks like
# deliberate fixture data above the query threshold — confirm before
# "fixing" it.
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
# Short aliases for the generator functions under test.
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
# NOTE(review): RSA_* constants are reused as DSA bit sizes here; the
# values coincide, but confirm this mix is intended fixture data.
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
# Same calls again, exercising positional-argument flow.
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
    # Fixture: the only call site passes RSA_WEAK, so this generate()
    # is expected to be flagged through interprocedural flow.
    return RSA.generate(bits) # NOT OK

make_new_rsa_key_weak(RSA_WEAK)

def make_new_rsa_key_strong(bits):
    # Fixture: the only call site passes RSA_STRONG, so this generate()
    # should not be flagged.
    return RSA.generate(bits) # OK

make_new_rsa_key_strong(RSA_STRONG)

def only_used_by_test(bits):
    # Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
    return RSA.generate(bits)
|
liqd/a4-meinberlin | meinberlin/apps/budgeting/migrations/0008_auto_20170529_1302.py | Python | agpl-3.0 | 663 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Alter the budgeting proposal's ``description`` and ``name`` fields."""

    dependencies = [
        ('meinberlin_budgeting', '0007_update-strings'),
    ]

    operations = [
        migrations.AlterField(
            model_name='proposal',
            name='description',
            field=ckeditor.fields.RichTextField(verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='proposal',
            name='name',
            field=models.CharField(max_length=120, verbose_name='Name'),
        ),
    ]
|
bogobog/hierarchy_config_parser | bin/config_variable_processor/range_list.py | Python | gpl-2.0 | 299 | 0.070234 |
def range_list(parser, *args):
    """Return the integers from args[0] through args[1] (inclusive),
    stringified and joined into a single string.

    Args:
        parser: unused here; kept for the config-variable-processor
            callback interface.
        *args: ``(start, stop[, sep])`` — int-convertible inclusive
            bounds, plus an optional separator (default: single space).

    Raises:
        Exception: if fewer than two bounds are supplied.
    """
    if len(args) < 2:
        raise Exception('Insufficient arguments.')
    # A third argument, when given, overrides the default separator.
    sep = args[2] if len(args) == 3 else ' '
    items = [str(i) for i in range(int(args[0]), int(args[1]) + 1)]
    return sep.join(items)
|
bairdj/beveridge | src/create_model.py | Python | mit | 3,408 | 0.006455 | import argparse
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
from beveridge.models import ModelStorage
import pickle
parser = argparse.ArgumentParser(description="Create model from CSV stats data.")
parser.add_argument('file')
parser.add_argument('outfile')
args = parser.parse_args()
#Create DataFrame in Pandas
data = pd.read_csv(args.file)
#Drop team
del data['team']
#Cleanse to numeric data
data = data.apply(lambda x: pd.to_numeric(x, errors='coerce'))
#Delete any completely empty columns
data = data.dropna(axis=1, how='all')
#Delete any rows with empty values
data = data.dropna(axis=0, how='any')
#Set up some columns
data['home'] = data['home'].astype('bool')
data['win'] = data['win'].astype('bool')
#Build relative columns
data['relRebounds'] = data['rebounds'] / data['oppRebounds']
data['relDisposals'] = data['disposals'] / data['oppDisposals']
data['relKicks'] = data['kicks'] / data['oppKicks']
data['relHandballs'] = data['handballs'] / data['oppHandballs']
data['relClearances'] = data['clearances'] / data['oppClearances']
data['relHitouts'] = data['hitouts'] / data['oppHitouts']
data['relMarks'] = data['marks'] / data['oppMarks']
data['relInside50s'] = data['inside50s'] / data['oppInside50s']
data['relTackles'] = data['tackles'] / data['oppTackles']
data['relClangers'] = data['clangers'] / data['oppClangers']
data['relFrees'] = data['frees'] / data['oppFrees']
data['relContested'] = data['contested'] / data['oppContested']
data['relUncontested'] = data['uncontested'] / data['oppUncontested']
data['relContestedMarks'] = data['contestedMarks'] / data['oppContestedMarks']
data['relMarksIn50'] = data['marksIn50'] / data['oppMarksIn50']
data['relOnePercenters'] = data['onePercenters'] / data['oppOnePercenters']
data['relBounces'] = data['bounces'] / data['oppBounces']
#Try building a logistic regression model
print("Building initial logistic regression model.")
model = LogisticRegression()
#Only use the relative columns. I've tested with the absolute values and they are much less useful than relative.
trainColumns = pd.Series(['relRebounds', 'relDisposals', 'relKicks', 'relHandballs', 'relClearances', 'relHitouts', 'relMarks', 'relInside50s', 'relTackles', 'relClangers', 'relFrees', 'relContested', 'relUncontested', 'relContestedMarks', 'relMarksIn50', 'relOnePercenters', 'relBounces', 'home'])
model.fit(data[trainColumns], data['win'])
print("Training data accuracy: {:%}".format(model.score(data[trainColumns], data['win'])))
#Recursive feature selection with cross-validation
print("Running feature selection.")
fs = RFECV(model)
fs.fit(data[trainColumns], data['win'])
print("Accuracy after feature selection: {:%}".format(fs.score(data[trainColumns], data['win'])))
filteredColumns = trainColumns[fs.support_]
#Ignoring filtered columns for the random forest. Seems to produce better results
# Create a random forest model (ignoring the RFECV-filtered columns; the
# full relative-feature set was observed to perform better here).
print("Building random forest")
rf = RandomForestClassifier(n_estimators=100, min_samples_split=0.02, class_weight='balanced')
rf.fit(data[trainColumns], data['win'])
print("Random forest accuracy: {:%}".format(rf.score(data[trainColumns], data['win'])))
# Save the random forest model to the given filename.
with open(args.outfile, 'wb') as file:
    storage = ModelStorage(trainColumns, rf)
    pickle.dump(storage, file)
mhbu50/erpnext | erpnext/manufacturing/doctype/production_plan_sales_order/production_plan_sales_order.py | Python | gpl-3.0 | 222 | 0.004505 | # Copyright (c) 2015, Frappe | Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from frappe.model.document import Document
class ProductionPlanSalesOrder(Document):
    """Doctype controller; no custom behaviour beyond the Frappe base class."""
    pass
|
shadowjig/ponos | initalize_db.py | Python | gpl-3.0 | 127 | 0 | from Ponos import init_db
from env_vars import *  # presumably provides DB_PATH — confirm in env_vars
import sqlite3
import os

# Truncate (or create) the database file, then rebuild the schema.
print(DB_PATH)
open(DB_PATH, 'w').close()
init_db()
|
mauroalberti/simSurf | geosurf_pure/spatial.py | Python | gpl-3.0 | 67,189 | 0.037553 | # -*- coding: utf-8 -*-
from __future__ import division
from math import sqrt, floor, ceil, sin, cos, tan, radians, asin, acos, atan, atan2, degrees
import numpy as np
import copy
from .utils import array_from_function, almost_zero
from array_utils import point_solution, formula_to_grid
from .errors import AnaliticSurfaceIOException, AnaliticSurfaceCalcException
MINIMUM_SEPARATION_THRESHOLD = 1e-10
MINIMUM_VECTOR_MAGNITUDE = 1e-10
class Point2D(object):
    """A point in the 2D cartesian plane."""

    def __init__(self, x=np.nan, y=np.nan):
        self._x = x
        self._y = y

    def clone(self):
        """Return an independent copy of this point."""
        return Point2D(self._x, self._y)

    def distance(self, another):
        """Euclidean distance between this point and *another*."""
        dx = self._x - another._x
        dy = self._y - another._y
        return sqrt(dx * dx + dy * dy)

    def point_3d(self, z=0.0):
        """Lift this point into 3D at height *z*."""
        return Point3D(self._x, self._y, z)

    def traslate_with_vector(self, displacement_vector):
        """Return a new point shifted by *displacement_vector*."""
        return Point2D(self._x + displacement_vector._x,
                       self._y + displacement_vector._y)

    def is_coincident_with(self, another, tolerance=1.0e-7):
        """True when *another* lies within *tolerance* of this point."""
        return self.distance(another) <= tolerance

    def crs_project(self, srcCrs, destCrs):
        """Reproject this point from *srcCrs* to *destCrs* (QGIS helpers)."""
        src_qgis_pt = qgs_point_2d(self._x, self._y)
        dst_qgis_pt = project_qgs_point(src_qgis_pt, srcCrs, destCrs)
        return Point2D(dst_qgis_pt.x(), dst_qgis_pt.y())
class Segment2D(object):
    """A 2D segment defined by cloned start and end points."""

    def __init__(self, start_pt_2d, end_pt_2d):
        self._start_pt = start_pt_2d.clone()
        self._end_pt = end_pt_2d.clone()

    def clone(self):
        """Return an independent copy of this segment."""
        return Segment2D(self._start_pt, self._end_pt)

    def vector_2d(self):
        """Vector from the start point to the end point."""
        return Vector2D(self._end_pt._x - self._start_pt._x,
                        self._end_pt._y - self._start_pt._y)

    def length_2d(self):
        """Euclidean length of the segment."""
        return self._start_pt.distance(self._end_pt)

    def delta_x(self):
        return self._end_pt._x - self._start_pt._x

    def delta_y(self):
        return self._end_pt._y - self._start_pt._y

    def scale(self, scale_factor):
        """Segment with the same start point and length scaled by *scale_factor*."""
        delta_x = self.delta_x() * scale_factor
        delta_y = self.delta_y() * scale_factor
        return Segment2D(self._start_pt,
                         Point2D(self._start_pt._x + delta_x,
                                 self._start_pt._y + delta_y))

    def segment_3d(self):
        """Lift to a 3D segment at z = 0."""
        return Segment3D(self._start_pt.point_3d(), self._end_pt.point_3d())

    def densify(self, densify_distance):
        """Return a Line2D sampling this segment every *densify_distance*.

        Raises AssertionError for non-positive distances or zero-length
        segments.
        """
        assert densify_distance > 0.0
        # BUG FIX: the original called self.length(), which does not exist
        # on Segment2D (the method is named length_2d) and raised
        # AttributeError on every call.
        segment_length = self.length_2d()
        assert segment_length > 0.0
        generator_vector = self.vector_2d().versor_2d().scale(densify_distance)
        interpolated_line = Line2D([self._start_pt])
        n = 0
        while True:
            n += 1
            new_pt = self._start_pt.traslate_with_vector(generator_vector.scale(n))
            if self._start_pt.distance(new_pt) >= segment_length:
                break
            interpolated_line = interpolated_line.add_pt(new_pt)
        # Always terminate exactly on the original end point.
        interpolated_line = interpolated_line.add_pt(self._end_pt)
        return interpolated_line
class Vector2D(object):
    """A 2D vector with basic arithmetic helpers."""

    def __init__(self, x=np.nan, y=np.nan):
        self._x = x
        self._y = y

    def clone(self):
        """Return an independent copy of this vector."""
        return Vector2D(self._x, self._y)

    def length(self):
        """Euclidean magnitude of the vector."""
        return sqrt(self._x * self._x + self._y * self._y)

    def scale(self, scale_factor):
        """New vector with both components multiplied by *scale_factor*."""
        return Vector2D(self._x * scale_factor, self._y * scale_factor)

    def versor_2d(self):
        """Unit vector pointing in the same direction."""
        return self.scale(1.0 / self.length())

    def add(self, another):
        """Component-wise sum with *another*."""
        return Vector2D(self._x + another._x, self._y + another._y)

    def minus(self, another):
        """Component-wise difference (self - another)."""
        return self.add(another.scale(-1))

    def vector_3d(self, z=0.0):
        """Lift to a 3D vector with third component *z*."""
        return Vector3D(self._x, self._y, z)
class Line2D(object):
    """An ordered sequence of Point2D vertices (immutable operations:
    every method returns a new Line2D rather than mutating in place)."""

    def __init__(self, pt_2d_list=[]):
        # NOTE: the mutable default is kept for interface compatibility;
        # it is only read (and its elements cloned), never mutated.
        self._pts = [pt.clone() for pt in pt_2d_list]

    def clone(self):
        return Line2D(self._pts)

    def add_pt(self, pt_2d):
        """New line with *pt_2d* appended."""
        return Line2D(self._pts + [pt_2d])

    def add_pts(self, pt_2d_list):
        """New line with every point of *pt_2d_list* appended."""
        return Line2D(self._pts + pt_2d_list)

    def num_points(self):
        return len(self._pts)

    def x_list(self):
        return [pt._x for pt in self._pts]

    def y_list(self):
        return [pt._y for pt in self._pts]

    def xy_lists(self):
        return self.x_list(), self.y_list()

    def x_min(self):
        return min(self.x_list())

    def x_max(self):
        return max(self.x_list())

    def y_min(self):
        return min(self.y_list())

    def y_max(self):
        return max(self.y_list())

    def remove_coincident_successive_points(self):
        """New line where successive points coincident with the last kept
        point are dropped."""
        assert self.num_points() > 0
        cleaned = Line2D([self._pts[0]])
        for pt in self._pts[1:]:
            if not pt.is_coincident_with(cleaned._pts[-1]):
                cleaned = cleaned.add_pt(pt)
        return cleaned

    def to_segments(self):
        """Consecutive vertex pairs as Segment2D objects."""
        return [Segment2D(pt_a, pt_b)
                for pt_a, pt_b in zip(self._pts[:-1], self._pts[1:])]

    def densify(self, sample_distance):
        """New line with every segment resampled each *sample_distance*."""
        assert sample_distance > 0.0
        densified = [segment.densify(sample_distance)
                     for segment in self.to_segments()]
        assert len(densified) > 0
        return MultiLine2D(densified).to_line().remove_coincident_successive_points()

    def length(self):
        """Total length as the sum of inter-vertex distances."""
        total = 0.0
        for pt_a, pt_b in zip(self._pts[:-1], self._pts[1:]):
            total += pt_a.distance(pt_b)
        return total

    def incremental_length(self):
        """Cumulative distance from the first vertex to each vertex."""
        cumulated = [0.0]
        for pt_a, pt_b in zip(self._pts[:-1], self._pts[1:]):
            cumulated.append(cumulated[-1] + pt_a.distance(pt_b))
        return cumulated

    def crs_project(self, srcCrs, destCrs):
        """New line with every vertex reprojected from *srcCrs* to *destCrs*."""
        projected = [pt.crs_project(srcCrs, destCrs) for pt in self._pts]
        return Line2D(projected)
class MultiLine2D(object):
# MultiLine2D is a list of Line2D objects
|
def __init__( self, lines_list = [] ):
self._lines = [ line_2d.clone() for line_2d in lines_list ]
def add( self, line ):
return MultiLine2D( self._lines + [ line ] )
def clone( self ):
return MultiLine2D( self._lines )
def num_parts( self ):
return len( self._lines )
def num_points( s |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.