text stringlengths 4 1.02M | meta dict |
|---|---|
# Copyright 2014-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import backends
from .. import build
from .. import mesonlib
import uuid, os, sys
from ..coredata import MesonException
class XCodeBackend(backends.Backend):
    # Backend that writes an Xcode project file (project.pbxproj).
    # Every object in a pbxproj is referenced by a 24-character uppercase
    # hex UID; __init__ and the generate_*_map methods pre-allocate those
    # UIDs so the sections emitted later can cross-reference each other.
    def __init__(self, build):
        """Create the backend and pre-allocate the fixed object UIDs.

        Arguments:
        build -- the Meson Build object this backend serialises.
        """
        super().__init__(build)
        # Reuse the persistent project GUID (stripped of dashes) so the
        # PBXProject UID is stable across regenerations.
        self.project_uid = self.environment.coredata.guid.replace('-', '')[:24]
        self.project_conflist = self.gen_id()
        self.indent = ' '
        self.indent_level = 0
        # File extension -> Xcode file type identifier (PBXFileReference).
        self.xcodetypemap = {'c' : 'sourcecode.c.c',
                             'a' : 'archive.ar',
                             'cc': 'sourcecode.cpp.cpp',
                             'cxx' : 'sourcecode.cpp.cpp',
                             'cpp' : 'sourcecode.cpp.cpp',
                             'c++' : 'sourcecode.cpp.cpp',
                             'm' : 'sourcecode.c.objc',
                             'mm' : 'sourcecode.cpp.objcpp',
                             'h' : 'sourcecode.c.h',
                             'hpp' : 'sourcecode.cpp.h',
                             'hxx' : 'sourcecode.cpp.h',
                             'hh' : 'sourcecode.cpp.hh',
                             'inc' : 'sourcecode.c.h',
                             'dylib' : 'compiled.mach-o.dylib',
                             'o' : 'compiled.mach-o.objfile',}
        # UIDs for the synthetic ALL_BUILD / RUN_TESTS aggregate targets
        # and their configuration lists.
        self.maingroup_id = self.gen_id()
        self.all_id = self.gen_id()
        self.all_buildconf_id = self.gen_id()
        # Only a single 'debug' build type is generated.
        self.buildtypes = ['debug']
        self.test_id = self.gen_id()
        self.test_command_id = self.gen_id()
        self.test_buildconf_id = self.gen_id()
def gen_id(self):
return str(uuid.uuid4()).upper().replace('-', '')[:24]
def get_target_dir(self, target):
dirname = os.path.join(target.get_subdir(), self.environment.coredata.get_builtin_option('buildtype'))
os.makedirs(os.path.join(self.environment.get_build_dir(), dirname), exist_ok=True)
return dirname
def write_line(self, text):
self.ofile.write(self.indent*self.indent_level + text)
if not text.endswith('\n'):
self.ofile.write('\n')
def generate(self, interp):
    """Entry point: write <project>.xcodeproj/project.pbxproj.

    The *_map passes must all run before any pbx/xc section is emitted,
    because the sections cross-reference the UIDs those maps allocate.
    """
    self.interpreter = interp
    self.serialise_tests()
    self.generate_filemap()
    self.generate_buildmap()
    self.generate_buildstylemap()
    self.generate_build_phase_map()
    self.generate_build_configuration_map()
    self.generate_build_configurationlist_map()
    self.generate_project_configurations_map()
    self.generate_buildall_configurations_map()
    self.generate_test_configurations_map()
    self.generate_native_target_map()
    self.generate_source_phase_map()
    self.generate_target_dependency_map()
    self.generate_pbxdep_map()
    self.generate_containerproxy_map()
    self.proj_dir = os.path.join(self.environment.get_build_dir(), self.build.project_name + '.xcodeproj')
    os.makedirs(self.proj_dir, exist_ok=True)
    self.proj_file = os.path.join(self.proj_dir, 'project.pbxproj')
    self.ofile = open(self.proj_file, 'w')
    # Section order matters: this is the canonical pbxproj layout.
    self.generate_prefix()
    self.generate_pbx_aggregate_target()
    self.generate_pbx_build_file()
    self.generate_pbx_build_style()
    self.generate_pbx_container_item_proxy()
    self.generate_pbx_file_reference()
    self.generate_pbx_group()
    self.generate_pbx_native_target()
    self.generate_pbx_project()
    self.generate_pbx_shell_build_phase()
    self.generate_pbx_sources_build_phase()
    self.generate_pbx_target_dependency()
    self.generate_xc_build_configuration()
    self.generate_xc_configurationList()
    self.generate_suffix()
    # for some reason, the entire file was not being flushed to the disk.
    # closing it explicitly forces a flush and fixes the issue
    self.ofile.close()
def get_xcodetype(self, fname):
return self.xcodetypemap[fname.split('.')[-1]]
def generate_filemap(self):
    """Allocate a UID for every source/object file and every target.

    filemap keys are paths relative to the source root; target_filemap
    keys are target names.
    """
    self.filemap = {} # Key is source file relative to src root.
    self.target_filemap = {}
    for name, target in self.build.targets.items():
        for src in target.sources:
            # File objects are flattened to a relative path; plain
            # strings are used as-is.
            if isinstance(src, mesonlib.File):
                src = os.path.join(src.subdir, src.fname)
            self.filemap[src] = self.gen_id()
        for obj in target.objects:
            # Only plain string objects get an entry.
            if isinstance(obj, str):
                obj = os.path.join(target.subdir, obj)
                self.filemap[obj] = self.gen_id()
        self.target_filemap[name] = self.gen_id()
def generate_buildmap(self):
self.buildmap = {}
for t in self.build.targets.values():
for s in t.sources:
s = os.path.join(s.subdir, s.fname)
self.buildmap[s] = self.gen_id()
for o in t.objects:
o = os.path.join(t.subdir, o)
if isinstance(o, str):
self.buildmap[o] = self.gen_id()
def generate_buildstylemap(self):
self.buildstylemap = {'debug' : self.gen_id()}
def generate_build_phase_map(self):
self.buildphasemap = {}
for t in self.build.targets:
self.buildphasemap[t] = self.gen_id()
def generate_build_configuration_map(self):
self.buildconfmap = {}
for t in self.build.targets:
bconfs = {'debug' : self.gen_id()}
self.buildconfmap[t] = bconfs
def generate_project_configurations_map(self):
self.project_configurations = {'debug' : self.gen_id()}
def generate_buildall_configurations_map(self):
self.buildall_configurations = {'debug' : self.gen_id()}
def generate_test_configurations_map(self):
self.test_configurations = {'debug' : self.gen_id()}
def generate_build_configurationlist_map(self):
self.buildconflistmap = {}
for t in self.build.targets:
self.buildconflistmap[t] = self.gen_id()
def generate_native_target_map(self):
self.native_targets = {}
for t in self.build.targets:
self.native_targets[t] = self.gen_id()
def generate_target_dependency_map(self):
self.target_dependency_map = {}
for tname, t in self.build.targets.items():
for target in t.link_targets:
self.target_dependency_map[(tname, target.get_basename())] = self.gen_id()
def generate_pbxdep_map(self):
self.pbx_dep_map = {}
for t in self.build.targets:
self.pbx_dep_map[t] = self.gen_id()
def generate_containerproxy_map(self):
self.containerproxy_map = {}
for t in self.build.targets:
self.containerproxy_map[t] = self.gen_id()
def generate_source_phase_map(self):
self.source_phase = {}
for t in self.build.targets:
self.source_phase[t] = self.gen_id()
def generate_pbx_aggregate_target(self):
    """Emit the PBXAggregateTarget section: the synthetic ALL_BUILD
    target (depends on every real target) and the RUN_TESTS target
    (runs the test script via a shell build phase)."""
    self.ofile.write('\n/* Begin PBXAggregateTarget section */\n')
    self.write_line('%s /* ALL_BUILD */ = {' % self.all_id)
    self.indent_level += 1
    self.write_line('isa = PBXAggregateTarget;')
    self.write_line('buildConfigurationList = %s;' % self.all_buildconf_id)
    self.write_line('buildPhases = (')
    self.write_line(');')
    self.write_line('dependencies = (')
    self.indent_level += 1
    # ALL_BUILD depends on every target so building it builds everything.
    for t in self.build.targets:
        self.write_line('%s /* PBXTargetDependency */,' % self.pbx_dep_map[t])
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('name = ALL_BUILD;')
    self.write_line('productName = ALL_BUILD;')
    self.indent_level -= 1
    self.write_line('};')
    self.write_line('%s /* RUN_TESTS */ = {' % self.test_id)
    self.indent_level += 1
    self.write_line('isa = PBXAggregateTarget;')
    self.write_line('buildConfigurationList = %s;' % self.test_buildconf_id)
    self.write_line('buildPhases = (')
    self.indent_level += 1
    # The single build phase is the shell script emitted by
    # generate_pbx_shell_build_phase.
    self.write_line('%s /* test run command */,' % self.test_command_id)
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('dependencies = (')
    self.write_line(');')
    self.write_line('name = RUN_TESTS;')
    self.write_line('productName = RUN_TESTS;')
    self.indent_level -= 1
    self.write_line('};')
    self.ofile.write('/* End PBXAggregateTarget section */\n')
def generate_pbx_build_file(self):
    """Emit the PBXBuildFile section: one entry per source/object file,
    tying its buildmap UID to its PBXFileReference UID."""
    self.ofile.write('\n/* Begin PBXBuildFile section */\n')
    templ = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */; settings = { COMPILER_FLAGS = "%s"; }; };\n'
    otempl = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */;};\n'
    for t in self.build.targets.values():
        for s in t.sources:
            if isinstance(s, mesonlib.File):
                s = s.fname
            if isinstance(s, str):
                s = os.path.join(t.subdir, s)
                # NOTE(review): this key uses t.subdir, while
                # generate_buildmap/filemap key File sources by their own
                # subdir — verify these agree when a File's subdir
                # differs from the target's.
                idval = self.buildmap[s]
                fullpath = os.path.join(self.environment.get_source_dir(), s)
                fileref = self.filemap[s]
                fullpath2 = fullpath
                compiler_args = ''
                self.ofile.write(templ % (idval, fullpath, fileref, fullpath2, compiler_args))
        for o in t.objects:
            o = os.path.join(t.subdir, o)
            idval = self.buildmap[o]
            fileref = self.filemap[o]
            fullpath = os.path.join(self.environment.get_source_dir(), o)
            fullpath2 = fullpath
            self.ofile.write(otempl % (idval, fullpath, fileref, fullpath2))
    self.ofile.write('/* End PBXBuildFile section */\n')
def generate_pbx_build_style(self):
    """Emit the PBXBuildStyle section (one legacy style per entry in
    buildstylemap; currently just 'debug')."""
    self.ofile.write('\n/* Begin PBXBuildStyle section */\n')
    for name, idval in self.buildstylemap.items():
        # The strings below already end in '\n', so write_line does not
        # append a second newline.
        self.write_line('%s /* %s */ = {\n' % (idval, name))
        self.indent_level += 1
        self.write_line('isa = PBXBuildStyle;\n')
        self.write_line('buildSettings = {\n')
        self.indent_level += 1
        self.write_line('COPY_PHASE_STRIP = NO;\n')
        self.indent_level -= 1
        self.write_line('};\n')
        self.write_line('name = "%s";\n' % name)
        self.indent_level -= 1
        self.write_line('};\n')
    self.ofile.write('/* End PBXBuildStyle section */\n')
def generate_pbx_container_item_proxy(self):
    """Emit the PBXContainerItemProxy section: one proxy per target,
    used by PBXTargetDependency to reference the native target."""
    self.ofile.write('\n/* Begin PBXContainerItemProxy section */\n')
    for t in self.build.targets:
        self.write_line('%s /* PBXContainerItemProxy */ = {' % self.containerproxy_map[t])
        self.indent_level += 1
        self.write_line('isa = PBXContainerItemProxy;')
        self.write_line('containerPortal = %s /* Project object */;' % self.project_uid)
        # proxyType 1 = native target reference within this project.
        self.write_line('proxyType = 1;')
        self.write_line('remoteGlobalIDString = %s;' % self.native_targets[t])
        self.write_line('remoteInfo = "%s";' % t)
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXContainerItemProxy section */\n')
def generate_pbx_file_reference(self):
    """Emit the PBXFileReference section: every source file (relative to
    the source root) and every target's output product."""
    self.ofile.write('\n/* Begin PBXFileReference section */\n')
    src_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; fileEncoding = 4; name = "%s"; path = "%s"; sourceTree = SOURCE_ROOT; };\n'
    for fname, idval in self.filemap.items():
        fullpath = os.path.join(self.environment.get_source_dir(), fname)
        xcodetype = self.get_xcodetype(fname)
        name = os.path.split(fname)[-1]
        path = fname
        self.ofile.write(src_templ % (idval, fullpath, xcodetype, name, path))
    target_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; path = %s; refType = %d; sourceTree = BUILT_PRODUCTS_DIR; };\n'
    for tname, idval in self.target_filemap.items():
        t = self.build.targets[tname]
        fname = t.get_filename()
        # refType is always written as 0 here.
        reftype = 0
        if isinstance(t, build.Executable):
            typestr = 'compiled.mach-o.executable'
            path = t.get_filename()
        elif isinstance(t, build.SharedLibrary):
            # OSX has a completely different shared library
            # naming scheme so do this manually.
            typestr = self.get_xcodetype('dummy.dylib')
            path = t.get_osx_filename()
        else:
            typestr = self.get_xcodetype(fname)
            path = '"%s"' % t.get_filename()
        self.ofile.write(target_templ % (idval, tname, typestr, path, reftype))
    self.ofile.write('/* End PBXFileReference section */\n')
def generate_pbx_group(self):
groupmap = {}
target_src_map = {}
for t in self.build.targets:
groupmap[t] = self.gen_id()
target_src_map[t] = self.gen_id()
self.ofile.write('\n/* Begin PBXGroup section */\n')
sources_id = self.gen_id()
resources_id = self.gen_id()
products_id = self.gen_id()
self.write_line('%s = {' % self.maingroup_id)
self.indent_level+=1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level+=1
self.write_line('%s /* Sources */,' % sources_id)
self.write_line('%s /* Resources */,' % resources_id)
self.write_line('%s /* Products */,' % products_id)
self.indent_level-=1
self.write_line(');')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
# Sources
self.write_line('%s /* Sources */ = {' % sources_id)
self.indent_level+=1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level+=1
for t in self.build.targets:
self.write_line('%s /* %s */,' % (groupmap[t], t))
self.indent_level-=1
self.write_line(');')
self.write_line('name = Sources;')
self.write_line('sourcetree = "<group>";')
self.indent_level-=1
self.write_line('};')
self.write_line('%s /* Resources */ = {' % resources_id)
self.indent_level+=1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.write_line(');')
self.write_line('name = Resources;')
self.write_line('sourceTree = "<group>";')
self.indent_level-=1
self.write_line('};')
# Targets
for t in self.build.targets:
self.write_line('%s /* %s */ = {' % (groupmap[t], t))
self.indent_level+=1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level+=1
self.write_line('%s /* Source files */,' % target_src_map[t])
self.indent_level-=1
self.write_line(');')
self.write_line('name = "%s";' % t)
self.write_line('sourceTree = "<group>";')
self.indent_level-=1
self.write_line('};')
self.write_line('%s /* Source files */ = {' % target_src_map[t])
self.indent_level+=1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level+=1
for s in self.build.targets[t].sources:
s = os.path.join(s.subdir, s.fname)
if isinstance(s, str):
self.write_line('%s /* %s */,' % (self.filemap[s], s))
for o in self.build.targets[t].objects:
o = os.path.join(self.build.targets[t].subdir, o)
self.write_line('%s /* %s */,' % (self.filemap[o], o))
self.indent_level-=1
self.write_line(');')
self.write_line('name = "Source files";')
self.write_line('sourceTree = "<group>";')
self.indent_level-=1
self.write_line('};')
# And finally products
self.write_line('%s /* Products */ = {' % products_id)
self.indent_level+=1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level+=1
for t in self.build.targets:
self.write_line('%s /* %s */,' % (self.target_filemap[t], t))
self.indent_level-=1
self.write_line(');')
self.write_line('name = Products;')
self.write_line('sourceTree = "<group>";')
self.indent_level-=1
self.write_line('};')
self.ofile.write('/* End PBXGroup section */\n')
def generate_pbx_native_target(self):
    """Emit the PBXNativeTarget section: one entry per real build
    target, wiring its build phases, dependencies, product reference
    and product type."""
    self.ofile.write('\n/* Begin PBXNativeTarget section */\n')
    for tname, idval in self.native_targets.items():
        t = self.build.targets[tname]
        self.write_line('%s /* %s */ = {' % (idval, tname))
        self.indent_level += 1
        self.write_line('isa = PBXNativeTarget;')
        self.write_line('buildConfigurationList = %s /* Build configuration list for PBXNativeTarget "%s" */;'\
                        % (self.buildconflistmap[tname], tname))
        self.write_line('buildPhases = (')
        self.indent_level += 1
        self.write_line('%s /* Sources */,' % self.buildphasemap[tname])
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('buildRules = (')
        self.write_line(');')
        self.write_line('dependencies = (')
        self.indent_level += 1
        for lt in self.build.targets[tname].link_targets:
            # NOT DOCUMENTED, may need to make different links
            # to same target have different targetdependency item.
            # NOTE(review): this rebinds the loop's 'idval' (harmless,
            # it is not used again) and indexes pbx_dep_map by
            # lt.get_id() while the map was keyed by build.targets keys
            # — verify the two always coincide.
            idval = self.pbx_dep_map[lt.get_id()]
            self.write_line('%s /* PBXTargetDependency */,' % idval)
        self.indent_level -= 1
        self.write_line(");")
        self.write_line('name = "%s";' % tname)
        self.write_line('productName = "%s";' % tname)
        self.write_line('productReference = %s /* %s */;' % (self.target_filemap[tname], tname))
        if isinstance(t, build.Executable):
            typestr = 'com.apple.product-type.tool'
        elif isinstance(t, build.StaticLibrary):
            typestr = 'com.apple.product-type.library.static'
        elif isinstance(t, build.SharedLibrary):
            typestr = 'com.apple.product-type.library.dynamic'
        else:
            raise MesonException('Unknown target type for %s' % tname)
        self.write_line('productType = "%s";' % typestr)
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXNativeTarget section */\n')
def generate_pbx_project(self):
    """Emit the PBXProject section: the root project object referencing
    the main group, build styles and all targets."""
    self.ofile.write('\n/* Begin PBXProject section */\n')
    self.write_line('%s /* Project object */ = {' % self.project_uid)
    self.indent_level += 1
    self.write_line('isa = PBXProject;')
    self.write_line('attributes = {')
    self.indent_level += 1
    self.write_line('BuildIndependentTargetsInParallel = YES;')
    self.indent_level -= 1
    self.write_line('};')
    conftempl = 'buildConfigurationList = %s /* build configuration list for PBXProject "%s"*/;'
    self.write_line(conftempl % (self.project_conflist, self.build.project_name))
    self.write_line('buildSettings = {')
    self.write_line('};')
    self.write_line('buildStyles = (')
    self.indent_level += 1
    for name, idval in self.buildstylemap.items():
        self.write_line('%s /* %s */,' % (idval, name))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('compatibilityVersion = "Xcode 3.2";')
    self.write_line('hasScannedForEncodings = 0;')
    self.write_line('mainGroup = %s;' % self.maingroup_id)
    # NOTE(review): build_to_src is presumably set by the backends.Backend
    # base class — confirm, it is not assigned in this file.
    self.write_line('projectDirPath = "%s";' % self.build_to_src)
    self.write_line('projectRoot = "";')
    self.write_line('targets = (')
    self.indent_level += 1
    self.write_line('%s /* ALL_BUILD */,' % self.all_id)
    self.write_line('%s /* RUN_TESTS */,' % self.test_id)
    for t in self.build.targets:
        self.write_line('%s /* %s */,' % (self.native_targets[t], t))
    self.indent_level -= 1
    self.write_line(');')
    self.indent_level -= 1
    self.write_line('};')
    self.ofile.write('/* End PBXProject section */\n')
def generate_pbx_shell_build_phase(self):
    """Emit the PBXShellScriptBuildPhase section: the shell script that
    the RUN_TESTS aggregate target executes (runs meson_test.py)."""
    self.ofile.write('\n/* Begin PBXShellScriptBuildPhase section */\n')
    self.write_line('%s = {' % self.test_command_id)
    self.indent_level += 1
    self.write_line('isa = PBXShellScriptBuildPhase;')
    self.write_line('buildActionMask = 2147483647;')
    self.write_line('files = (')
    self.write_line(');')
    self.write_line('inputPaths = (')
    self.write_line(');')
    self.write_line('outputPaths = (')
    self.write_line(');')
    self.write_line('runOnlyForDeploymentPostprocessing = 0;')
    self.write_line('shellPath = /bin/sh;')
    script_root = self.environment.get_script_dir()
    test_script = os.path.join(script_root, 'meson_test.py')
    test_data = os.path.join(self.environment.get_scratch_dir(), 'meson_test_setup.dat')
    cmd = [sys.executable, test_script, test_data, '--wd', self.environment.get_build_dir()]
    # Single-quote each argument so paths with spaces survive /bin/sh.
    cmdstr = ' '.join(["'%s'" % i for i in cmd])
    self.write_line('shellScript = "%s";' % cmdstr)
    self.write_line('showEnvVarsInLog = 0;')
    self.indent_level -= 1
    self.write_line('};')
    self.ofile.write('/* End PBXShellScriptBuildPhase section */\n')
def generate_pbx_sources_build_phase(self):
    """Emit the PBXSourcesBuildPhase section: per target, the list of
    compiled (non-header) source files, referenced by buildmap UID."""
    self.ofile.write('\n/* Begin PBXSourcesBuildPhase section */\n')
    for name, phase_id in self.source_phase.items():
        self.write_line('%s /* Sources */ = {' % self.buildphasemap[name])
        self.indent_level += 1
        self.write_line('isa = PBXSourcesBuildPhase;')
        self.write_line('buildActionMask = 2147483647;')
        self.write_line('files = (')
        self.indent_level += 1
        for s in self.build.targets[name].sources:
            # NOTE(review): assumes every source is File-like (has
            # .subdir/.fname); plain-string sources would raise here —
            # verify against how targets are populated.
            s = os.path.join(s.subdir, s.fname)
            # Headers are listed in the project but never compiled.
            if not self.environment.is_header(s):
                self.write_line('%s /* %s */,' % (self.buildmap[s], os.path.join(self.environment.get_source_dir(), s)))
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('runOnlyForDeploymentPostprocessing = 0;')
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXSourcesBuildPhase section */\n')
def generate_pbx_target_dependency(self):
self.ofile.write('\n/* Begin PBXTargetDependency section */\n')
for t in self.build.targets:
idval = self.pbx_dep_map[t] # VERIFY: is this correct?
self.write_line('%s /* PBXTargetDependency */ = {' % idval)
self.indent_level += 1
self.write_line('isa = PBXTargetDependency;')
self.write_line('target = %s /* %s */;' % (self.native_targets[t], t))
self.write_line('targetProxy = %s /* PBXContainerItemProxy */;' % self.containerproxy_map[t])
self.indent_level-=1
self.write_line('};')
self.ofile.write('/* End PBXTargetDependency section */\n')
def generate_xc_build_configuration(self):
    """Emit the XCBuildConfiguration section: build settings for the
    project, the ALL_BUILD and RUN_TESTS aggregate targets, and every
    native target (per build type; currently only 'debug')."""
    self.ofile.write('\n/* Begin XCBuildConfiguration section */\n')
    # First the setup for the toplevel project.
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */ = {' % (self.project_configurations[buildtype], buildtype))
        self.indent_level += 1
        self.write_line('isa = XCBuildConfiguration;')
        self.write_line('buildSettings = {')
        self.indent_level += 1
        self.write_line('ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";')
        self.write_line('ONLY_ACTIVE_ARCH = YES;')
        self.write_line('SDKROOT = "macosx";')
        self.write_line('SYMROOT = "%s/build";' % self.environment.get_build_dir())
        self.indent_level -= 1
        self.write_line('};')
        self.write_line('name = "%s";' % buildtype)
        self.indent_level -= 1
        self.write_line('};')
    # Then the all target.
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */ = {' % (self.buildall_configurations[buildtype], buildtype))
        self.indent_level += 1
        self.write_line('isa = XCBuildConfiguration;')
        self.write_line('buildSettings = {')
        self.indent_level += 1
        self.write_line('COMBINE_HIDPI_IMAGES = YES;')
        self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
        self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
        self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
        self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
        self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
        self.write_line('INSTALL_PATH = "";')
        self.write_line('OTHER_CFLAGS = " ";')
        self.write_line('OTHER_LDFLAGS = " ";')
        self.write_line('OTHER_REZFLAGS = "";')
        self.write_line('PRODUCT_NAME = ALL_BUILD;')
        self.write_line('SECTORDER_FLAGS = "";')
        self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
        self.write_line('USE_HEADERMAP = NO;')
        self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
        self.indent_level -= 1
        self.write_line('};')
        self.write_line('name = "%s";' % buildtype)
        self.indent_level -= 1
        self.write_line('};')
    # Then the test target.
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */ = {' % (self.test_configurations[buildtype], buildtype))
        self.indent_level += 1
        self.write_line('isa = XCBuildConfiguration;')
        self.write_line('buildSettings = {')
        self.indent_level += 1
        self.write_line('COMBINE_HIDPI_IMAGES = YES;')
        self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
        self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
        self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
        self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
        self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
        self.write_line('INSTALL_PATH = "";')
        self.write_line('OTHER_CFLAGS = " ";')
        self.write_line('OTHER_LDFLAGS = " ";')
        self.write_line('OTHER_REZFLAGS = "";')
        self.write_line('PRODUCT_NAME = RUN_TESTS;')
        self.write_line('SECTORDER_FLAGS = "";')
        self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
        self.write_line('USE_HEADERMAP = NO;')
        self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
        self.indent_level -= 1
        self.write_line('};')
        self.write_line('name = "%s";' % buildtype)
        self.indent_level -= 1
        self.write_line('};')
    # Now finally targets.
    # Meson language id -> the infix used in Xcode's OTHER_*FLAGS keys.
    langnamemap = {'c' : 'C', 'cpp' : 'CPLUSPLUS', 'objc' : 'OBJC', 'objcpp' : 'OBJCPLUSPLUS'}
    for target_name, target in self.build.targets.items():
        for buildtype in self.buildtypes:
            dep_libs = []
            links_dylib = False
            headerdirs = []
            # Each include dir is added twice: once under the source
            # tree and once under the build tree.
            for d in target.include_dirs:
                for sd in d.incdirs:
                    cd = os.path.join(d.curdir, sd)
                    headerdirs.append(os.path.join(self.environment.get_source_dir(), cd))
                    headerdirs.append(os.path.join(self.environment.get_build_dir(), cd))
            for l in target.link_targets:
                abs_path = os.path.join(self.environment.get_build_dir(),
                                        l.subdir, buildtype, l.get_osx_filename())
                dep_libs.append("'%s'" % abs_path)
                if isinstance(l, build.SharedLibrary):
                    links_dylib = True
            if links_dylib:
                dep_libs = ['-Wl,-search_paths_first', '-Wl,-headerpad_max_install_names'] + dep_libs
            dylib_version = None
            if isinstance(target, build.SharedLibrary):
                ldargs = ['-dynamiclib', '-Wl,-headerpad_max_install_names'] + dep_libs
                install_path = os.path.join(self.environment.get_build_dir(), target.subdir, buildtype)
                dylib_version = target.version
            else:
                ldargs = dep_libs
                install_path = ''
            # Versioned dylibs embed the version in the product name.
            if dylib_version is not None:
                product_name = target.get_basename() + '.' + dylib_version
            else:
                product_name = target.get_basename()
            ldargs += target.link_args
            ldstr = ' '.join(ldargs)
            valid = self.buildconfmap[target_name][buildtype]
            langargs = {}
            for lang in self.environment.coredata.compilers:
                if lang not in langnamemap:
                    continue
                # Global project args come before per-target args.
                gargs = self.build.global_args.get(lang, [])
                targs = target.get_extra_args(lang)
                args = gargs + targs
                if len(args) > 0:
                    langargs[langnamemap[lang]] = args
            symroot = os.path.join(self.environment.get_build_dir(), target.subdir)
            self.write_line('%s /* %s */ = {' % (valid, buildtype))
            self.indent_level += 1
            self.write_line('isa = XCBuildConfiguration;')
            self.write_line('buildSettings = {')
            self.indent_level += 1
            self.write_line('COMBINE_HIDPI_IMAGES = YES;')
            if dylib_version is not None:
                self.write_line('DYLIB_CURRENT_VERSION = "%s";' % dylib_version)
            self.write_line('EXECUTABLE_PREFIX = "%s";' % target.prefix)
            if target.suffix == '':
                suffix = ''
            else:
                suffix = '.' + target.suffix
            self.write_line('EXECUTABLE_SUFFIX = "%s";' % suffix)
            self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = YES;')
            self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
            self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
            self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
            self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
            if len(headerdirs) > 0:
                # Each path is wrapped in escaped quotes inside the list.
                quotedh = ','.join(['"\\"%s\\""' % i for i in headerdirs])
                self.write_line('HEADER_SEARCH_PATHS=(%s);' % quotedh)
            self.write_line('INSTALL_PATH = "%s";' % install_path)
            self.write_line('LIBRARY_SEARCH_PATHS = "";')
            if isinstance(target, build.SharedLibrary):
                self.write_line('LIBRARY_STYLE = DYNAMIC;')
            for langname, args in langargs.items():
                argstr = ' '.join(args)
                self.write_line('OTHER_%sFLAGS = "%s";' % (langname, argstr))
            self.write_line('OTHER_LDFLAGS = "%s";' % ldstr)
            self.write_line('OTHER_REZFLAGS = "";')
            self.write_line('PRODUCT_NAME = %s;' % product_name)
            self.write_line('SECTORDER_FLAGS = "";')
            self.write_line('SYMROOT = "%s";' % symroot)
            self.write_line('USE_HEADERMAP = NO;')
            self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
            self.indent_level -= 1
            self.write_line('};')
            self.write_line('name = "%s";' % buildtype)
            self.indent_level -= 1
            self.write_line('};')
    self.ofile.write('/* End XCBuildConfiguration section */\n')
def generate_xc_configurationList(self):
self.ofile.write('\n/* Begin XCConfigurationList section */\n')
self.write_line('%s /* Build configuration list for PBXProject "%s" */ = {' % (self.project_conflist, self.build.project_name))
self.indent_level+=1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level+=1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.project_configurations[buildtype], buildtype))
self.indent_level-=1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level-=1
self.write_line('};')
# Now the all target
self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.all_buildconf_id)
self.indent_level+=1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level+=1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.buildall_configurations[buildtype], buildtype))
self.indent_level-=1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level-=1
self.write_line('};')
# Test target
self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.test_buildconf_id)
self.indent_level+=1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level+=1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.test_configurations[buildtype], buildtype))
self.indent_level-=1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level-=1
self.write_line('};')
for target_name in self.build.targets:
listid = self.buildconflistmap[target_name]
self.write_line('%s /* Build configuration list for PBXNativeTarget "%s" */ = {' % (listid, target_name))
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
typestr = 'debug'
idval = self.buildconfmap[target_name][typestr]
self.write_line('%s /* %s */,' % (idval, typestr))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = "%s";' % typestr)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End XCConfigurationList section */\n')
def generate_prefix(self):
self.ofile.write('// !$*UTF8*$!\n{\n')
self.indent_level += 1
self.write_line('archiveVersion = 1;\n')
self.write_line('classes = {\n')
self.write_line('};\n')
self.write_line('objectVersion = 46;\n')
self.write_line('objects = {\n')
self.indent_level += 1
def generate_suffix(self):
self.indent_level -= 1
self.write_line('};\n')
self.write_line('rootObject = ' + self.project_uid + ';')
self.indent_level -= 1
self.write_line('}\n')
| {
"content_hash": "f15f6e71d0d9af670ae06b76505815a1",
"timestamp": "",
"source": "github",
"line_count": 783,
"max_line_length": 161,
"avg_line_length": 47.04214559386973,
"alnum_prop": 0.5484878101753814,
"repo_name": "winksaville/meson",
"id": "eb8b0b9da5d2ff67f6a4bb3a326d4bb087a4bae8",
"size": "36834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/backend/xcodebackend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "184"
},
{
"name": "C",
"bytes": "35142"
},
{
"name": "C#",
"bytes": "631"
},
{
"name": "C++",
"bytes": "12658"
},
{
"name": "Emacs Lisp",
"bytes": "1226"
},
{
"name": "FORTRAN",
"bytes": "1359"
},
{
"name": "Groff",
"bytes": "175"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "519"
},
{
"name": "Lex",
"bytes": "110"
},
{
"name": "Objective-C",
"bytes": "462"
},
{
"name": "Objective-C++",
"bytes": "87"
},
{
"name": "Protocol Buffer",
"bytes": "46"
},
{
"name": "Python",
"bytes": "680054"
},
{
"name": "Rust",
"bytes": "372"
},
{
"name": "Shell",
"bytes": "2040"
},
{
"name": "Swift",
"bytes": "835"
},
{
"name": "Vala",
"bytes": "3274"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
import subprocess, time, sys
from copy import deepcopy
# Console clear command: 'cls' on Windows, 'clear' everywhere else.
clear = 'cls' if sys.platform == 'win32' else 'clear'
class ParseError(Exception):
    """Raised when user input cannot be parsed into an option call."""

    def __init__(self, message='Invalid input.'):
        super(ParseError, self).__init__(message)
class Stack(object):
    """A stack of pages that drives menu navigation.

    Public methods:
    push ----- Go to another page
    back ----- Go to the previous page
    home ----- Go back to the first page
    Calling the stack invokes the page currently on top.  push and back
    are intended to be used right before calling the menu again.
    """

    def __init__(self):
        """Create an empty page stack."""
        self._pages = []

    @property
    def pages(self):
        # Hand out a shallow copy so callers cannot mutate the stack.
        return list(self._pages)

    #-----Public methods-----
    def push(self, page):
        """Append *page* (any callable) to the page stack.

        Raises TypeError for non-callable pages.
        """
        if not callable(page):
            raise TypeError('Only callable objects can be ' +
                            'added to the page stack.')
        self._pages.append(page)

    def back(self):
        """Drop the top page from the stack (no-op when empty)."""
        del self._pages[-1:]

    def home(self):
        """Drop every page except the first."""
        del self._pages[1:]

    #-----Magic methods-----
    def __call__(self):
        # Invoke whatever page is currently on top.
        self._pages[-1]()

    def __getitem__(self, i):
        # Deep-copy so callers cannot mutate stored pages in place.
        return deepcopy(self._pages[i])
class Page(object):
""""""
def __init__(self, title, body, options, order,
parse=lambda x: (x, (), {})):
"""Create a page object.
Arguments:
title ----- A string that let's the user know what the
page is about.
body ------- A string that gives the user any information.
options ---- a dictionary of option objects where each key is
the relative item's (option object's) key
order ------ An ordered container object as the order in which
to display the options to the page
Keyword Arguments:
parse ------ A callable object that parses the data entered by
the user when the page is called.
It should return an a key to access one of the page's options,
and arguments and keyword arguments to call the option with.
It should raise a ParseError if the data entered by the user
is invalid.
"""
self._title = title
self._body = body
self._options = options
self._order = order
self._parse = parse
#-----Public properties-----
@property
def options(self):
return self._options.copy()
@property
def order(self):
return self._order[:]
@property
def title(self):
return self._title
@property
def body(self):
return self._body
#-----Public property prescriptors-----
@options.setter
def options(self, other):
for key, value in other.iteritems():
if not callable(item):
raise TypeError(
'{} must be a callable object'.format(type(item)))
self._options = options
@order.setter
def order(self, other):
for item in other:
if item not in self._options.keys():
raise TypeError('{} must be a valid option key.'.format(item))
self._order = other
@body.setter
def body(self, other):
if not hasattr(other, '__str__'):
raise TypeError('{} must be a string'.format(other))
self._body = other.__str__()
#-----Magic methods-----
def __str__(self):
s = ''
if self._title:
s += '[{}]\n\n'.format(self.title)
if self._body:
s += self._body + '\n\n'
for key in self._order:
s += self.options[key].__str__() + '\n'
return s
def __call__(self):
key = ''
i = len(self.body)
while True:
subprocess.call(clear, shell=True)
print self
if len(self.body) > i:
self.body = self.body[:i]
try:
key, args, kwargs = self._parse(raw_input('> '))
return self.options[key](*args, **kwargs)
except ParseError as e:
self.body += '\n\n'+e.args[0]
class Option(object):
    """An option to display and call.

    Options can be displayed to the user by printing them. They can also be
    called for their functionality.

    Public Properties:
        key ---- the string that should be used as a key to access
            the option when creating an option dictionary.
        name --- the string displayed to the user as the option's name.
    """
    def __init__(self, key, name, function):
        """Create an option object.

        Arguments:
            key --------- a string (or number) to access the option
                from a dictionary.
            name -------- a string to display as the option name.
            function ---- a callable object that gives the option
                functionality.

        Raises:
            TypeError if any argument has the wrong type.
        """
        # BUG FIX: the original validated with ``assert``, which is
        # silently stripped under ``python -O`` (so no validation ran),
        # and raised via an AssertionError detour.  Raise directly.
        # isinstance() also accepts subclasses, a backward-compatible
        # widening of the original exact-type check.
        if not isinstance(key, (str, int, float, long)):
            raise TypeError('key must be a string or number')
        if not isinstance(name, str):
            raise TypeError('name must be a string')
        if not callable(function):
            raise TypeError('function must be a callable object')
        self._key = key
        self._name = name
        self.__function = function

    #-----Public properties-----
    # Immutable

    @property
    def key(self):
        return self._key

    @property
    def name(self):
        return self._name

    #-----Private properties-----

    @property
    def _function(self):
        # The functionality of the option
        return self.__function

    #-----Magic methods-----

    def __str__(self):
        return '> {0.key} - {0.name}'.format(self)

    def __call__(self, *args, **kwargs):
        return self._function(*args, **kwargs)
| {
"content_hash": "429d7fbe5d3bc949460c5b7844419db3",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 78,
"avg_line_length": 27.82127659574468,
"alnum_prop": 0.532425818293056,
"repo_name": "josieheartthrob/guescii",
"id": "516f515e8e734341d835148e239aa801ac38747c",
"size": "6538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shellpages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30316"
}
],
"symlink_target": ""
} |
from django.db import models
from django.dispatch import receiver
from django.db.models.signals import pre_delete, post_save, m2m_changed, post_delete
from website.utils.fileutils import UniquePathAndRename
from image_cropping import ImageRatioField
from .project import Project
from .sponsor import Sponsor
class Banner(models.Model):
    """A banner image displayed at the top of one of the site's pages."""

    # Symbolic identifiers for the pages a banner can be assigned to.
    FRONTPAGE = "FRONTPAGE"
    PEOPLE = "PEOPLE"
    PUBLICATIONS = "PUBLICATIONS"
    TALKS = "TALKS"
    PROJECTS = "PROJECTS"
    INDPROJECT = "INDPROJECT"
    NEWSLISTING = "NEWSLISTING"
    VIDEOS = "VIDEOS"
    # (value, human-readable label) pairs for the ``page`` field.
    PAGE_CHOICES = (
        (FRONTPAGE, "Front Page"),
        (NEWSLISTING, "News Listings"),
        (PEOPLE, "People"),
        (PUBLICATIONS, "Publications"),
        (TALKS, "Talks"),
        (PROJECTS, "Projects"),
        (INDPROJECT, "Ind_Project"),
        (VIDEOS, "Videos")
    )
    # Which site page this banner appears on.
    page = models.CharField(max_length=50, choices=PAGE_CHOICES, default=FRONTPAGE)
    image = models.ImageField(blank=True, upload_to=UniquePathAndRename("banner", True), max_length=255)
    # This field is only needed if the banner has been assigned to a specific project. The field is used by project_ind to select project specific banners so we don't have to add each project to the PAGE_CHOICES dictionary.
    project = models.ForeignKey(Project, blank=True, null=True, on_delete=models.CASCADE)
    project.help_text = "If this banner is for a specific project, set the page to Ind_Project. You must also set this field to the desired project for your banner to be displayed on that projects page."
    # def image_preview(self):
    #     if self.image:
    #         return u'<img src="%s" style="width:100%%"/>' % self.image.url
    #     else:
    #         return '(Please upload an image)'
    # image_preview.short_description = 'Image Preview'
    # image_preview.allow_tags = True
    # Admin-side crop selection over ``image`` (2000x500 base ratio).
    cropping = ImageRatioField('image', '2000x500', free_crop=True)
    image.help_text = 'You must select "Save and continue editing" at the bottom of the page after uploading a new image for cropping. Please note that since we are using a responsive design with fixed height banners, your selected image may appear differently on various screens.'
    title = models.CharField(max_length=50, blank=True, null=True)
    caption = models.CharField(max_length=1024, blank=True, null=True)
    alt_text = models.CharField(max_length=1024, blank=True, null=True)
    # Optional URL the banner links to when clicked.
    link = models.CharField(max_length=1024, blank=True, null=True)
    favorite = models.BooleanField(default=False)
    favorite.help_text = 'Check this box if this image should appear before other (non-favorite) banner images on the same page.'
    # NOTE(review): auto_now updates on every save, so this is really a
    # "last modified" date rather than the date added — confirm intended.
    date_added = models.DateField(auto_now=True)

    def admin_thumbnail(self):
        """Return an <img> tag (or placeholder text) for the admin list view."""
        if self.image:
            return u'<img src="%s" height="100"/>' % (self.image.url)
        else:
            return "No image found"
    admin_thumbnail.short_description = 'Thumbnail'
    admin_thumbnail.allow_tags = True

    def __str__(self):
        return "Title={} Page={} Project={}".format(self.title, self.page, self.project)
@receiver(pre_delete, sender=Banner)
def banner_delete(sender, instance, **kwargs):
    """Delete the banner's image file from storage when its row is deleted.

    Passing False to ``delete`` skips re-saving the (about to vanish) model.
    """
    image = instance.image
    if image:
        image.delete(False)
"content_hash": "c18009289d0bcfeacdd85496fbf9aa99",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 281,
"avg_line_length": 47.470588235294116,
"alnum_prop": 0.6920693928128873,
"repo_name": "jonfroehlich/makeabilitylabwebsite",
"id": "3300f0ce5907d503b6f50a7d4c2d5d98b14a95a9",
"size": "3228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/models/banner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "11651"
},
{
"name": "CSS",
"bytes": "38643"
},
{
"name": "Dockerfile",
"bytes": "2981"
},
{
"name": "HTML",
"bytes": "161157"
},
{
"name": "JavaScript",
"bytes": "121316"
},
{
"name": "PHP",
"bytes": "11467"
},
{
"name": "Python",
"bytes": "184535"
},
{
"name": "Shell",
"bytes": "1095"
},
{
"name": "TeX",
"bytes": "1338"
}
],
"symlink_target": ""
} |
"""Provides device conditions for sensors."""
from typing import Dict, List
import voluptuous as vol
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ABOVE,
CONF_BELOW,
CONF_ENTITY_ID,
CONF_TYPE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CO,
DEVICE_CLASS_CO2,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_POWER_FACTOR,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
DEVICE_CLASS_VOLTAGE,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv
from homeassistant.helpers.entity_registry import (
async_entries_for_device,
async_get_registry,
)
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
# mypy: allow-untyped-defs, no-check-untyped-defs

# Fallback pseudo device class for sensors that declare no device class.
DEVICE_CLASS_NONE = "none"

# Condition-type identifiers, one per supported sensor device class.
CONF_IS_BATTERY_LEVEL = "is_battery_level"
CONF_IS_CO = "is_carbon_monoxide"
CONF_IS_CO2 = "is_carbon_dioxide"
CONF_IS_CURRENT = "is_current"
CONF_IS_ENERGY = "is_energy"
CONF_IS_HUMIDITY = "is_humidity"
CONF_IS_ILLUMINANCE = "is_illuminance"
CONF_IS_POWER = "is_power"
CONF_IS_POWER_FACTOR = "is_power_factor"
CONF_IS_PRESSURE = "is_pressure"
CONF_IS_SIGNAL_STRENGTH = "is_signal_strength"
CONF_IS_TEMPERATURE = "is_temperature"
CONF_IS_TIMESTAMP = "is_timestamp"
CONF_IS_VOLTAGE = "is_voltage"
CONF_IS_VALUE = "is_value"

# Maps each sensor device class to the condition templates it supports.
ENTITY_CONDITIONS = {
    DEVICE_CLASS_BATTERY: [{CONF_TYPE: CONF_IS_BATTERY_LEVEL}],
    DEVICE_CLASS_CO: [{CONF_TYPE: CONF_IS_CO}],
    DEVICE_CLASS_CO2: [{CONF_TYPE: CONF_IS_CO2}],
    DEVICE_CLASS_CURRENT: [{CONF_TYPE: CONF_IS_CURRENT}],
    DEVICE_CLASS_ENERGY: [{CONF_TYPE: CONF_IS_ENERGY}],
    DEVICE_CLASS_HUMIDITY: [{CONF_TYPE: CONF_IS_HUMIDITY}],
    DEVICE_CLASS_ILLUMINANCE: [{CONF_TYPE: CONF_IS_ILLUMINANCE}],
    DEVICE_CLASS_POWER: [{CONF_TYPE: CONF_IS_POWER}],
    DEVICE_CLASS_POWER_FACTOR: [{CONF_TYPE: CONF_IS_POWER_FACTOR}],
    DEVICE_CLASS_PRESSURE: [{CONF_TYPE: CONF_IS_PRESSURE}],
    DEVICE_CLASS_SIGNAL_STRENGTH: [{CONF_TYPE: CONF_IS_SIGNAL_STRENGTH}],
    DEVICE_CLASS_TEMPERATURE: [{CONF_TYPE: CONF_IS_TEMPERATURE}],
    DEVICE_CLASS_TIMESTAMP: [{CONF_TYPE: CONF_IS_TIMESTAMP}],
    DEVICE_CLASS_VOLTAGE: [{CONF_TYPE: CONF_IS_VOLTAGE}],
    DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_IS_VALUE}],
}

# Validation schema for a sensor device condition.  At least one numeric
# bound (above/below) is required for the condition to be meaningful.
CONDITION_SCHEMA = vol.All(
    cv.DEVICE_CONDITION_BASE_SCHEMA.extend(
        {
            vol.Required(CONF_ENTITY_ID): cv.entity_id,
            vol.Required(CONF_TYPE): vol.In(
                [
                    CONF_IS_BATTERY_LEVEL,
                    CONF_IS_CO,
                    CONF_IS_CO2,
                    CONF_IS_CURRENT,
                    CONF_IS_ENERGY,
                    CONF_IS_HUMIDITY,
                    CONF_IS_ILLUMINANCE,
                    CONF_IS_POWER,
                    CONF_IS_POWER_FACTOR,
                    CONF_IS_PRESSURE,
                    CONF_IS_SIGNAL_STRENGTH,
                    CONF_IS_TEMPERATURE,
                    CONF_IS_TIMESTAMP,
                    CONF_IS_VOLTAGE,
                    CONF_IS_VALUE,
                ]
            ),
            vol.Optional(CONF_BELOW): vol.Any(vol.Coerce(float)),
            vol.Optional(CONF_ABOVE): vol.Any(vol.Coerce(float)),
        }
    ),
    cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
async def async_get_conditions(
    hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
    """List device conditions for every sensor entity of the device."""
    entity_registry = await async_get_registry(hass)
    conditions: List[Dict[str, str]] = []
    for entry in async_entries_for_device(entity_registry, device_id):
        if entry.domain != DOMAIN:
            continue
        state = hass.states.get(entry.entity_id)
        unit_of_measurement = (
            state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if state else None
        )
        # Numeric conditions only make sense for entities with a unit.
        if not state or not unit_of_measurement:
            continue
        device_class = state.attributes.get(ATTR_DEVICE_CLASS, DEVICE_CLASS_NONE)
        templates = ENTITY_CONDITIONS.get(
            device_class, ENTITY_CONDITIONS[DEVICE_CLASS_NONE]
        )
        for template in templates:
            entry_condition = dict(template)
            entry_condition.update(
                condition="device",
                device_id=device_id,
                entity_id=entry.entity_id,
                domain=DOMAIN,
            )
            conditions.append(entry_condition)
    return conditions
@callback
def async_condition_from_config(
    config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
    """Evaluate state based on configuration."""
    if config_validation:
        config = CONDITION_SCHEMA(config)
    numeric_state_config = {
        condition.CONF_CONDITION: "numeric_state",
        condition.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
    }
    # Copy whichever bounds were supplied into the numeric_state config.
    for source_key, target_key in (
        (CONF_ABOVE, condition.CONF_ABOVE),
        (CONF_BELOW, condition.CONF_BELOW),
    ):
        if source_key in config:
            numeric_state_config[target_key] = config[source_key]
    return condition.async_numeric_state_from_config(numeric_state_config)
async def async_get_condition_capabilities(hass, config):
    """List condition capabilities."""
    state = hass.states.get(config[CONF_ENTITY_ID])
    unit_of_measurement = (
        state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if state else None
    )
    # Without a state and a unit there is nothing meaningful to compare.
    if not state or not unit_of_measurement:
        raise InvalidDeviceAutomationConfig(
            "No state or unit of measurement found for "
            f"condition entity {config[CONF_ENTITY_ID]}"
        )
    extra_fields = vol.Schema(
        {
            vol.Optional(
                CONF_ABOVE, description={"suffix": unit_of_measurement}
            ): vol.Coerce(float),
            vol.Optional(
                CONF_BELOW, description={"suffix": unit_of_measurement}
            ): vol.Coerce(float),
        }
    )
    return {"extra_fields": extra_fields}
| {
"content_hash": "a8bc68658f6f3b5822116c28dbfdfb6a",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 77,
"avg_line_length": 32.32142857142857,
"alnum_prop": 0.6176795580110497,
"repo_name": "partofthething/home-assistant",
"id": "e2efac7b141f69d26ea49c7abe141b6dc5e2565f",
"size": "6335",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/device_condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import numpy as np
import pickle
import cv2
import cv2.cv as cv
import time
import sys
from sklearn.externals import joblib
caffe_root = '/home/hayden/caffe-recurrent/' # this file is expected to be in {caffe_root}/examples
sys.path.insert(0, caffe_root + 'python')
import caffe
def main():
    """Configure dataset paths and the Caffe model, then run classification.

    All paths are machine-specific absolute paths; edit them to match the
    local dataset layout before running.
    """
    # Feature layer(s) to read from the precomputed VGG16 feature dumps.
    layers = ['pool5']
    vid_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/VID/'
    feat_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/FEATURES/VGG16/RAW/'
    classifications_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/CLASSIFICATIONS/FC/001/'
    classifier_path = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/SVM2/'
    video_name = 'AUSO_2014_M_SF_Nadal_Federer'
    start = None  # if none start from the beginning
    end = None  # if none end at end of video
    fps = 1
    max_frames_in_file = 25000  # so not massive files
    # Earlier model snapshot, kept for reference:
    # MODEL_FILE = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/001/VGG_Tennis_deploy.prototxt'
    # PRETRAINED = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/001/VGG_Tennis_001_iter_20000.caffemodel'
    MODEL_FILE = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/006/deploy.prototxt'
    PRETRAINED = '/media/hayden/Storage/DATASETS/SPORT/TENNIS01/MODELS/FC/006/snapshot_iter_65900.caffemodel'
    caffe.set_mode_gpu()  # GPU MODE
    # image_dims=(1, 1): the net consumes precomputed feature vectors,
    # not images, so the spatial dimensions are degenerate.
    net = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=(1, 1))
    classify(vid_path, video_name, classifications_path, feat_path, classifier_path, layers, start, end, fps, max_frames_in_file,net)
#######################################################################################################################
def classify(vid_path, video_name, classifications_path, feat_path, classifier_path, layers, start, end, fps, max_frames_in_file,net):
    """Run the network over precomputed per-frame features of a video.

    Features are stored in pickled chunks of ``max_frames_in_file``
    frames; each chunk is loaded lazily as the frame index crosses a
    chunk boundary.  Predictions are currently only printed — the SVM
    classification and label-saving code paths are commented out.
    """
    # load net and video to extract features
    capture = cv2.VideoCapture(vid_path + video_name)
    if start is not None:
        capture.set(cv.CV_CAP_PROP_POS_FRAMES, start)
    else:
        start = 0
    if end is None:
        end = int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT))
    total = end-start
    for layer in layers:
        start_time = time.clock()
        events = ['Nothing','Hit','Serve','Nadal','Federer','Forehand','Backhand']
        # One row per event class, one column per processed frame.
        labels = np.zeros((len(events),total),dtype=np.uint8)
        # # load classifier
        # classifier_names = ['OTHERvHITvSERVE', 'NADALvFEDERER', 'FOREHANDvBACKHAND']
        # classifiers = []
        # for classifier_name in classifier_names:
        #     print 'Loading Classifier: '+classifier_path+classifier_name+'_'+layer+'.pkl'
        #     classifiers.append(joblib.load(classifier_path+classifier_name+'_'+layer+'.pkl'))
        # load features
        for current in range(start,end):
            # At each chunk boundary, load the pickle covering frames [a, b).
            if current%max_frames_in_file == 0:
                a = current
                if (current+max_frames_in_file)>int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT)):
                    b = int(capture.get(cv.CV_CAP_PROP_FRAME_COUNT))
                else:
                    b = current+max_frames_in_file
                print 'Loading features: '+feat_path+video_name+'/'+layer+'_'+str(a)+'_'+str(b-1)
                with open(feat_path+video_name+'/'+layer+'_'+str(a)+'_'+str(b-1), 'rb') as f:
                    features = pickle.load(f)
            print '=============='
            print current
            # Reshape the flat feature vector to the net's 4-D input blob.
            test_features_in = np.reshape(features[int(current%max_frames_in_file)],(1,1,1,len(features[int(current%max_frames_in_file)])))
            print net.predict(test_features_in,oversample=False)
            # for c in range(len(classifier_names)):
            #     predicted_class = classifiers[c].predict([features[int(current%max_frames_in_file)]])[0]
            #     if c == 0:
            #         labels[predicted_class][current] = 1
            #     elif c == 1:
            #         labels[predicted_class+3][current] = 1
            #     elif c == 2:
            #         labels[predicted_class+5][current] = 1
            #     else:
            #         pass
            # Progress / ETA report every ~2% of frames.
            if (current/float(total))*100 % 2 == 0:
                tr=(total-current)/((1+current)/(time.clock()-start_time))
                print 'Perc: %f; Overall Time Remaining: %02d:%02d:%02d;' % ((current/float(total))*100,int((tr/60)/60),int((tr/60)%60),int(tr%60))
        # print 'Saving labels: '+classifications_path+video_name+'/'+layer
        # with open(classifications_path+video_name+'/'+layer, 'wb') as f:
        #     pickle.dump(labels, f)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "44ac7cb8af73524692653b19a3a4b16b",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 147,
"avg_line_length": 44.77450980392157,
"alnum_prop": 0.5966717757827896,
"repo_name": "HaydenFaulkner/phd",
"id": "38680f1b935b946f0ea5cb1af66b1371060a1740",
"size": "4567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tennis/tennis_nn_classification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1243227"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import datetime
import functools
import socket
import sys
import threading
import time
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, StackContext, wrap, NullContext
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipIfNonUnix, skipOnTravis
try:
from concurrent import futures
except ImportError:
futures = None
class TestIOLoop(AsyncTestCase):
    """Tests of core IOLoop behavior: callbacks, timeouts, and handlers."""

    @skipOnTravis
    def test_add_callback_wakeup(self):
        # Make sure that add_callback from inside a running IOLoop
        # wakes up the IOLoop immediately instead of waiting for a timeout.
        def callback():
            self.called = True
            self.stop()

        def schedule_callback():
            self.called = False
            self.io_loop.add_callback(callback)
            # Store away the time so we can check if we woke up immediately
            self.start_time = time.time()
        self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
        self.wait()
        self.assertAlmostEqual(time.time(), self.start_time, places=2)
        self.assertTrue(self.called)

    @skipOnTravis
    def test_add_callback_wakeup_other_thread(self):
        # add_callback from another thread must also wake the loop promptly.
        def target():
            # sleep a bit to let the ioloop go into its poll loop
            time.sleep(0.01)
            self.stop_time = time.time()
            self.io_loop.add_callback(self.stop)
        thread = threading.Thread(target=target)
        self.io_loop.add_callback(thread.start)
        self.wait()
        delta = time.time() - self.stop_time
        self.assertLess(delta, 0.1)
        thread.join()

    def test_add_timeout_timedelta(self):
        # add_timeout accepts a timedelta deadline as well as an absolute time.
        self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
        self.wait()

    def test_multiple_add(self):
        sock, port = bind_unused_port()
        try:
            self.io_loop.add_handler(sock.fileno(), lambda fd, events: None,
                                     IOLoop.READ)
            # Attempting to add the same handler twice fails
            # (with a platform-dependent exception)
            self.assertRaises(Exception, self.io_loop.add_handler,
                              sock.fileno(), lambda fd, events: None,
                              IOLoop.READ)
        finally:
            self.io_loop.remove_handler(sock.fileno())
            sock.close()

    def test_remove_without_add(self):
        # remove_handler should not throw an exception if called on an fd
        # that was never added.
        sock, port = bind_unused_port()
        try:
            self.io_loop.remove_handler(sock.fileno())
        finally:
            sock.close()

    def test_add_callback_from_signal(self):
        # cheat a little bit and just run this normally, since we can't
        # easily simulate the races that happen with real signal handlers
        self.io_loop.add_callback_from_signal(self.stop)
        self.wait()

    def test_add_callback_from_signal_other_thread(self):
        # Very crude test, just to make sure that we cover this case.
        # This also happens to be the first test where we run an IOLoop in
        # a non-main thread.
        other_ioloop = IOLoop()
        thread = threading.Thread(target=other_ioloop.start)
        thread.start()
        other_ioloop.add_callback_from_signal(other_ioloop.stop)
        thread.join()
        other_ioloop.close()

    def test_add_callback_while_closing(self):
        # Issue #635: add_callback() should raise a clean exception
        # if called while another thread is closing the IOLoop.
        closing = threading.Event()

        def target():
            other_ioloop.add_callback(other_ioloop.stop)
            other_ioloop.start()
            closing.set()
            other_ioloop.close(all_fds=True)
        other_ioloop = IOLoop()
        thread = threading.Thread(target=target)
        thread.start()
        closing.wait()
        # Race against the closing loop until we observe the clean error.
        for i in range(1000):
            try:
                other_ioloop.add_callback(lambda: None)
            except RuntimeError as e:
                self.assertEqual("IOLoop is closing", str(e))
                break

    def test_handle_callback_exception(self):
        # IOLoop.handle_callback_exception can be overridden to catch
        # exceptions in callbacks.
        def handle_callback_exception(callback):
            self.assertIs(sys.exc_info()[0], ZeroDivisionError)
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            # remove the test StackContext that would see this uncaught
            # exception as a test failure.
            self.io_loop.add_callback(lambda: 1 / 0)
        self.wait()

    @skipIfNonUnix  # just because socketpair is so convenient
    def test_read_while_writeable(self):
        # Ensure that write events don't come in while we're waiting for
        # a read and haven't asked for writeability. (the reverse is
        # difficult to test for)
        client, server = socket.socketpair()
        try:
            def handler(fd, events):
                self.assertEqual(events, IOLoop.READ)
                self.stop()
            self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
            self.io_loop.add_timeout(self.io_loop.time() + 0.01,
                                     functools.partial(server.send, b'asdf'))
            self.wait()
            self.io_loop.remove_handler(client.fileno())
        finally:
            client.close()
            server.close()

    def test_remove_timeout_after_fire(self):
        # It is not an error to call remove_timeout after it has run.
        handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
        self.wait()
        self.io_loop.remove_timeout(handle)

    def test_remove_timeout_cleanup(self):
        # Add and remove enough callbacks to trigger cleanup.
        # Not a very thorough test, but it ensures that the cleanup code
        # gets executed and doesn't blow up. This test is only really useful
        # on PollIOLoop subclasses, but it should run silently on any
        # implementation.
        for i in range(2000):
            timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600,
                                               lambda: None)
            self.io_loop.remove_timeout(timeout)
        # HACK: wait two IOLoop iterations for the GC to happen.
        self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
        self.wait()

    def test_remove_timeout_from_timeout(self):
        calls = [False, False]

        # Schedule several callbacks and wait for them all to come due at once.
        # t2 should be cancelled by t1, even though it is already scheduled to
        # be run before the ioloop even looks at it.
        now = self.io_loop.time()

        def t1():
            calls[0] = True
            self.io_loop.remove_timeout(t2_handle)
        self.io_loop.add_timeout(now + 0.01, t1)

        def t2():
            calls[1] = True
        t2_handle = self.io_loop.add_timeout(now + 0.02, t2)
        self.io_loop.add_timeout(now + 0.03, self.stop)
        time.sleep(0.03)
        self.wait()
        self.assertEqual(calls, [True, False])

    def test_timeout_with_arguments(self):
        # This tests that all the timeout methods pass through *args correctly.
        results = []
        self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
        self.io_loop.add_timeout(datetime.timedelta(seconds=0),
                                 results.append, 2)
        self.io_loop.call_at(self.io_loop.time(), results.append, 3)
        self.io_loop.call_later(0, results.append, 4)
        self.io_loop.call_later(0, self.stop)
        self.wait()
        self.assertEqual(results, [1, 2, 3, 4])

    def test_add_timeout_return(self):
        # All the timeout methods return non-None handles that can be
        # passed to remove_timeout.
        handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
        self.assertFalse(handle is None)
        self.io_loop.remove_timeout(handle)

    def test_call_at_return(self):
        handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
        self.assertFalse(handle is None)
        self.io_loop.remove_timeout(handle)

    def test_call_later_return(self):
        handle = self.io_loop.call_later(0, lambda: None)
        self.assertFalse(handle is None)
        self.io_loop.remove_timeout(handle)

    def test_close_file_object(self):
        """When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True),
        not just the fd.
        """
        # Use a socket since they are supported by IOLoop on all platforms.
        # Unfortunately, sockets don't support the .closed attribute for
        # inspecting their close status, so we must use a wrapper.
        class SocketWrapper(object):
            def __init__(self, sockobj):
                self.sockobj = sockobj
                self.closed = False

            def fileno(self):
                return self.sockobj.fileno()

            def close(self):
                self.closed = True
                self.sockobj.close()
        sockobj, port = bind_unused_port()
        socket_wrapper = SocketWrapper(sockobj)
        io_loop = IOLoop()
        io_loop.add_handler(socket_wrapper, lambda fd, events: None,
                            IOLoop.READ)
        io_loop.close(all_fds=True)
        self.assertTrue(socket_wrapper.closed)

    def test_handler_callback_file_object(self):
        """The handler callback receives the same fd object it passed in."""
        server_sock, port = bind_unused_port()
        fds = []

        def handle_connection(fd, events):
            fds.append(fd)
            conn, addr = server_sock.accept()
            conn.close()
            self.stop()
        self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(('127.0.0.1', port))
            self.wait()
        self.io_loop.remove_handler(server_sock)
        self.io_loop.add_handler(server_sock.fileno(), handle_connection,
                                 IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(('127.0.0.1', port))
            self.wait()
        self.assertIs(fds[0], server_sock)
        self.assertEqual(fds[1], server_sock.fileno())
        self.io_loop.remove_handler(server_sock.fileno())
        server_sock.close()

    def test_mixed_fd_fileobj(self):
        # The same socket may not be registered both as an object and as
        # a raw file descriptor.
        server_sock, port = bind_unused_port()

        def f(fd, events):
            pass
        self.io_loop.add_handler(server_sock, f, IOLoop.READ)
        with self.assertRaises(Exception):
            # The exact error is unspecified - some implementations use
            # IOError, others use ValueError.
            self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
        self.io_loop.remove_handler(server_sock.fileno())
        server_sock.close()

    def test_reentrant(self):
        """Calling start() twice should raise an error, not deadlock."""
        returned_from_start = [False]
        got_exception = [False]

        def callback():
            try:
                self.io_loop.start()
                returned_from_start[0] = True
            except Exception:
                got_exception[0] = True
            self.stop()
        self.io_loop.add_callback(callback)
        self.wait()
        self.assertTrue(got_exception[0])
        self.assertFalse(returned_from_start[0])

    def test_exception_logging(self):
        """Uncaught exceptions get logged by the IOLoop."""
        # Use a NullContext to keep the exception from being caught by
        # AsyncTestCase.
        with NullContext():
            self.io_loop.add_callback(lambda: 1 / 0)
            self.io_loop.add_callback(self.stop)
            with ExpectLog(app_log, "Exception in callback"):
                self.wait()

    def test_exception_logging_future(self):
        """The IOLoop examines exceptions from Futures and logs them."""
        with NullContext():
            @gen.coroutine
            def callback():
                self.io_loop.add_callback(self.stop)
                1 / 0
            self.io_loop.add_callback(callback)
            with ExpectLog(app_log, "Exception in callback"):
                self.wait()

    def test_spawn_callback(self):
        # An added callback runs in the test's stack_context, so will be
        # re-raised in wait().
        self.io_loop.add_callback(lambda: 1 / 0)
        with self.assertRaises(ZeroDivisionError):
            self.wait()
        # A spawned callback is run directly on the IOLoop, so it will be
        # logged without stopping the test.
        self.io_loop.spawn_callback(lambda: 1 / 0)
        self.io_loop.add_callback(self.stop)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()

    @skipIfNonUnix
    def test_remove_handler_from_handler(self):
        # Create two sockets with simultaneous read events.
        client, server = socket.socketpair()
        try:
            client.send(b'abc')
            server.send(b'abc')

            # After reading from one fd, remove the other from the IOLoop.
            chunks = []

            def handle_read(fd, events):
                chunks.append(fd.recv(1024))
                if fd is client:
                    self.io_loop.remove_handler(server)
                else:
                    self.io_loop.remove_handler(client)
            self.io_loop.add_handler(client, handle_read, self.io_loop.READ)
            self.io_loop.add_handler(server, handle_read, self.io_loop.READ)
            self.io_loop.call_later(0.03, self.stop)
            self.wait()

            # Only one fd was read; the other was cleanly removed.
            self.assertEqual(chunks, [b'abc'])
        finally:
            client.close()
            server.close()
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
    """Verify IOLoop.current() tracking for a manually created loop."""

    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_current(self):
        def record_current():
            # While the loop runs, it must be the "current" loop.
            self.current_io_loop = IOLoop.current()
            self.io_loop.stop()

        self.io_loop.add_callback(record_current)
        self.io_loop.start()
        self.assertIs(self.current_io_loop, self.io_loop)
class TestIOLoopAddCallback(AsyncTestCase):
    """Callbacks run in the stack context active when they were wrapped."""

    def setUp(self):
        super(TestIOLoopAddCallback, self).setUp()
        self.active_contexts = []

    def add_callback(self, callback, *args, **kwargs):
        # Indirection point: subclasses override to test other scheduling APIs.
        self.io_loop.add_callback(callback, *args, **kwargs)

    @contextlib.contextmanager
    def context(self, name):
        # Tracks context entry/exit order so tests can assert on it.
        self.active_contexts.append(name)
        yield
        self.assertEqual(self.active_contexts.pop(), name)

    def test_pre_wrap(self):
        # A pre-wrapped callback is run in the context in which it was
        # wrapped, not when it was added to the IOLoop.
        def f1():
            self.assertIn('c1', self.active_contexts)
            self.assertNotIn('c2', self.active_contexts)
            self.stop()

        with StackContext(functools.partial(self.context, 'c1')):
            wrapped = wrap(f1)

        with StackContext(functools.partial(self.context, 'c2')):
            self.add_callback(wrapped)

        self.wait()

    def test_pre_wrap_with_args(self):
        # Same as test_pre_wrap, but the function takes arguments.
        # Implementation note: The function must not be wrapped in a
        # functools.partial until after it has been passed through
        # stack_context.wrap
        def f1(foo, bar):
            self.assertIn('c1', self.active_contexts)
            self.assertNotIn('c2', self.active_contexts)
            self.stop((foo, bar))

        with StackContext(functools.partial(self.context, 'c1')):
            wrapped = wrap(f1)

        with StackContext(functools.partial(self.context, 'c2')):
            self.add_callback(wrapped, 1, bar=2)

        result = self.wait()
        self.assertEqual(result, (1, 2))
class TestIOLoopAddCallbackFromSignal(TestIOLoopAddCallback):
    """Repeat the add_callback tests using add_callback_from_signal."""

    def add_callback(self, callback, *args, **kwargs):
        self.io_loop.add_callback_from_signal(callback, *args, **kwargs)
@unittest.skipIf(futures is None, "futures module not present")
class TestIOLoopFutures(AsyncTestCase):
    """Integration of concurrent.futures Futures with IOLoop.add_future."""

    def test_add_future_threads(self):
        with futures.ThreadPoolExecutor(1) as pool:
            self.io_loop.add_future(pool.submit(lambda: None),
                                    lambda future: self.stop(future))
            future = self.wait()
            self.assertTrue(future.done())
            self.assertTrue(future.result() is None)

    def test_add_future_stack_context(self):
        ready = threading.Event()

        def task():
            # we must wait for the ioloop callback to be scheduled before
            # the task completes to ensure that add_future adds the callback
            # asynchronously (which is the scenario in which capturing
            # the stack_context matters)
            ready.wait(1)
            assert ready.isSet(), "timed out"
            raise Exception("worker")

        def callback(future):
            self.future = future
            raise Exception("callback")

        def handle_exception(typ, value, traceback):
            self.exception = value
            self.stop()
            return True

        # stack_context propagates to the ioloop callback, but the worker
        # task just has its exceptions caught and saved in the Future.
        with futures.ThreadPoolExecutor(1) as pool:
            with ExceptionStackContext(handle_exception):
                self.io_loop.add_future(pool.submit(task), callback)
            ready.set()
        self.wait()

        self.assertEqual(self.exception.args[0], "callback")
        self.assertEqual(self.future.exception().args[0], "worker")
class TestIOLoopRunSync(unittest.TestCase):
    """Tests for IOLoop.run_sync with plain functions and coroutines."""

    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        self.assertEqual(self.io_loop.run_sync(lambda: 42), 42)

    def test_sync_exception(self):
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            raise gen.Return(42)
        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            1 / 0
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        # run_sync makes its loop "current" for the duration of the call.
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)
        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "dfb6cb2b1af7d52e58258e4387540c33",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 88,
"avg_line_length": 37.51802656546489,
"alnum_prop": 0.6020129476026704,
"repo_name": "jonashagstedt/tornado",
"id": "5ea304463e2b187e3adbd8d96ffade8b62f63b3d",
"size": "19796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado/test/ioloop_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1070"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "JavaScript",
"bytes": "6073"
},
{
"name": "Makefile",
"bytes": "812"
},
{
"name": "Python",
"bytes": "1346231"
},
{
"name": "Ruby",
"bytes": "1733"
},
{
"name": "Shell",
"bytes": "5045"
}
],
"symlink_target": ""
} |
import pytest
from mitmproxy.test import tflow
from mitmproxy.addons import view
from mitmproxy import flowfilter
from mitmproxy import options
from mitmproxy.test import taddons
def tft(*, method="get", start=0):
    """Build a test flow with the given request method and start timestamp."""
    flow = tflow.tflow()
    flow.request.method = method
    flow.request.timestamp_start = start
    return flow
def test_order_refresh():
    """Changing a sort key and calling update() emits sig_view_refresh."""
    v = view.View()
    seen = []

    def record(*args, **kwargs):
        seen.append(args)
        seen.append(kwargs)

    v.sig_view_refresh.connect(record)
    tf = tflow.tflow(resp=True)
    with taddons.context(options=options.Options()) as tctx:
        tctx.configure(v, console_order="time")
        v.add(tf)
        tf.request.timestamp_start = 1
        # Mutating the flow alone does not fire the signal...
        assert not seen
        # ...but update() re-sorts and triggers a refresh.
        v.update(tf)
        assert seen
def test_order_generators():
    """Each order generator derives its sort key from the flow."""
    v = view.View()
    tf = tflow.tflow(resp=True)

    assert view.OrderRequestStart(v).generate(tf) == 0
    assert view.OrderRequestMethod(v).generate(tf) == tf.request.method
    assert view.OrderRequestURL(v).generate(tf) == tf.request.url

    expected_size = len(tf.request.raw_content) + len(tf.response.raw_content)
    assert view.OrderKeySize(v).generate(tf) == expected_size
def test_simple():
    """Basic add/update semantics: timestamp ordering, dedup, marking, clear."""
    v = view.View()
    f = tft(start=1)
    assert v.store_count() == 0
    v.request(f)
    assert list(v) == [f]
    assert v.get_by_id(f.id)
    assert not v.get_by_id("nonexistent")
    # These all just call update
    v.error(f)
    v.response(f)
    v.intercept(f)
    v.resume(f)
    v.kill(f)
    assert list(v) == [f]
    # Re-adding a known flow is a no-op.
    v.request(f)
    assert list(v) == [f]
    assert len(v._store) == 1
    assert v.store_count() == 1
    f2 = tft(start=3)
    v.request(f2)
    assert list(v) == [f, f2]
    v.request(f2)
    assert list(v) == [f, f2]
    assert len(v._store) == 2
    assert v.inbounds(0)
    assert not v.inbounds(-1)
    assert not v.inbounds(100)
    # A flow with an intermediate timestamp is inserted in sort order.
    f3 = tft(start=2)
    v.request(f3)
    assert list(v) == [f, f3, f2]
    v.request(f3)
    assert list(v) == [f, f3, f2]
    assert len(v._store) == 3
    f.marked = not f.marked
    f2.marked = not f2.marked
    # Only the marked flows (f, f2) survive; f3 is dropped from the store too.
    v.clear_not_marked()
    assert list(v) == [f, f2]
    assert len(v) == 2
    assert len(v._store) == 2
    v.clear()
    assert len(v) == 0
    assert len(v._store) == 0
def test_filter():
    """Filters hide non-matching flows from the view but keep them in the
    store; toggle_marked() swaps in a marked-only filter and back."""
    v = view.View()
    match_get = flowfilter.parse("~m get")
    for m in ("get", "put", "get", "put"):
        v.request(tft(method=m))
    assert len(v) == 4

    v.set_filter(match_get)
    assert [f.request.method for f in v] == ["GET", "GET"]
    assert len(v._store) == 4

    v.set_filter(None)
    assert len(v) == 4

    # Nothing is marked yet, so the marked-only view is empty.
    v.toggle_marked()
    assert len(v) == 0
    v.toggle_marked()
    assert len(v) == 4

    v[1].marked = True
    v.toggle_marked()
    assert len(v) == 1
    assert v[0].marked
    v.toggle_marked()
    assert len(v) == 4
def test_order():
    """console_order and set_reversed control iteration order over the view."""
    v = view.View()
    with taddons.context(options=options.Options()) as tctx:
        v.request(tft(method="get", start=1))
        v.request(tft(method="put", start=2))
        v.request(tft(method="get", start=3))
        v.request(tft(method="put", start=4))

        def starts():
            return [f.request.timestamp_start for f in v]

        def methods():
            return [f.request.method for f in v]

        assert starts() == [1, 2, 3, 4]
        tctx.configure(v, console_order="method")
        assert methods() == ["GET", "GET", "PUT", "PUT"]
        v.set_reversed(True)
        assert methods() == ["PUT", "PUT", "GET", "GET"]
        tctx.configure(v, console_order="time")
        assert starts() == [4, 3, 2, 1]
        v.set_reversed(False)
        assert starts() == [1, 2, 3, 4]
def test_reversed():
    """Reversed views index from the newest flow and support negative indices."""
    v = view.View()
    for ts in (1, 2, 3):
        v.request(tft(start=ts))
    v.set_reversed(True)

    assert v[0].request.timestamp_start == 3
    assert v[-1].request.timestamp_start == 1
    assert v[2].request.timestamp_start == 1
    with pytest.raises(IndexError):
        v[5]
    with pytest.raises(IndexError):
        v[-5]

    # _bisect reports positions in the reversed ordering.
    assert v._bisect(v[0]) == 1
    assert v._bisect(v[2]) == 3
def test_update():
    """update() re-evaluates the filter and adds/removes the flow accordingly."""
    v = view.View()
    v.set_filter(flowfilter.parse("~m get"))
    f = tft(method="get")
    v.request(f)
    assert f in v

    # No longer matching the filter evicts the flow from the view.
    f.request.method = "put"
    v.update(f)
    assert f not in v

    # Matching again re-adds it; a redundant update is a no-op.
    f.request.method = "get"
    v.update(f)
    assert f in v
    v.update(f)
    assert f in v
class Record:
    """Callable that logs every invocation; truthy once called at least once."""

    def __init__(self):
        self.calls = []

    def __bool__(self):
        return len(self.calls) > 0

    def __repr__(self):
        return repr(self.calls)

    def __call__(self, *args, **kwargs):
        self.calls.append((args, kwargs))
def test_signals():
    """Each view mutation emits exactly one of add/update/remove/refresh."""
    v = view.View()
    rec_add = Record()
    rec_update = Record()
    rec_remove = Record()
    rec_refresh = Record()
    def clearrec():
        # Reset all four recorders between scenarios.
        rec_add.calls = []
        rec_update.calls = []
        rec_remove.calls = []
        rec_refresh.calls = []
    v.sig_view_add.connect(rec_add)
    v.sig_view_update.connect(rec_update)
    v.sig_view_remove.connect(rec_remove)
    v.sig_view_refresh.connect(rec_refresh)
    assert not any([rec_add, rec_update, rec_remove, rec_refresh])
    # Simple add
    v.add(tft())
    assert rec_add
    assert not any([rec_update, rec_remove, rec_refresh])
    # Filter change triggers refresh
    clearrec()
    v.set_filter(flowfilter.parse("~m put"))
    assert rec_refresh
    assert not any([rec_update, rec_add, rec_remove])
    v.set_filter(flowfilter.parse("~m get"))
    # An update that results in a flow being removed from the view
    clearrec()
    v[0].request.method = "PUT"
    v.update(v[0])
    assert rec_remove
    assert not any([rec_update, rec_refresh, rec_add])
    # An update that does not affect the view just sends update
    v.set_filter(flowfilter.parse("~m put"))
    clearrec()
    v.update(v[0])
    assert rec_update
    assert not any([rec_remove, rec_refresh, rec_add])
    # An update for a flow in state but not view does not do anything
    f = v[0]
    v.set_filter(flowfilter.parse("~m get"))
    assert not len(v)
    clearrec()
    v.update(f)
    assert not any([rec_add, rec_update, rec_remove, rec_refresh])
def test_focus_follow():
    """With focus-follow on, the focus tracks the newest matching flow."""
    v = view.View()
    with taddons.context(options=options.Options()) as tctx:
        tctx.configure(v, console_focus_follow=True, view_filter="~m get")

        v.add(tft(start=5))
        assert v.focus.index == 0

        # An older flow sorts first but focus stays on index 0.
        v.add(tft(start=4))
        assert v.focus.index == 0
        assert v.focus.flow.request.timestamp_start == 4

        # A newer flow moves focus to the end.
        v.add(tft(start=7))
        assert v.focus.index == 2
        assert v.focus.flow.request.timestamp_start == 7

        # A filtered-out flow does not touch the focus...
        mod = tft(method="put", start=6)
        v.add(mod)
        assert v.focus.index == 2
        assert v.focus.flow.request.timestamp_start == 7

        # ...until an update makes it match the filter.
        mod.request.method = "GET"
        v.update(mod)
        assert v.focus.index == 2
        assert v.focus.flow.request.timestamp_start == 6
def test_focus():
    """Focus tracking: initial state, updates on add/remove, and filters."""
    # Special case - initialising with a view that already contains data
    v = view.View()
    v.add(tft())
    f = view.Focus(v)
    # Was `f.index is 0`: identity comparison with an int literal only works
    # by accident of CPython's small-int caching (SyntaxWarning on 3.8+).
    assert f.index == 0
    assert f.flow is v[0]

    # Start empty
    v = view.View()
    f = view.Focus(v)
    assert f.index is None
    assert f.flow is None
    v.add(tft(start=1))
    assert f.index == 0
    assert f.flow is v[0]

    # Try to set to something not in view
    with pytest.raises(ValueError):
        f.__setattr__("flow", tft())
    with pytest.raises(ValueError):
        f.__setattr__("index", 99)

    # Inserting before the focused flow shifts the focus index along.
    v.add(tft(start=0))
    assert f.index == 1
    assert f.flow is v[1]
    v.add(tft(start=2))
    assert f.index == 1
    assert f.flow is v[1]

    f.index = 0
    assert f.index == 0

    # Removing the focused flow moves focus to the next (then previous) flow.
    f.index = 1
    v.remove(v[1])
    assert f.index == 1
    assert f.flow is v[1]
    v.remove(v[1])
    assert f.index == 0
    assert f.flow is v[0]
    v.remove(v[0])
    assert f.index is None
    assert f.flow is None

    v.add(tft(method="get", start=0))
    v.add(tft(method="get", start=1))
    v.add(tft(method="put", start=2))
    v.add(tft(method="get", start=3))

    f.flow = v[2]
    assert f.flow.request.method == "PUT"

    # Filtering out the focused flow snaps focus to a neighbour...
    filt = flowfilter.parse("~m get")
    v.set_filter(filt)
    assert f.index == 2

    # ...and an empty view clears the focus entirely.
    filt = flowfilter.parse("~m oink")
    v.set_filter(filt)
    assert f.index is None
def test_settings():
    """Per-flow settings exist only while the flow is in the store."""
    v = view.View()
    f = tft()

    # Unknown flows cannot carry settings.
    with pytest.raises(KeyError):
        v.settings[f]

    v.add(f)
    v.settings[f]["foo"] = "bar"
    assert v.settings[f]["foo"] == "bar"
    assert len(list(v.settings)) == 1

    # Removal drops the flow's settings entirely.
    v.remove(f)
    with pytest.raises(KeyError):
        v.settings[f]
    assert not v.settings.keys()

    # clear() wipes settings along with the flows.
    v.add(f)
    v.settings[f]["foo"] = "bar"
    assert v.settings.keys()
    v.clear()
    assert not v.settings.keys()
def test_configure():
    """Option validation: bad filters and unknown orders raise; flags stick."""
    v = view.View()
    with taddons.context(options=options.Options()) as tctx:
        tctx.configure(v, view_filter="~q")
        with pytest.raises(Exception, match="Invalid interception filter"):
            tctx.configure(v, view_filter="~~")

        tctx.configure(v, console_order="method")
        with pytest.raises(Exception, match="Unknown flow order"):
            tctx.configure(v, console_order="no")

        tctx.configure(v, console_order_reversed=True)
        tctx.configure(v, console_focus_follow=True)
        assert v.focus_follow
| {
"content_hash": "7e28e5c4649babe89844752e5dce0f15",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 88,
"avg_line_length": 24.216836734693878,
"alnum_prop": 0.5890656273043295,
"repo_name": "xaxa89/mitmproxy",
"id": "7fa3819ef989d252b69e16099b730a24801bbdfc",
"size": "9493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mitmproxy/addons/test_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17714"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "150625"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1535155"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
} |
__author__ = 'Feng Wang'
from os import walk, path
from SingleFile.PyClustering import clustering
def wrapper(input_path, output_path, k, dist):
"""
A wrapper for PyClustering.clustering.
Do K-means clustering for all data file in a folder.
@param input_path: Data folder
@param k: Number of centers in K-means
@param dist: Distance measure
"""
#my_path = r"C:\Users\D062988\Documents\DS\clustering008\CSV"
files = []
for (dir_path, dir_names, file_names) in walk(input_path):
files.extend(file_names)
break
for filename in files:
idx = clustering(input_path + "\\" + filename, k, dist, False)
print "-- Write to output file"
out = ','.join([str(i) for i in idx])
parent = path.dirname(path.abspath(input_path))
f = open(output_path + "\\" + filename.split(".")[0] + ".clusters", 'w')
f.write(out)
f.close()
print "\n"
if __name__ == '__main__':
    # Example invocation: cluster every CSV in the folder; k=-1 and dist='b'
    # are passed straight through to PyClustering.clustering.
    wrapper(r"C:\Users\D062988\Documents\DS\clustering008\CSV", "", -1, 'b')
    ## from timeit import Timer
    ## t = Timer("test()", "from __main__ import test")
    ## print t.timeit(number=1)
"content_hash": "4bb08c053dc4347fb4e601bd90d6e063",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 30.358974358974358,
"alnum_prop": 0.5945945945945946,
"repo_name": "wdwind/ImageTrends",
"id": "875525b500a1641c21426da0b6cd154d65b3df03",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/PyClustering/PyClusteringWrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7139"
},
{
"name": "Java",
"bytes": "98785"
},
{
"name": "PHP",
"bytes": "27675"
},
{
"name": "PigLatin",
"bytes": "1298"
},
{
"name": "Python",
"bytes": "22257"
}
],
"symlink_target": ""
} |
"""Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys, os, zipimport, time, re, imp, types
from urlparse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# This marker is used to simplify the process that checks if the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The bootstrapping script, for instance, will check if this
# attribute is present to decide whether to reinstall the package.
_distribute = True
def _bypass_ensure_directory(name, mode=0777):
# Sandbox-bypassing version of ensure_directory()
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(name)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
# Registry mapping each declared module-level state variable to its
# serialization "type" tag ('dict', 'object', ...); consumed by the module
# __getstate__/__setstate__ helpers below.
_state_vars = {}
def _declare_state(vartype, **kw):
    # Install each keyword as a module global and remember how to
    # snapshot/restore it via the _sget_<vartype>/_sset_<vartype> helpers.
    g = globals()
    for name, val in kw.iteritems():
        g[name] = val
        _state_vars[name] = vartype
def __getstate__():
    # Snapshot every registered state variable using its type's _sget_ helper;
    # used to save/restore this module's global state (e.g. by sandboxing).
    state = {}
    g = globals()
    for k, v in _state_vars.iteritems():
        state[k] = g['_sget_'+v](g[k])
    return state
def __setstate__(state):
    # Restore each variable *in place* via its type's _sset_ helper, so that
    # objects holding references to the globals see the restored contents.
    g = globals()
    for k, v in state.iteritems():
        g['_sset_'+_state_vars[k]](k, g[k], v)
    return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    match = macosVersionString.match(plat)
    if sys.platform == "darwin" and match is not None:
        try:
            # Substitute the *running* OS version for the build version.
            running = '.'.join(_macosx_vers()[:2])
            plat = 'macosx-%s-%s' % (running, match.group(3))
        except ValueError:
            # not Mac OS X
            pass
    return plat
# Public API of this module; the inline comments group names by purpose.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',
    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',
    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',
    # Exceptions
    'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
    'ExtractionError',
    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename',
    # filesystem utilities
    'ensure_directory', 'normalize_path',
    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',
    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # e.g. VersionConflict(dist, req) -> "VersionConflict(dist, req)"
        return self.__class__.__name__ + repr(self.args)


class VersionConflict(ResolutionError):
    """An already-installed version conflicts with the requested version"""


class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""


class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# loader type -> factory producing an IResourceProvider for a module.
_provider_factories = {}

# "major.minor" Python version string used in .egg names/paths, e.g. "2.7".
# Derived from version_info rather than sys.version[:3], which truncates
# incorrectly for two-digit minor versions ("3.10" -> "3.1").
PY_MAJOR = '%s.%s' % sys.version_info[:2]

# Distribution "precedence" constants: higher values win when several
# distributions for the same project are available.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # Looked up (with subclass fallback) by get_provider()/_find_adapter().
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # For requirements, reuse the active distribution (activating one on
        # demand) -- distributions are themselves resource providers.
        dist = working_set.find(moduleOrReq)
        return dist or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # Not imported yet: import it, then fetch the module object.
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    factory = _find_adapter(_provider_factories, loader)
    return factory(module)
def _macosx_vers(_cache=[]):
    # Return the running Mac OS X version as a list of string components,
    # e.g. ['10', '6', '8'].  `_cache` is a *deliberate* mutable default:
    # the version is computed once per process and memoized across calls.
    if not _cache:
        import platform
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            import plistlib
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        from distutils.util import get_platform
    except ImportError:
        # Python installs without distutils fall back to sysconfig.
        from sysconfig import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        # Old Mac Pythons report e.g. "darwin-8.11.1-i386"; rewrite that to
        # the modern "macosx-<major>.<minor>-<arch>" designation.
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Patterns for platform strings like "macosx-10.6-intel" and
# "darwin-8.11.1-i386" (the pre-setuptools-0.6 darwin designation).
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        return True # easy case
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # darwin kernel 7 <-> OS X 10.3, kernel 8 <-> 10.4.
                if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                    #import warnings
                    #warnings.warn("Mac eggs should be rebuilt to "
                    # "use the macosx designation instead of darwin.",
                    # category=DeprecationWarning)
                    return True
            return False # egg isn't macosx or legacy darwin
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # The script executes in the *caller's* global namespace, which is first
    # cleared down to its __name__ so the script sees a fresh module-level
    # environment (as if run directly).
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Coerce in two steps: string -> Requirement -> Distribution.
    if isinstance(dist, basestring):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
# Thin convenience wrappers: coerce `dist` via get_distribution(), then
# delegate to the corresponding Distribution method.
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    # NOTE: this class documents an *interface*.  The methods are stubs whose
    # bodies are docstrings only, and they intentionally omit `self`: they
    # are never called on this class, only implemented by providers.
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # NOTE: interface documentation only -- stub methods with docstring
    # bodies; `self` is intentionally omitted from the signatures.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []       # path entries, kept parallel to sys.path
        self.entry_keys = {}    # entry -> list of project keys found there
        self.by_key = {}        # project key -> active Distribution
        self.callbacks = []     # subscribers notified of newly added dists
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry,True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            raise VersionConflict(dist,req) # XXX add more info
        else:
            return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Runs the script in the caller's globals, cleared down to __name__
        # (same technique as the module-level run_script()).
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set. If it's added, any
        callbacks registered with the ``subscribe()`` method will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if dist.key in self.by_key:
            return # ignore hidden distros
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # Notify subscribers that a new distribution became active.
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None, replacement=True):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        """
        requirements = list(requirements)[::-1] # set up the stack
        processed = {} # set of processed requirements
        best = {} # key -> dist
        to_activate = []
        while requirements:
            req = requirements.pop(0) # process dependencies breadth-first
            if _override_setuptools(req) and replacement:
                # Distribute-specific: satisfy setuptools requirements with
                # the distribute fork instead.
                req = Requirement.parse('distribute')
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None:
                    if env is None:
                        env = Environment(self.entries)
                    dist = best[req.key] = env.best_match(req, self, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        # "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)
                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True
        return to_activate # return list of distros to activate
    def find_plugins(self,
        plugin_env, full_env=None, installer=None, fallback=True
        ):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions) # add plugins+libs to sys.path
            print 'Could not load', errors # display errors
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort() # scan project names in alphabetic order
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        # Resolve against a scratch copy so failures don't pollute `self`.
        shadow_set = self.__class__([])
        map(shadow_set.add, self) # put all our entries in shadow_set
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError,v:
                    error_info[dist] = v # save error info
                    if fallback:
                        continue # try the next older version of project
                    else:
                        break # give up on this project, keep going
                else:
                    map(shadow_set.add, resolvees)
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # Fan a newly activated distribution out to all subscribers.
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # Shallow-copy all mutable members so the snapshot is independent.
        return (self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:])
    def __setstate__(self, (entries, keys, by_key, callbacks)):
        # NOTE: tuple parameter unpacking in the signature is Python 2 only.
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'2.4'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.

        NOTE: the `platform`/`python` defaults are captured once, at
        class-definition time.
        """
        self._distmap = {} # project key -> list of added Distributions
        self._cache = {} # project key -> newest-first list (aliases _distmap)
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.  A ``None`` on either side of the python comparison
        always matches.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        # NOTE(review): raises KeyError/ValueError if `dist` was never added.
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added
        (``add()`` enforces this via ``can_add()``).
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            return self._cache[project_name]
        except KeyError:
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []
        # Cache miss for a known project: the cache entry deliberately
        # aliases the _distmap list so add() can re-sort it in place.
        if project_name not in self._cache:
            dists = self._cache[project_name] = self._distmap[project_name]
            _sort_dists(dists)
        return self._cache[project_name]
    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                # Keep any cached list (which aliases `dists`) newest-first.
                if dist.key in self._cache:
                    _sort_dists(self._cache[dist.key])
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        # self[req.key] is sorted newest-first, so the first match wins.
        for dist in self[req.key]:
            if dist in req:
                return dist
        return self.obtain(req, installer) # try and download/install
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            # Skip keys whose distributions were all removed.
            if self[key]: yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The result accepts everything (platform/python None), since the two
        # operands may have been snapshotted with different constraints.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
    """Raised when a resource cannot be extracted from an egg.

    Instances carry three extra attributes (set by the raiser):

    manager
        The ``ResourceManager`` that raised this exception

    cache_path
        The base directory resources were being extracted into

    original_error
        The underlying exception that made extraction fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""
    extraction_path = None # base dir for extraction; None -> default cache
    def __init__(self):
        self.cached_files = {} # paths handed out by get_cache_path()
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)

        Wraps the exception currently being handled in an ``ExtractionError``
        carrying the manager, cache path and original error, then raises it.
        """
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        # Deliberately broad: any failure to create the directory is turned
        # into a user-friendly ExtractionError (which re-raises).
        except:
            self.extraction_error()
        self.cached_files[target_path] = 1
        return target_path
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            # (Python 2 octal literals: 0555 == 0o555, 07777 == 0o7777.)
            mode = ((os.stat(tempname).st_mode) | 0555) & 07777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX not implemented here; extracted files are never removed by this
        # base implementation.
def get_default_cache():
    """Determine the default cache location

    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory.  On all other systems, it's "~/.python-eggs".

    Raises ``RuntimeError`` on Windows when none of the candidate home
    environment variables is set.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name!='nt':
        return os.path.expanduser('~/.python-eggs')
    app_data = 'Application Data' # XXX this may be locale-specific!
    # Candidate (env-var combination, subdirectory) pairs, best option first.
    app_homes = [
        (('APPDATA',), None), # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data), # 95/98/ME
    ]
    for keys, subdir in app_homes:
        # Join every named variable; discard the candidate if any is unset.
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                break
        else:
            if subdir:
                dirname = os.path.join(dirname,subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # Bug fix: the message used to misspell "environment".
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Normalize `name` into a canonical distribution name.

    Every maximal run of characters outside ``[A-Za-z0-9.]`` collapses to a
    single dash.
    """
    cleaned = re.sub('[^A-Za-z0-9.]+', '-', name)
    return cleaned
def safe_version(version):
    """Normalize `version` into a canonical version string.

    Spaces turn into dots first; then every remaining run of characters
    outside ``[A-Za-z0-9.]`` collapses to a single dash.
    """
    dotted = version.replace(' ','.')
    return re.sub('[^A-Za-z0-9.]+', '-', dotted)
def safe_extra(extra):
    """Normalize `extra` into a canonical 'extra' name.

    Runs of characters outside ``[A-Za-z0-9.]`` become a single underscore
    and the whole result is lowercased.
    """
    normalized = re.sub('[^A-Za-z0-9.]+', '_', extra)
    return normalized.lower()
def to_filename(name):
    """Escape a project or version name for use in a filename.

    Currently this just maps every '-' to '_'.
    """
    return '_'.join(name.split('-'))
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    egg_name = None # basename of the enclosing egg (set by subclasses)
    egg_info = None # path of the metadata directory (set by subclasses)
    loader = None # the wrapped PEP 302 loader, if any
    def __init__(self, module):
        # Everything is resolved relative to the module's containing directory.
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        return StringIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        # Falsy (None/'') when there is no egg_info directory at all.
        return self.egg_info and self._has(self._fn(self.egg_info,name))
    if sys.version_info <= (3,):
        # Python 2: loader data is already a (byte) string.
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        # Python 3: loader data is bytes and must be decoded to text.
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))
    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))
    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []
    def run_script(self,script_name,namespace):
        # Load the script's text from metadata, normalize line endings, and
        # execute it in `namespace` as if run from its original file.
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # Real file on disk: run it directly (Python 2 builtin execfile).
            execfile(script_filename, namespace, namespace)
        else:
            # No real file (e.g. zipped egg): prime linecache so tracebacks
            # can still show source, then compile and exec the text.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec script_code in namespace, namespace
    # The _has/_isdir/_listdir primitives must be supplied by a registered
    # provider subclass; the base class cannot know the loader's layout.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # Map a '/'-separated resource name onto the host filesystem.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # Metadata may be nested inside a "basket" of several eggs, so walk
        # upward from module_path (rather than trusting loader.archive)
        # until a component ending in ".egg" is found, or the path stops
        # shrinking.
        current = self.module_path
        previous = None
        while current != previous:
            if current.lower().endswith('.egg'):
                self.egg_name = os.path.basename(current)
                self.egg_info = os.path.join(current, 'EGG-INFO')
                self.egg_root = current
                break
            previous = current
            current, _tail = os.path.split(current)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        # Resources are plain files on disk; a binary file object suffices.
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        # try/finally (not `with`) keeps this runnable on very old Pythons.
        handle = open(path, 'rb')
        try:
            return handle.read()
        finally:
            handle.close()
# Modules loaded from plain directories have no __loader__ (loader is None);
# they read resources straight from the filesystem.
register_loader_type(type(None), DefaultProvider)
try:
    # CPython >=3.3
    import _frozen_importlib
except ImportError:
    pass
else:
    # On CPython >=3.3, ordinary source imports go through SourceFileLoader;
    # treat them like plain filesystem packages too.
    register_loader_type(_frozen_importlib.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    def _has(self, path):
        return False

    def _isdir(self, path):
        return False

    def _listdir(self, path):
        return []

    def _get(self, path):
        return ''

    def __init__(self):
        # Deliberately skip NullProvider.__init__: there is no module and no
        # loader behind this provider.
        pass
empty_provider = EmptyProvider()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    eagers = None # lazily built list of resources that must co-extract
    def __init__(self, module):
        EggProvider.__init__(self,module)
        # zipimport keeps a per-archive table of contents; reuse it rather
        # than re-reading the zip directory.
        self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
        self.zip_pre = self.loader.archive+os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )
    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )
    def get_resource_filename(self, manager, resource_name):
        """Extract the resource (plus any eager companions) and return its path."""
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # An "eager" resource was requested: extract the whole eager set
            # so companions (e.g. native libs) land on disk together.
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    def _extract_resource(self, manager, zip_path):
        # Directories (keys of the index) are extracted member by member.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last) # return the extracted directory name
        zip_stat = self.zipinfo[zip_path]
        # zipimport TOC entry: [3] is the file size, [5]/[6] are the
        # DOS-packed modification time and date (decoded just below).
        t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
        date_time = (
            (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd
            (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc.
        )
        timestamp = time.mktime(date_time)
        try:
            if not WRITE_SUPPORT:
                raise IOError('"os.rename" and "os.unlink" are not supported '
                    'on this platform')
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if os.path.isfile(real_path):
                stat = os.stat(real_path)
                if stat.st_size==size and stat.st_mtime==timestamp:
                    # size and stamp match, don't bother extracting
                    return real_path
            # Write to a unique temp name, then rename into place, so
            # concurrent extractors never observe a half-written file.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    stat = os.stat(real_path)
                    if stat.st_size==size and stat.st_mtime==timestamp:
                        # size and stamp match, somebody did it just ahead of
                        # us, so we're done
                        return real_path
                    elif os.name=='nt': # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            manager.extraction_error() # report a user-friendly error
        return real_path
    def _get_eager_resources(self):
        # Lazily read the eager-resource names from the two metadata files
        # that can declare them.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Build (once) a map of archive subdirectory -> member names from
        # the zipimport table of contents.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        # A name "exists" if it's a file in the TOC or a directory in the index.
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))
    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    All data and metadata requests are rejected except for ``PKG-INFO``,
    which is reported as existing and answered with the contents of the
    file at the supplied location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        # Guard clause: anything other than PKG-INFO is an error.
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        # 'rU' keeps universal-newline translation (Python-2-era mode).
        f = open(self.path, 'rU')
        metadata = f.read()
        f.close()
        return metadata

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # `path` holds the importable code; `egg_info` is the metadata
        # directory (either *.egg-info or EGG-INFO).
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        # Mirrors ZipProvider.__init__, but starts from an arbitrary
        # zipimporter rather than an already-imported module.
        self.zipinfo = zipimport._zip_directory_cache[importer.archive]
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        if importer.prefix:
            # The importer points inside the archive; include that prefix.
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
class ImpWrapper:
    """PEP 302 Importer that wraps Python's "normal" import algorithm"""

    def __init__(self, path=None):
        self.path = path

    def find_module(self, fullname, path=None):
        subname = fullname.split(".")[-1]
        # A dotted submodule can only live on a package's search path, so a
        # path-less (sys.path) wrapper never handles it.
        if subname != fullname and self.path is None:
            return None
        if self.path is None:
            search = None
        else:
            search = [self.path]
        try:
            fh, pathname, description = imp.find_module(subname, search)
        except ImportError:
            return None
        return ImpLoader(fh, pathname, description)
class ImpLoader:
    """PEP 302 Loader that wraps Python's "normal" import algorithm"""

    def __init__(self, file, filename, etc):
        self.file = file
        self.filename = filename
        self.etc = etc

    def load_module(self, fullname):
        try:
            module = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            # imp.find_module may have opened a file handle; always close it.
            if self.file:
                self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return module
def get_importer(path_item):
    """Retrieve a PEP 302 "importer" for the given path item

    If there is no importer, this returns a wrapper around the builtin import
    machinery.  The returned importer is only cached if it was created by a
    path hook.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        # Not cached yet: ask each path hook in turn; the first one that
        # accepts the item wins.
        importer = None
        for hook in sys.path_hooks:
            try:
                importer = hook(path_item)
                break
            except ImportError:
                importer = None
        sys.path_importer_cache.setdefault(path_item, importer)
    if importer is None:
        # No hook claimed it: fall back to the builtin import machinery
        # (this fallback is deliberately NOT cached).
        try:
            importer = ImpWrapper(path_item)
        except ImportError:
            pass
    return importer
try:
    from pkgutil import get_importer, ImpImporter
except ImportError:
    pass # Python 2.3 or 2.4, use our own implementation
else:
    # Python 2.5+: prefer pkgutil's equivalents and drop our local copies.
    ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
    del ImpLoader, ImpImporter
# Registry of importer type -> distribution-finder callable, tracked via
# _declare_state so it participates in module state save/restore.
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register a callable that finds distributions on sys.path entries.

    `importer_type` is the type or class of a PEP 302 "Importer" (i.e. a
    sys.path item handler); `distribution_finder` is a callable that, given a
    path item and the importer instance, yields the ``Distribution`` objects
    found on that path item (``pkg_resources.find_on_path`` is an example).
    """
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield the distributions reachable through `path_item`."""
    # Dispatch on the importer type via the registered finder adapters.
    importer = get_importer(path_item)
    find = _find_adapter(_distribution_finders, importer)
    return find(importer, path_item, only)
def find_in_zip(importer, path_item, only=False):
    """Yield the zip/egg's own distribution, then any eggs nested inside it."""
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        return # don't yield nested distros
    for subitem in metadata.resource_listdir('/'):
        if not subitem.endswith('.egg'):
            continue
        # Recurse into each nested egg with its own zipimporter.
        subpath = os.path.join(path_item, subitem)
        for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
            yield dist
register_finder(zipimport.zipimporter, find_in_zip)
def StringIO(*args, **kw):
    """Thunk to load the real StringIO on demand"""
    # The first call rebinds the module-level name to the real class
    # (cStringIO when available), so subsequent calls skip the import.
    global StringIO
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    return StringIO(*args,**kw)
def find_nothing(importer, path_item, only=False):
    """Finder for importer types that can never contain distributions."""
    return ()
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory

    `path_item` may itself be an unpacked ``.egg`` directory (yielded as a
    single distribution), or an ordinary directory scanned for ``.egg``,
    ``.egg-info``, ``.dist-info`` and ``.egg-link`` entries.
    """
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # Bug fix: iterating `open(...)` directly leaked the file
                    # handle (closed only by GC).  Read eagerly and close
                    # explicitly; .egg-link files are tiny.  try/finally is
                    # used instead of `with` (and kept outside the yields,
                    # which pre-2.5 forbids inside try/finally).
                    link_file = open(os.path.join(path_item, entry))
                    try:
                        link_lines = link_file.readlines()
                    finally:
                        link_file.close()
                    for line in link_lines:
                        if not line.strip():
                            continue
                        # Only the first non-blank line of an .egg-link is used.
                        for item in find_distributions(os.path.join(path_item, line.rstrip())):
                            yield item
                        break
# Ordinary filesystem path items (ImpWrapper / pkgutil's ImpImporter) are
# scanned with find_on_path.
register_finder(ImpWrapper,find_on_path)
try:
    # CPython >=3.3
    import _frozen_importlib
except ImportError:
    pass
else:
    # On CPython >=3.3 directory entries are handled by FileFinder instead.
    register_finder(_frozen_importlib.FileFinder, find_on_path)
# Registries for namespace-package support, tracked as saveable module state:
# importer type -> handler callable, and package name -> child namespaces.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register a namespace-package handler for one importer type.

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path
    item handler).  `namespace_handler` is a callable with this signature::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    A handler runs only after its importer has agreed to handle the path
    item, and should return a subpath only when the module's ``__path__``
    does not already contain an equivalent entry; see
    ``pkg_resources.file_ns_handler`` for an example.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)

    Returns the subpath contributed (which the registered handler may also
    report as ``None``), or ``None`` when `path_item` has no importer or no
    loader for the package.
    """
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # Create an empty package shell so its __path__ can be extended below.
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []; _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer,path_item,packageName,module)
    if subpath is not None:
        # Append the new entry and (re)load the package so its __init__
        # runs; loading may reset __path__, so restore it afterwards.
        path = module.__path__; path.append(subpath)
        loader.load_module(packageName); module.__path__ = path
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # The import lock serializes mutation of the shared namespace registries.
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        # Defaults for a top-level namespace: search sys.path, no parent.
        path, parent = sys.path, None
        if '.' in packageName:
            # Nested namespace: the parent must itself be declared (and
            # imported), and our search path is the parent's __path__.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Make every namespace package declared under `parent` include `path_item`."""
    imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                # The child gained a new directory; recurse so namespace
                # packages nested inside it pick it up as well.
                fixup_namespace_packages(subpath, package)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for existing in module.__path__:
        if _normalize_cached(existing) == normalized:
            # Already present (possibly spelled differently): contribute nothing.
            return None
    # Only report the path when __path__ lacks an equivalent entry.
    return subpath
# Filesystem and zip importers both use the path-joining handler above.
register_namespace_handler(ImpWrapper,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
try:
    # CPython >=3.3
    import _frozen_importlib
except ImportError:
    pass
else:
    register_namespace_handler(_frozen_importlib.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler for importer types that support no subpaths."""
    return None
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
    """Return a canonical form of `filename` for comparison purposes."""
    # Resolve symlinks first, then fold case on case-insensitive platforms.
    real = os.path.realpath(filename)
    return os.path.normcase(real)
def _normalize_cached(filename, _cache={}):
    # Memoized normalize_path(); the shared mutable default dict is the
    # cache, and that sharing is deliberate.
    if filename in _cache:
        return _cache[filename]
    result = normalize_path(filename)
    _cache[filename] = result
    return result
def _set_parent_ns(packageName):
    # Bind the child module as an attribute of its parent package, if any.
    segments = packageName.split('.')
    child = segments.pop()
    if segments:
        parent = '.'.join(segments)
        setattr(sys.modules[parent], child, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    # NOTE(review): `basestring` is Python 2-only; under Python 3 this branch
    # would raise NameError.  This module targets Python 2.
    if isinstance(strs,basestring):
        for s in strs.splitlines():
            s = s.strip()
            if s and not s.startswith('#'): # skip blank lines/comments
                yield s
    else:
        # Anything else is treated as a (possibly nested) iterable of strings.
        for ss in strs:
            for s in yield_lines(ss):
                yield s
# Pre-bound `.match` methods used by the requirement/entry-point parsers below.
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
# Splits an egg basename into name / version / python tag / platform.
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match
# Version-string tokenizer and canonical tag replacements for parse_version().
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
    """Yield comparable sub-strings for one version string (see parse_version)."""
    for token in component_re.split(s):
        token = replace(token, token)
        if not token or token == '.':
            continue
        if token[:1] in '0123456789':
            # zero-pad numbers so plain string comparison orders them numerically
            yield token.zfill(8)
        else:
            # '*' sorts before digits, pushing alpha tags ahead of numbers
            yield '*' + token
    yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key
    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.
    The returned value will be a tuple of strings. Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings. Dots are
    dropped, but dashes are retained. Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.
    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represents a "patch level". So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".
    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".
    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower than
    than any other pre-release tag.
    """
    key = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            # remove trailing zeros from each series of numeric parts
            while key and key[-1] == '00000000':
                key.pop()
        key.append(part)
    return tuple(key)
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Round-trip through Requirement to validate/normalize the extras.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist
    def __str__(self):
        # Inverse of parse(): "name = module:attr.attr [extra1,extra2]"
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, env=None, installer=None):
        """Import and return the advertised object.

        Unless `require` is false, the owning distribution's requirements
        (for this entry point's extras) are resolved and activated first.
        Raises ImportError if a listed attribute is missing.
        """
        if require: self.require(env, installer)
        # Non-empty fromlist forces __import__ to return the leaf module
        # rather than the top-level package.
        entry = __import__(self.module_name, globals(),globals(), ['__name__'])
        for attr in self.attrs:
            try:
                entry = getattr(entry,attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry,attr))
        return entry
    def require(self, env=None, installer=None):
        # NOTE(review): self.dist is dereferenced unconditionally below, so a
        # dist-less entry point fails here even without extras -- the explicit
        # UnknownExtra is only raised when extras are requested.
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        # Python 2 idiom: map() runs eagerly, adding each resolved dist.
        map(working_set.add,
            working_set.resolve(self.dist.requires(self.extras),env,installer))
    #@classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`
        Entry point syntax follows the form::
            name = some.module:some.attr [extra1,extra2]
        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name,value = src.split('=',1)
            if '[' in value:
                value,extras = value.split('[',1)
                # Reuse the requirement parser to validate "[extras]"; a
                # version spec in that position is an error.
                req = Requirement.parse("x["+extras)
                if req.specs: raise ValueError
                extras = req.extras
            if ':' in value:
                value,attrs = value.split(':',1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)
    parse = classmethod(parse)
    #@classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this
    parse_group = classmethod(parse_group)
    #@classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        # `data` may be a pre-split {group: lines} dict or raw INI-style text.
        if isinstance(data,dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
    parse_map = classmethod(parse_map)
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    # Metadata file that carries the name/version headers for this format.
    PKG_INFO = 'PKG-INFO'
    def __init__(self,
        location=None, metadata=None, project_name=None, version=None,
        py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
    ):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # Metadata provider; falls back to an empty provider when absent.
        self._provider = metadata or empty_provider
    #@classmethod
    def from_location(cls,location,basename,metadata=None,**kw):
        # Alternate constructor: choose the Distribution subclass from the
        # file extension and parse name/version/etc. out of the basename.
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )
    from_location = classmethod(from_location)
    # Composite comparison key used by __hash__/__lt__/... and _sort_dists():
    # newer versions and higher precedence sort later.
    hashcmp = property(
        lambda self: (
            getattr(self,'parsed_version',()),
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform
        )
    )
    def __hash__(self): return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed. (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    #@property
    def key(self):
        # Lower-cased project name; cached on first access.
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    key = property(key)
    #@property
    def parsed_version(self):
        # parse_version() result; cached on first access.
        try:
            return self._parsed_version
        except AttributeError:
            self._parsed_version = pv = parse_version(self.version)
            return pv
    parsed_version = property(parsed_version)
    #@property
    def version(self):
        # Lazily read the 'Version:' header out of PKG-INFO when no explicit
        # version was given to the constructor.
        try:
            return self._version
        except AttributeError:
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                raise ValueError(
                    "Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
                )
    version = property(version)
    #@property
    def _dep_map(self):
        # {extra_name_or_None: [Requirement, ...]} built from requires.txt /
        # depends.txt; cached on first access.
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra,reqs in split_sections(self._get_metadata(name)):
                    if extra: extra = safe_extra(extra)
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
    _dep_map = property(_dep_map)
    def requires(self,extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None,()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self,name):
        # Yield the lines of metadata file `name`, or nothing if it's absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self,path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None: path = sys.path
        self.insert_on(path)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            # Python 2: map() runs eagerly, declaring each namespace package.
            map(declare_namespace, self._get_metadata('namespace_packages.txt'))
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-'+self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self,self.location)
        else:
            return str(self)
    def __str__(self):
        try: version = getattr(self,'version',None)
        except ValueError: version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name,version)
    def __getattr__(self,attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError,attr # Python 2 raise syntax
        return getattr(self._provider, attr)
    #@classmethod
    def from_filename(cls,filename,metadata=None, **kw):
        # Like from_location(), but normalizes the path first.
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    from_filename = classmethod(from_filename)
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        return Requirement.parse('%s==%s' % (self.project_name, self.version))
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group,name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group,name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # Parsed lazily from entry_points.txt and cached.
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if self.project_name == 'setuptools':
            try:
                version = self.version
            except ValueError:
                version = ''
            if '0.7' in version:
                # distribute refuses to coexist with 0.7-series setuptools.
                raise ValueError(
                    "A 0.7-series setuptools cannot be installed "
                    "with distribute. Found one at %s" % str(self.location))
        if not loc:
            return
        if path is sys.path:
            self.check_version_conflict()
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # Python 2: map() returns a list here (it is indexed/mutated below).
        npath= map(_normalize_cached, path)
        bp = None
        for p, item in enumerate(npath):
            if item==nloc:
                break
            elif item==bdir and self.precedence==EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while 1:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                p = np # ha!
        return
    def check_version_conflict(self):
        # Warn when a top-level module of this dist was already imported from
        # somewhere other than this distribution's own location.
        if self.key=='distribute':
            return # ignore the inevitable setuptools self-conflicts :(
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                or modname in _namespace_packages
            ):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        # False (with a warning) when version metadata can't be read,
        # e.g. for an unbuilt egg checkout.
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for "+repr(self))
            return False
        return True
    def clone(self,**kw):
        """Copy this distribution, substituting in any changed keyword args"""
        for attr in (
            'project_name', 'version', 'py_version', 'platform', 'location',
            'precedence'
        ):
            kw.setdefault(attr, getattr(self,attr,None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    #@property
    def extras(self):
        # All named extras appearing in this distribution's dependency map.
        return [dep for dep in self._dep_map if dep]
    extras = property(extras)
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    # Matches a bare version inside parens/commas so '==' can be prefixed.
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            # METADATA is RFC 822-style; parse it with the email parser.
            from email.parser import Parser
            self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
            return self._pkg_info
    @property
    def _dep_map(self):
        # Computed from Requires-Dist/Provides-Extra headers; cached.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        def dummy_marker(marker):
            # Fallback when markerlib is unavailable: treat every
            # environment marker as satisfied.
            def marker_fn(environment=None, override=None):
                return True
            marker_fn.__doc__ = marker
            return marker_fn
        try:
            from markerlib import as_function
        except ImportError:
            as_function = dummy_marker
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            # Python 2 iterator protocol (`.next()`); takes the single
            # requirement yielded for this header line.
            parsed = parse_requirements(distvers).next()
            parsed.marker_fn = as_function(mark)
            reqs.append(parsed)
        def reqs_for_extra(extra):
            # Requirements whose marker is satisfied when `extra` is active.
            for req in reqs:
                if req.marker_fn(override={'extra':extra}):
                    yield req
        common = set(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            # Per-extra deps exclude those already required unconditionally.
            dm[extra] = list(set(reqs_for_extra(extra)) - common)
        return dm
# Metadata-directory extension -> Distribution class used to wrap it.
_distributionImpl = {'.egg': Distribution,
                     '.egg-info': Distribution,
                     '.dist-info': DistInfoDistribution }
def issue_warning(*args,**kw):
    """Emit a warning whose stacklevel points at the first frame outside
    this module, so the report blames the caller rather than pkg_resources."""
    from warnings import warn
    these_globals = globals()
    depth = 1
    try:
        # Walk up until we leave frames whose globals belong to this module.
        while sys._getframe(depth).f_globals is these_globals:
            depth += 1
    except ValueError:
        # Ran off the top of the stack; use the deepest level reached.
        pass
    warn(stacklevel=depth + 1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Scan a comma-separated list starting at position `p` of `line`,
        # using the bound `.match` functions ITEM/TERMINATOR.  Returns the
        # (possibly advanced) line, the new position, and the matched items.
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                try:
                    # Python 2 iterator protocol (`.next()`): pull the
                    # continuation line and restart scanning at column 0.
                    line = lines.next(); p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end() # skip the terminator, if any
        return line, p, items
    for line in lines:
        # Each line is: name [extra,...] (op version, ...)*
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line,p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
    """A parsed project requirement: name, version specs, and extras."""
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Pair each spec with its parsed version and its state-machine row,
        # sorted so __contains__ can scan versions in ascending order.
        index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
        index.sort()
        self.specs = [(op,ver) for parsed,trans,op,ver in index]
        self.index, self.extras = index, tuple(map(safe_extra,extras))
        self.hashCmp = (
            self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)
    def __eq__(self,other):
        return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
    def __contains__(self,item):
        # True when `item` (a Distribution, version string, or pre-parsed
        # version) satisfies every version spec of this requirement.
        if isinstance(item,Distribution):
            # NOTE: `<>` is Python 2-only inequality syntax.
            if item.key <> self.key: return False
            if self.index: item = item.parsed_version # only get if we need it
        elif isinstance(item,basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        for parsed,trans,op,ver in self.index:
            # Each trans row maps the cmp result (=, >, <) to an action:
            # 'T'/'F' decide immediately; '+'/'-' update the running default.
            action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
            if action=='F': return False
            elif action=='T': return True
            elif action=='+': last = True
            elif action=='-' or last is None: last = False
        if last is None: last = True # no rules encountered
        return last
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    #@staticmethod
    def parse(s, replacement=True):
        # Parse exactly one requirement; when `replacement` is true, a plain
        # 'setuptools' requirement may be answered with 'distribute' instead.
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs) == 1:
                founded_req = reqs[0]
                # if asked for setuptools distribution
                # and if distribute is installed, we want to give
                # distribute instead
                if _override_setuptools(founded_req) and replacement:
                    distribute = list(parse_requirements('distribute'))
                    if len(distribute) == 1:
                        return distribute[0]
                    return founded_req
                else:
                    return founded_req
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
    parse = staticmethod(parse)
# Action tables for Requirement.__contains__: one 3-character string per
# operator, indexed by cmp(candidate, spec_version) -> 0:'=', 1:'>', -1:'<'.
# 'T'/'F' answer immediately; '+'/'-' set the running default.
state_machine = {
    # =><
    '<' : '--T',
    '<=': 'T-T',
    '>' : 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _override_setuptools(req):
"""Return True when distribute wants to override a setuptools dependency.
We want to override when the requirement is setuptools and the version is
a variant of 0.6.
"""
if req.project_name == 'setuptools':
if not len(req.specs):
# Just setuptools: ok
return True
for comparator, version in req.specs:
if comparator in ['==', '>=', '>']:
if '0.7' in version:
# We want some setuptools not from the 0.6 series.
return False
return True
return False
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the object's class MRO and return the first registered factory;
    # falls through (returning None) when nothing matches.
    klass = getattr(ob, '__class__', type(ob))
    for candidate in _get_mro(klass):
        if candidate in registry:
            return registry[candidate]
def ensure_directory(path):
    """Create the parent directory of `path` if it does not already exist."""
    parent = os.path.dirname(path)
    if os.path.isdir(parent):
        return
    os.makedirs(parent)
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    current_header = None
    current_lines = []
    for line in yield_lines(s):
        if not line.startswith("["):
            current_lines.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # Flush the previous segment unless it is the empty leading one.
        if current_header or current_lines:
            yield current_header, current_lines
        current_header = line[1:-1].strip()
        current_lines = []
    # wrap up last segment
    yield current_header, current_lines
def _mkstemp(*args,**kw):
    """mkstemp() wrapper that works even while sandboxing is active.

    `os_open` is presumably the saved, unpatched os.open captured elsewhere
    in this module (not visible here -- TODO confirm); it is swapped in for
    the duration of the call so tempfile can create real files.
    """
    from tempfile import mkstemp
    old_open = os.open
    try:
        os.open = os_open # temporarily bypass sandboxing
        return mkstemp(*args,**kw)
    finally:
        os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
    # Export every public ResourceManager method into namespace `g`,
    # making them available as module-level functions of pkg_resources.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict: # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
            parse_requirements(__requires__), Environment()
        ):
            working_set.add(dist)
        for entry in sys.path: # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path
# Convenience aliases bound to the master working set.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
# Python 2: map() runs eagerly, adding each sys.path entry in order.
working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
| {
"content_hash": "83849f8acfccefa57fdd5949e2405c32",
"timestamp": "",
"source": "github",
"line_count": 2837,
"max_line_length": 94,
"avg_line_length": 32.87275290800141,
"alnum_prop": 0.6002251769247265,
"repo_name": "jkshaver/virtualenv-1.8.2",
"id": "27b9f83451e2e93f6313f4332e6fbc5311158556",
"size": "93260",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/distribute-0.6.28-py2.7.egg/pkg_resources.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "88031"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Python",
"bytes": "5886122"
},
{
"name": "Shell",
"bytes": "4476"
}
],
"symlink_target": ""
} |
from numpy import random
import pandas as pd
class Generator(object):
    """Generate small random demo tables (cities and people) as CSV."""
    def __init__(self):
        self.alphabet = 'abcdefghijklmnopqrstuvwxyz'
        # Accumulators for the people table; filled by output_people().
        self.people = {'name' : [], 'height' : [], 'city' : []}
        # Fixed reference table: city names and their elevations.
        self.cities = {
            'name' : [
                'Berkeley',
                'Denver',
                'Mexico City',
                'Amsterdam',
                'Los Angeles'
            ],
            'height' : [
                52,
                1730,
                2250,
                -2,
                71
            ]
        }
    def output_cities(self, fp_cities):
        """Write the cities table to `fp_cities` (path or file-like) as CSV."""
        pd.DataFrame(self.cities).to_csv(fp_cities, index=False)
    def output_people(self, fp_people):
        """Randomize 1000 people (name, height, city) and write them as CSV."""
        self.people['height'] = list(random.normal(1.5, 0.25, 1000))
        # BUG FIX: this previously sampled randint(0, len(self.cities)) --
        # the number of dict *keys* (2), not cities -- so only the first two
        # cities were ever assigned.  Sample over the full city list instead.
        n_cities = len(self.cities['name'])
        for _ in range(1000):
            self.people['city'].append(
                self.cities['name'][random.randint(0, n_cities)])
            # Random 6-letter title-cased name (distinct loop variable to
            # avoid shadowing the person index).
            letters = random.randint(0, len(self.alphabet), 6)
            self.people['name'].append(
                ''.join([self.alphabet[j] for j in letters]).title())
        pd.DataFrame(self.people).to_csv(fp_people, index=False)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Output file paths; each table is only written when its flag is given.
    parser.add_argument('--fp-cities')
    parser.add_argument('--fp-people')
    args = parser.parse_args()
    generator = Generator()
    if args.fp_cities:
        generator.output_cities(args.fp_cities)
        print("Cities written to {}".format(args.fp_cities))
    if args.fp_people:
        generator.output_people(args.fp_people)
        print("People randomized and written to {}".format(args.fp_people))
| {
"content_hash": "1be1ed32f1af8e6809020b2fab80f721",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 125,
"avg_line_length": 32.26,
"alnum_prop": 0.536887786732796,
"repo_name": "deniederhut/workshop_pyintensive",
"id": "0f146ad4fc18acda961a095dac6804e0acb33acf",
"size": "1632",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/generate_tables.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "595118"
},
{
"name": "Python",
"bytes": "154296"
},
{
"name": "Shell",
"bytes": "783"
}
],
"symlink_target": ""
} |
import parse as ps
import codecs
p=ps.parser()
# CSV header; each accepted parsed URL contributes one row appended below.
result='hostname,tld,port,path,queue,segment\n'
with codecs.open('../data/result.txt',mode='r',encoding='utf-8') as f:
    data=f.read()
data_arr=data.split(',')
# Cap the raw entries considered (the input may be much larger).
data_arr=data_arr[:2000000]
hostnames={}
num=0
for line in data_arr:
    line_result=''
    # Strip fixed surrounding characters around each URL entry
    # (presumably a constant prefix/suffix in result.txt -- TODO confirm).
    line=line[10:-2]
    _, hostname, tld, port, path, queue, segment=p.parse(line)
    parts_arr=[hostname, tld, port, path, queue, segment]
    if not hostname:
        continue
    # Limit the number of rows per hostname to reduce per-site bias.
    if hostname in hostnames:
        if hostnames[hostname]>50:
            continue
        else:
            hostnames[hostname]+=1
    else:
        hostnames[hostname]=1
    # Stop once roughly 100000 rows have been accepted.
    if num>100000:
        break
    num+=1
    # Emit a comma-separated row, leaving empty fields blank.
    for part in parts_arr:
        if part:
            line_result+=part+','
        else:
            line_result+=','
    line_result=line_result[:-1]+'\n'
    result+=line_result
with codecs.open('good.csv', mode='w',encoding='utf-8') as f:
    f.write(result)
| {
"content_hash": "b73a71503d795f2f8cda8e6c70c04a68",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 23.023809523809526,
"alnum_prop": 0.59358841778697,
"repo_name": "xmeng17/Malicious-URL-Detection",
"id": "9dab16b3a071ce8316a2c8ec6090a630fecd861b",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse/process_good.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21525"
}
],
"symlink_target": ""
} |
import sys
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot import inventory
class CollectLevelUpReward(BaseTask):
    """Collect level-up rewards and optionally stop at a target level.

    Config options (read in _process_config):
        level_limit (int): exit the bot once this level is reached; -1 disables.
        collect_reward (bool): whether to claim level-up reward items.
    """
    SUPPORTED_TASK_API_VERSION = 1
    current_level = 0
    previous_level = 0
    def initialize(self):
        self._process_config()
        self.current_level = inventory.player().level
        # 0 means "never checked" -- forces a reward check on the first work().
        self.previous_level = 0
    def work(self):
        if not self._should_run():
            return
        self.current_level = inventory.player().level
        if self.collect_reward:
            # let's check level reward on bot initialization
            # to be able get rewards for old bots
            if self.previous_level == 0:
                self._collect_level_reward()
            # level up situation
            elif self.current_level > self.previous_level:
                self.emit_event(
                    'level_up',
                    formatted='Level up from {previous_level} to {current_level}',
                    data={
                        'previous_level': self.previous_level,
                        'current_level': self.current_level
                    }
                )
                self._collect_level_reward()
        if self.level_limit != -1 and self.current_level >= self.level_limit:
            sys.exit("You have reached your target level! Exiting now.")
        self.previous_level = self.current_level
    def _process_config(self):
        """Read task options from self.config, applying defaults."""
        self.level_limit = self.config.get('level_limit', -1)
        self.collect_reward = self.config.get('collect_reward', True)
    def _should_run(self):
        """Run when either feature (level limit or reward collection) is on."""
        return self.level_limit != -1 or self.collect_reward
    def _collect_level_reward(self):
        """Claim pending level-up reward items via the API and log them."""
        request = self.bot.api.create_request()
        request.level_up_rewards(level=self.current_level)
        response_dict = request.call()
        if 'status_code' in response_dict and response_dict['status_code'] == 1:
            data = (response_dict
                    .get('responses', {})
                    .get('LEVEL_UP_REWARDS', {})
                    .get('items_awarded', []))
            for item in data:
                if 'item_id' in item and str(item['item_id']) in self.bot.item_list:
                    got_item = self.bot.item_list[str(item['item_id'])]
                    item['name'] = got_item
                    # dict.get() replaces the old `'k' in d and d['k'] or 0`
                    # idiom, which is fragile for falsy-but-present values.
                    count = item.get('item_count', 0)
                    inventory.items().get(item['item_id']).add(count)
            self.emit_event(
                'level_up_reward',
                formatted='Received level up reward: {items}',
                data={
                    # [{'item_id': 3, 'name': u'Ultraball', 'item_count': 10}, {'item_id': 103, 'name': u'Hyper Potion', 'item_count': 10}]
                    'items': ', '.join(["{}x {}".format(x['item_count'], x['name']) for x in data])
                }
            )
| {
"content_hash": "2a0d2893a2e2b72a95bc55d934003295",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 139,
"avg_line_length": 39.346666666666664,
"alnum_prop": 0.5164351067434768,
"repo_name": "goedzo/PokemonGo-Bot",
"id": "2338dcc597ba2eb2294759045c146382234b09bf",
"size": "2951",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pokemongo_bot/cell_workers/collect_level_up_reward.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "26769"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "HTML",
"bytes": "5645"
},
{
"name": "JavaScript",
"bytes": "317991"
},
{
"name": "Python",
"bytes": "863163"
},
{
"name": "Shell",
"bytes": "9090"
}
],
"symlink_target": ""
} |
from __future__ import division
from vistrails.core.bundles.pyimport import py_import
import vistrails.core.requirements
from vistrails.gui.modules.source_configure import SourceConfigurationWidget
from PyQt4 import QtCore, QtGui
from vistrails.gui.theme import CurrentTheme
def TextEditor(parent=None):
    """Return the best available source-editor widget.

    Prefers the QScintilla-based editor, falling back to a plain
    QTextEdit subclass when PyQt4.Qsci cannot be imported/installed.
    """
    try:
        py_import('PyQt4.Qsci', {'linux-debian': 'python-qscintilla2',
                                 'linux-ubuntu': 'python-qscintilla2'}, True)
    except ImportError:
        return OldTextEditor(parent)
    return NewTextEditor(parent)
def NewTextEditor(parent):
    """Build and return a QScintilla-based source editor widget.

    Requires PyQt4.Qsci; the inner class is defined lazily so importing
    this module does not itself require QScintilla.
    """
    vistrails.core.requirements.require_python_module('PyQt4.Qsci')
    from PyQt4.Qsci import QsciScintilla
    class _TextEditor(QsciScintilla):
        def __init__(self, parent=None):
            QsciScintilla.__init__(self, parent)
            ## set the default font of the editor
            ## and take the same font for line numbers
            font = CurrentTheme.PYTHON_SOURCE_EDITOR_FONT
            self.setFont(font)
            fm = QtGui.QFontMetrics(font)
            ## Line numbers
            # conventionally, margin 0 is for line numbers
            self.setMarginWidth(0, fm.width( "0000" ) + 4)
            self.setMarginLineNumbers(0, True)
            self.setAutoIndent(True)
            ## Edge Mode shows a red vetical bar at 80 chars
            self.setEdgeMode(QsciScintilla.EdgeLine)
            self.setEdgeColumn(80)
            self.setEdgeColor(QtGui.QColor("#CCCCCC"))
            ## Folding visual : we will use boxes
            self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
            ## Braces matching
            self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
            ## Editing line color
            # self.setCaretLineVisible(True)
            # self.setCaretLineBackgroundColor(QtGui.QColor("#CDA869"))
            ## Margins colors
            # line numbers margin
            self.setMarginsBackgroundColor(QtGui.QColor("#FFFFFF"))
            self.setMarginsForegroundColor(QtGui.QColor("#000000"))
            # folding margin colors (foreground,background)
            self.setFoldMarginColors(QtGui.QColor("#DDDDDD"),QtGui.QColor("#DDDDDD"))
            # do not use tabs
            self.setIndentationsUseTabs(False)
            self.setTabWidth(4)
            self.setTabIndents(True)
            # set autocompletion
            self.setAutoCompletionThreshold(2)
            self.setAutoCompletionSource(QsciScintilla.AcsDocument)
            self.setAutoCompletionCaseSensitivity(True)
            self.setAutoCompletionReplaceWord(True)
            self.setAutoCompletionFillupsEnabled(True)
        def setPlainText(self, text):
            """ setPlainText(text: str) -> None
            redirect to setText
            """
            self.setText(text)
        def toPlainText(self):
            """ toPlainText() -> str
            redirect to self.text(), normalizing line endings to '\\n'
            """
            text = self.text()
            return text.replace('\r\n', '\n').replace('\r', '\n')
        # def focusOutEvent(self, event):
        #     if self.parent():
        #         QtCore.QCoreApplication.sendEvent(self.parent(), event)
        #     QsciScintilla.focusOutEvent(self, event)
    return _TextEditor(parent)
class OldTextEditor(QtGui.QTextEdit):
    """Fallback plain-text editor used when QScintilla is unavailable."""
    def __init__(self, parent=None):
        QtGui.QTextEdit.__init__(self, parent)
        self.setAcceptRichText(False)
        self.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        # apply the editor font now; re-applied on every format change below
        self.formatChanged(None)
        self.setCursorWidth(8)
        self.connect(self,
                     QtCore.SIGNAL('currentCharFormatChanged(QTextCharFormat)'),
                     self.formatChanged)
    def formatChanged(self, f):
        # force the theme's editor font regardless of the new char format
        # (the `f` argument is intentionally unused)
        self.setFont(CurrentTheme.PYTHON_SOURCE_EDITOR_FONT)
    def keyPressEvent(self, event):
        """ keyPressEvent(event: QKeyEvent) -> None
        Handle tab with 4 spaces
        """
        if event.key()==QtCore.Qt.Key_Tab:
            self.insertPlainText('    ')
        else:
            # super(PythonEditor, self).keyPressEvent(event)
            QtGui.QTextEdit.keyPressEvent(self, event)
class TextConfigurationWidget(SourceConfigurationWidget):
    """Configuration widget editing a module's 'value' port with TextEditor."""
    def __init__(self, module, controller, parent=None):
        # NOTE(review): the three False positional flags follow the
        # SourceConfigurationWidget signature — confirm their meaning there.
        SourceConfigurationWidget.__init__(self, module, controller,
                                           TextEditor, False, False, parent, False, portName='value')
| {
"content_hash": "9f07833ddc1d5cd8ea3c360851906666",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 101,
"avg_line_length": 37.51219512195122,
"alnum_prop": 0.5966623320329432,
"repo_name": "hjanime/VisTrails",
"id": "6c6c4461138bab39a42110f19673a7be0d6628fb",
"size": "6527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/gui/modules/string_configure.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import random
import string
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('gen_series')
PER_RUN = 50
class GenSeries(object):
    """
    Purely for debugging purposes. Not great quality :)

    gen_series_data:
      series: NUM
      seasons: NUM
      episodes: NUM
      qualities:
        - LIST

    This will also auto configure series plugin for testing
    """

    schema = {
        'type': 'object',
        'minProperties': 1
    }

    def __init__(self):
        # Entries generated on task start; drained PER_RUN at a time
        # by on_task_input across reruns.
        self.entries = []

    @plugin.priority(200)
    def on_task_start(self, task, config):
        """Generate mock entries for every series/season/episode/quality combo
        and auto-configure the series plugin with the generated series names."""
        log.info('Generating test data ...')
        series = []
        for num in range(config['series']):
            series.append('series %d name' % num)
            for season in range(int(config['seasons'])):
                for episode in range(int(config['episodes'])):
                    for quality in config['qualities']:
                        entry = Entry()
                        entry['title'] = 'series %d name - S%02dE%02d - %s' % \
                            (num, season + 1, episode + 1, quality)
                        # BUG FIX: string.letters is Python 2 only and this file
                        # targets py2/py3 (builtins import); ascii_letters works on both.
                        entry['url'] = 'http://localhost/mock/%s' % \
                            ''.join(random.choice(string.ascii_letters + string.digits)
                                    for _ in range(1, 30))
                        self.entries.append(entry)
        log.info('Generated %d entries' % len(self.entries))
        # configure series plugin, bad way but this is debug shit
        task.config['series'] = series

    def on_task_input(self, task, config):
        """Return at most PER_RUN pending entries, removing them from the queue."""
        batch = self.entries[:PER_RUN]
        self.entries = self.entries[len(batch):]
        return batch

    def on_task_exit(self, task, config):
        """Request another task run while generated entries remain queued."""
        if self.entries:
            log.info('There are still %d left to be processed!' % len(self.entries))
            # rerun ad infinitum, also commits session between them
            task._rerun = True
            task._rerun_count = 0
@event('plugin.register')
def register_plugin():
    # Register the generator under the 'gen_series_data' plugin keyword;
    # debug=True marks it as a debugging-only plugin.
    plugin.register(GenSeries, 'gen_series_data', api_ver=2, debug=True)
| {
"content_hash": "2df841dd3b983c917c50905fda38445e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 117,
"avg_line_length": 31.294871794871796,
"alnum_prop": 0.5620647275706677,
"repo_name": "oxc/Flexget",
"id": "464ccb4a13d40d0c93e3e4c78d182d74fda7da80",
"size": "2441",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "flexget/plugins/input/gen_series.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9267"
},
{
"name": "HTML",
"bytes": "49610"
},
{
"name": "JavaScript",
"bytes": "239825"
},
{
"name": "Python",
"bytes": "2749010"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
} |
import os
import json
def test_compendium(script_runner, tmpdir):
    """Extract metadata from a compendium; main and display files must be found."""
    ret = script_runner.run('o2r-meta', '-debug', 'extract',
                            '-i', 'extract/compendium',
                            '-o', str(tmpdir),
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    assert "total files processed: 1" in ret.stdout, "should process 1 file"
    # use a context manager so the file handle is closed deterministically
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as fp:
        metadata = json.load(fp)
    assert metadata['displayfile'] == "paper.html"
    assert metadata['mainfile'] == "paper.rmd"
def test_compendium_basedir(script_runner, tmpdir):
    """Extract with an explicit base dir; results must match the plain run."""
    ret = script_runner.run('o2r-meta', '-debug', 'extract',
                            '-i', 'extract/compendium',
                            '-o', str(tmpdir),
                            '-b', 'extract/compendium',
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    # fixed typo in the assertion message ("shosuld" -> "should")
    assert ret.stderr == '', "stderr should be empty"
    assert "total files processed: 1" in ret.stdout, "should process 1 file"
    # use a context manager so the file handle is closed deterministically
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as fp:
        metadata = json.load(fp)
    assert metadata['displayfile'] == "paper.html"
    assert metadata['mainfile'] == "paper.rmd"
def test_minimal(script_runner, tmpdir):
    """Without a base dir, extracted paths keep their input-directory prefix."""
    ret = script_runner.run('o2r-meta', '-debug', 'extract',
                            '-i', 'extract/minimal',
                            '-o', str(tmpdir),
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    assert "total files processed: 2" in ret.stdout, "should process 2 files"
    # use a context manager so the file handle is closed deterministically
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as fp:
        metadata = json.load(fp)
    assert metadata['displayfile'] == "extract/minimal/display.html"
    assert metadata['mainfile'] == "extract/minimal/main.Rmd"
def test_minimal_basedir(script_runner, tmpdir):
    """With a base dir, extracted paths must be relative to that base dir."""
    ret = script_runner.run('o2r-meta', '-debug', 'extract',
                            '-i', 'extract/minimal',
                            '-o', str(tmpdir),
                            '-b', 'extract/minimal',
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    assert "total files processed: 2" in ret.stdout, "should process 2 files"
    # use a context manager so the file handle is closed deterministically
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as fp:
        metadata = json.load(fp)
    assert metadata['displayfile'] == "display.html", "displayfile path should be relative to basedir"
    assert metadata['mainfile'] == "main.Rmd", "mainfile path should be relative to basedir"
def test_best_displayfile_candidate(script_runner, tmpdir):
    """The best-named display file must be chosen and ranked first."""
    ret = script_runner.run('o2r-meta', '-debug', 'extract',
                            '-i', 'extract/displayfiles/best_by_name',
                            '-o', str(tmpdir),
                            '-b', 'extract/displayfiles/best_by_name',
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    # use a context manager so the file handle is closed deterministically
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as fp:
        metadata = json.load(fp)
    assert metadata['displayfile'] == "display.html", "best matching file should be displayfile"
    assert len(metadata['displayfile_candidates']) == 7, "should have 7 candidates"
    assert "display.pdf" not in metadata['displayfile_candidates'], "should not list pdf as displayfile candidate"
    assert metadata['displayfile_candidates'][0] == "display.html", "best matching displayfile should be first in candidate list"
def test_best_mainfile_candidate(script_runner, tmpdir):
    """The best-named main file must be chosen and ranked first."""
    ret = script_runner.run('o2r-meta', '-debug', 'extract',
                            '-i', 'extract/displayfiles/best_by_name',
                            '-o', str(tmpdir),
                            '-b', 'extract/displayfiles/best_by_name',
                            '-xo', '-m')
    print(ret.stdout)
    print(ret.stderr)
    assert ret.success, "process should return success"
    assert ret.stderr == '', "stderr should be empty"
    # use a context manager so the file handle is closed deterministically
    with open(os.path.join(str(tmpdir), 'metadata_raw.json')) as fp:
        metadata = json.load(fp)
    # fixed copy-pasted assertion messages: this test checks the MAIN file
    assert metadata['mainfile'] == "main.Rmd", "best matching file should be mainfile"
    assert len(metadata['mainfile_candidates']) == 4, "should have 4 candidates"
    assert metadata['mainfile_candidates'][0] == "main.Rmd", "best matching mainfile should be first in candidate list"
| {
"content_hash": "d258193f16908d0977af5e121d98cbdc",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 129,
"avg_line_length": 42.29807692307692,
"alnum_prop": 0.6462832461923165,
"repo_name": "o2r-project/o2r-meta",
"id": "28c68a8c5c2c9d3d665d372774f8539bb1221e8d",
"size": "4419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/extract/test_extract_maindisplay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2594"
},
{
"name": "HTML",
"bytes": "1391353"
},
{
"name": "Python",
"bytes": "126586"
},
{
"name": "R",
"bytes": "1646"
},
{
"name": "Shell",
"bytes": "465"
}
],
"symlink_target": ""
} |
"""
test_django-frontend-decouple
------------
Tests for `django-frontend-decouple` models module.
"""
import os
import shutil
import unittest
from frontend import models
class TestFrontend(unittest.TestCase):
    """Placeholder test case for the frontend models module (no real tests yet)."""

    def setUp(self):
        """No fixtures are required yet."""
        pass

    def test_something(self):
        """Intentionally empty smoke test."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass
| {
"content_hash": "7af3ae91d185b09fd714b31fb38c2062",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 51,
"avg_line_length": 13.833333333333334,
"alnum_prop": 0.6566265060240963,
"repo_name": "DjenieLabs/django-frontend-decouple",
"id": "01fb1159f3d8e4610a11afb50f55bfbe1e240431",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41536"
},
{
"name": "HTML",
"bytes": "65874"
},
{
"name": "Makefile",
"bytes": "1263"
},
{
"name": "Python",
"bytes": "8702"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class Operations(object):
    """Operations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Version of the API to be used with the client request. Constant value: "2018-01-01".
    """
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned API version sent as the 'api-version' query parameter.
        self.api_version = "2018-01-01"
        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """List available operations.
        List the available operations supported by the Microsoft.EventGrid
        resource provider.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Operation
        :rtype:
         ~azure.mgmt.eventgrid.models.OperationPaged[~azure.mgmt.eventgrid.models.Operation]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher invoked lazily by OperationPaged: fetches the first
        # page when next_link is None, otherwise follows the server-supplied
        # continuation link verbatim (no query parameters re-added).
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/providers/Microsoft.EventGrid/operations'
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # uuid1 gives a per-request correlation id for tracing
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            # Anything but 200 is surfaced as a CloudError with the request id
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.OperationPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.OperationPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| {
"content_hash": "1d6af6a76b853a036971b2952f4ffbf1",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 144,
"avg_line_length": 37.766666666666666,
"alnum_prop": 0.6301853486319505,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "ab97c65dfadcf2bdc01d5c81da926adf0593a677",
"size": "3873",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-eventgrid/azure/mgmt/eventgrid/operations/operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""
Rotation around the z-axis.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import InstructionSet
from qiskit import QuantumCircuit
from qiskit import QuantumRegister
from qiskit.extensions.standard import header # pylint: disable=unused-import
class RZGate(Gate):
    """rotation around the z-axis."""
    def __init__(self, phi, qubit, circ=None):
        """Create new rz single qubit gate.

        phi -- rotation angle parameter.
        qubit -- (register, index) pair identifying the target qubit
                 (indexed as qubit[0]/qubit[1] in qasm() below).
        circ -- circuit this gate belongs to, if any.
        """
        super().__init__("rz", [phi], [qubit], circ)
    def qasm(self):
        """Return OPENQASM string."""
        qubit = self.arg[0]
        phi = self.param[0]
        return self._qasmif("rz(%s) %s[%d];" % (phi, qubit[0].openqasm_name, qubit[1]))
    def inverse(self):
        """Invert this gate.
        rz(phi)^dagger = rz(-phi)

        NOTE: mutates this gate in place (negates the stored angle) and
        returns self rather than building a new gate.
        """
        self.param[0] = -self.param[0]
        return self
    def reapply(self, circ):
        """Reapply this gate to corresponding qubits in circ."""
        self._modifiers(circ.rz(self.param[0], self.arg[0]))
def rz(self, phi, q):
    """Apply Rz to q."""
    if not isinstance(q, QuantumRegister):
        # Single (register, index) qubit: validate it and attach the gate.
        self._check_qubit(q)
        return self._attach(RZGate(phi, q, self))
    # Whole register: fan the gate out over every qubit it holds.
    gate_set = InstructionSet()
    for index in range(q.size):
        gate_set.add(self.rz(phi, (q, index)))
    return gate_set
# Monkey-patch rz onto the circuit and composite-gate classes so callers
# can write circuit.rz(phi, q) / gate.rz(phi, q) directly.
QuantumCircuit.rz = rz
CompositeGate.rz = rz
| {
"content_hash": "65bc37ba1ff71b74d4eb05cf167beb24",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 87,
"avg_line_length": 26.647058823529413,
"alnum_prop": 0.6129506990434143,
"repo_name": "atilag/qiskit-sdk-py",
"id": "4959b8f6bab850367317a37fcf37a038612fd36f",
"size": "2094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiskit/extensions/standard/rz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "328941"
},
{
"name": "CMake",
"bytes": "18631"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Python",
"bytes": "1237474"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
import json
import unittest
from models import *
class TestAgent(unittest.TestCase):
    """Tests for the Agent model: RDF properties, dict/JSON serialization, @type."""

    # RDF properties every Agent instance must expose.
    AGENT_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.agent = Agent(label="Agent")

    def test_init(self):
        # assertIsInstance replaces the deprecated assert_ alias
        self.assertIsInstance(self.agent, Agent)

    def test_rdf_properties(self):
        # data-driven check with a useful failure message per property
        for prop in self.AGENT_PROPERTIES:
            self.assertTrue(hasattr(self.agent, prop),
                            "Agent is missing property: %s" % prop)

    def test_to_dict(self):
        # NOTE(review): assertItemsEqual is Python 2 only (assertCountEqual on
        # Python 3); kept because this suite appears to target Python 2.
        self.assertItemsEqual(self.agent.as_dict(),
                              {u'@type': 'Agent',
                               u'hasAuthority': None,
                               u'authorityAssigner': None,
                               'identifiers': {},
                               'label': 'Agent',
                               u'relatedTo': None,
                               u'authoritySource': None,
                               u'identifier': None,
                               u'authorizedAccessPoint': None})
        self.assertItemsEqual(self.agent.as_dict(show_null=False),
                              {'identifiers': {},
                               'label': 'Agent',
                               '@type': 'Agent'})

    def test_to_json(self):
        # assertEqual replaces the deprecated assertEquals alias
        self.assertEqual(json.loads(self.agent.as_json()),
                         json.loads("""{
           "@type": "Agent",
           "authorityAssigner": null,
           "authoritySource": null,
           "authorizedAccessPoint": null,
           "hasAuthority": null,
           "identifier": null,
           "identifiers": {},
           "label": "Agent",
           "relatedTo": null}"""))

    def test_type(self):
        self.assertEqual(getattr(self.agent, '@type'), 'Agent')

    def tearDown(self):
        pass
class TestAnnotation(unittest.TestCase):
    """Tests for the Annotation model: construction, RDF properties and @type."""

    # RDF properties every Annotation instance must expose.
    ANNOTATION_PROPERTIES = (
        'annotates', 'annotationAssertedBy', 'annotationBody',
        'annotationSource', 'assertionDate', 'authorizedAccessPoint',
        'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.annotation = Annotation()

    def test_init(self):
        # assertIsInstance replaces the deprecated assert_ alias
        self.assertIsInstance(self.annotation, Annotation)

    def test_rdf_properties(self):
        for prop in self.ANNOTATION_PROPERTIES:
            self.assertTrue(hasattr(self.annotation, prop),
                            "Annotation is missing property: %s" % prop)

    def test_type(self):
        # assertEqual replaces the deprecated assertEquals alias
        self.assertEqual(getattr(self.annotation, '@type'), 'Annotation')

    def tearDown(self):
        pass
class TestArchival(unittest.TestCase):
    """Tests for the Archival model: construction and RDF properties."""

    # RDF properties every Archival instance must expose (one assertion each).
    ARCHIVAL_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        self.archival = Archival()

    def test_init(self):
        # assertIsInstance replaces the deprecated assert_ alias
        self.assertIsInstance(self.archival, Archival)

    def test_rdf_properties(self):
        # data-driven replacement for ~80 duplicated assert_ lines;
        # also reports WHICH property is missing on failure
        for prop in self.ARCHIVAL_PROPERTIES:
            self.assertTrue(hasattr(self.archival, prop),
                            "Archival is missing property: %s" % prop)

    def tearDown(self):
        pass
class TestArrangement(unittest.TestCase):
    """Tests for the Arrangement model: construction and RDF properties."""

    # RDF properties every Arrangement instance must expose.
    ARRANGEMENT_PROPERTIES = (
        'authorizedAccessPoint', 'identifier', 'label',
        'materialArrangement', 'materialHierarchicalLevel',
        'materialOrganization', 'materialPart', 'relatedTo',
    )

    def setUp(self):
        self.arrangement = Arrangement()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias;
        # exact type equality (not isinstance) kept as in the original
        self.assertEqual(type(self.arrangement), Arrangement)

    def test_rdf_properties(self):
        for prop in self.ARRANGEMENT_PROPERTIES:
            self.assertTrue(hasattr(self.arrangement, prop),
                            "Arrangement is missing property: %s" % prop)

    def tearDown(self):
        pass
class TestAudio(unittest.TestCase):
    """Tests for the Audio model: construction and RDF properties."""

    # RDF properties every Audio instance must expose (one assertion each).
    AUDIO_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL', 'istc', 'iswc', 'label',
        'languageNote', 'mergedToForm', 'originDate', 'originPlace',
        'originalVersion', 'otherEdition', 'precedes', 'relatedTo',
        'relatedWork', 'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart', 'supplement',
        'supplementTo', 'temporalCoverageNote', 'translation',
        'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        self.audio = Audio()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias;
        # exact type equality (not isinstance) kept as in the original
        self.assertEqual(type(self.audio), Audio)

    def test_rdf_properties(self):
        # data-driven replacement for the duplicated assert_ lines
        for prop in self.AUDIO_PROPERTIES:
            self.assertTrue(hasattr(self.audio, prop),
                            "Audio is missing property: %s" % prop)

    def tearDown(self):
        pass
class TestAuthority(unittest.TestCase):
    """Tests for the Authority model: construction and RDF properties."""

    # RDF properties every Authority instance must expose.
    AUTHORITY_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.authority = Authority()

    def test_init(self):
        # assertIsInstance replaces the deprecated assert_ alias
        self.assertIsInstance(self.authority, Authority)

    def test_rdf_properties(self):
        for prop in self.AUTHORITY_PROPERTIES:
            self.assertTrue(hasattr(self.authority, prop),
                            "Authority is missing property: %s" % prop)

    def test_hasAnnotation(self):
        # TODO: annotation support is not covered yet
        pass

    def tearDown(self):
        pass
class TestCartography(unittest.TestCase):
    """Tests for the Cartography model: construction and RDF properties."""

    # RDF properties every Cartography instance must expose (one assertion each).
    CARTOGRAPHY_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'cartographicAscensionAndDeclination',
        'cartographicCoordinates', 'cartographicEquinox',
        'cartographicExclusionGRing', 'cartographicOuterGRing',
        'cartographicProjection', 'cartographicScale', 'classification',
        'classificationDdc', 'classificationLcc', 'classificationNlm',
        'classificationUdc', 'containedIn', 'contains', 'contentCategory',
        'continuedBy', 'continuedInPartBy', 'continues', 'continuesInPart',
        'dataSource', 'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL', 'istc', 'iswc', 'label',
        'languageNote', 'mergedToForm', 'originDate', 'originPlace',
        'originalVersion', 'otherEdition', 'precedes', 'relatedTo',
        'relatedWork', 'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart', 'supplement',
        'supplementTo', 'temporalCoverageNote', 'translation',
        'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        self.cartography = Cartography()

    def test_init(self):
        # assertIsInstance replaces the deprecated assert_ alias
        self.assertIsInstance(self.cartography, Cartography)

    def test_rdf_properties(self):
        # data-driven replacement for the duplicated assert_ lines
        for prop in self.CARTOGRAPHY_PROPERTIES:
            self.assertTrue(hasattr(self.cartography, prop),
                            "Cartography is missing property: %s" % prop)

    def tearDown(self):
        pass
class TestCategory(unittest.TestCase):
    """Tests for the Category model: construction and RDF properties."""

    # RDF properties every Category instance must expose.
    CATEGORY_PROPERTIES = (
        'authorizedAccessPoint', 'categorySource', 'categoryType',
        'categoryValue', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.category = Category()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias;
        # exact type equality (not isinstance) kept as in the original
        self.assertEqual(type(self.category), Category)

    def test_rdf_properties(self):
        for prop in self.CATEGORY_PROPERTIES:
            self.assertTrue(hasattr(self.category, prop),
                            "Category is missing property: %s" % prop)

    def tearDown(self):
        pass
class TestClassification(unittest.TestCase):
    """Tests for the Classification model: construction and RDF properties."""

    # RDF properties every Classification instance must expose.
    CLASSIFICATION_PROPERTIES = (
        'authorizedAccessPoint', 'classificationAssigner',
        'classificationDesignation', 'classificationEdition',
        'classificationItem', 'classificationNumber',
        'classificationNumberUri', 'classificationScheme',
        'classificationSpanEnd', 'classificationStatus',
        'classificationTable', 'classificationTableSeq',
        'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.classification = Classification()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias;
        # exact type equality (not isinstance) kept as in the original
        self.assertEqual(type(self.classification), Classification)

    def test_rdf_properties(self):
        for prop in self.CLASSIFICATION_PROPERTIES:
            self.assertTrue(hasattr(self.classification, prop),
                            "Classification is missing property: %s" % prop)

    def tearDown(self):
        pass
class TestCollection(unittest.TestCase):
    """Verify that Collection exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        self.collection = Collection()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.collection), Collection)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.collection, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestCoverArt(unittest.TestCase):
    """Verify that CoverArt exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'annotates', 'annotationAssertedBy', 'annotationBody',
        'annotationSource', 'assertionDate', 'authorizedAccessPoint',
        'coverArt', 'coverArtFor', 'coverArtThumb', 'identifier', 'label',
        'relatedTo',
    )

    def setUp(self):
        self.coverart = CoverArt()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.coverart), CoverArt)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.coverart, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestDataset(unittest.TestCase):
    """Verify that Dataset exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL', 'istc', 'iswc', 'label',
        'languageNote', 'mergedToForm', 'originDate', 'originPlace',
        'originalVersion', 'otherEdition', 'precedes', 'relatedTo',
        'relatedWork', 'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart', 'supplement',
        'supplementTo', 'temporalCoverageNote', 'translation',
        'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        self.dataset = Dataset()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.dataset), Dataset)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.dataset, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestDescriptionAdminInfo(unittest.TestCase):
    """Verify that DescriptionAdminInfo exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'authorizedAccessPoint', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.descriptionadmininfo = DescriptionAdminInfo()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.descriptionadmininfo), DescriptionAdminInfo)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.descriptionadmininfo, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestElectronic(unittest.TestCase):
    """Verify that Electronic exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        self.electronic = Electronic()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.electronic), Electronic)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.electronic, prop),
                            'missing RDF property: %s' % prop)

    # Added for consistency: this was the only test class in the file without
    # a (no-op) tearDown.
    def tearDown(self):
        pass
class TestEvent(unittest.TestCase):
    """Verify that Event exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'authorizedAccessPoint', 'eventAgent', 'eventDate', 'eventPlace',
        'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.event = Event()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.event), Event)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.event, prop),
                            'missing RDF property: %s' % prop)

    # Bug fix: the original class defined tearDown twice in a row; the second
    # definition silently shadowed the first. Both were no-ops, so a single
    # definition preserves behavior.
    def tearDown(self):
        pass
class TestFamily(unittest.TestCase):
    """Verify that Family exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.family = Family()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.family), Family)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.family, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestHeldItem(unittest.TestCase):
    """Verify that HeldItem exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'accessCondition', 'annotates', 'annotationAssertedBy',
        'annotationBody', 'annotationSource', 'assertionDate',
        'authorizedAccessPoint', 'barcode', 'circulationStatus',
        'componentOf', 'copyNote', 'enumerationAndChronology', 'heldBy',
        'holdingFor', 'identifier', 'itemId', 'label', 'lendingPolicy',
        'relatedTo', 'reproductionPolicy', 'retentionPolicy', 'shelfMark',
        'shelfMarkDdc', 'shelfMarkLcc', 'shelfMarkNlm', 'shelfMarkScheme',
        'shelfMarkUdc', 'subLocation',
    )

    def setUp(self):
        self.helditem = HeldItem()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.helditem), HeldItem)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.helditem, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestHeldMaterial(unittest.TestCase):
    """Verify that HeldMaterial exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'accessCondition', 'annotates', 'annotationAssertedBy',
        'annotationBody', 'annotationSource', 'assertionDate',
        'authorizedAccessPoint', 'enumerationAndChronology', 'heldBy',
        'holdingFor', 'identifier', 'label', 'lendingPolicy', 'relatedTo',
        'reproductionPolicy', 'retentionPolicy', 'subLocation',
    )

    def setUp(self):
        self.heldmaterial = HeldMaterial()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.heldmaterial), HeldMaterial)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.heldmaterial, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestIdentifier(unittest.TestCase):
    """Verify that Identifier exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'authorizedAccessPoint', 'identifier', 'identifierAssigner',
        'identifierQualifier', 'identifierScheme', 'identifierStatus',
        'identifierValue', 'label', 'relatedTo',
    )

    def setUp(self):
        self.identifier = Identifier()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.identifier), Identifier)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.identifier, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestInstance(unittest.TestCase):
    """Verify that Instance exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        self.instance = Instance()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.instance), Instance)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.instance, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestIntegrating(unittest.TestCase):
    """Verify that Integrating exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        self.integrating = Integrating()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.integrating), Integrating)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.integrating, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestIntendedAudience(unittest.TestCase):
    """Verify that IntendedAudience exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'audience', 'audienceAssigner', 'authorizedAccessPoint',
        'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.intendedaudience = IntendedAudience()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.intendedaudience), IntendedAudience)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.intendedaudience, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestJurisdiction(unittest.TestCase):
    """Verify that Jurisdiction exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.jurisdiction = Jurisdiction()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.jurisdiction), Jurisdiction)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.jurisdiction, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestLanguage(unittest.TestCase):
    """Verify that Language exposes its expected RDF properties."""

    # Attribute names checked by test_rdf_properties.
    RDF_PROPERTIES = (
        'authorizedAccessPoint', 'identifier', 'label', 'languageOfPart',
        'languageOfPartUri', 'languageSource', 'relatedTo', 'resourcePart',
    )

    def setUp(self):
        self.language = Language()

    def test_init(self):
        # assertIs keeps the original exact-type check while dropping the
        # deprecated assertEquals alias (removed in Python 3.12).
        self.assertIs(type(self.language), Language)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message names
        # the first missing property on failure.
        for prop in self.RDF_PROPERTIES:
            self.assertTrue(hasattr(self.language, prop),
                            'missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestManuscript(unittest.TestCase):
    """Manuscript instantiates and exposes its expected RDF properties."""

    # Every RDF property a Manuscript instance is expected to expose.
    RDF_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.manuscript = Manuscript()

    def test_init(self):
        """Instantiation yields exactly a Manuscript object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.manuscript), Manuscript)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12); subTest
        # reports each missing property individually.
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.manuscript, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestMeeting(unittest.TestCase):
    """Meeting instantiates and exposes its expected RDF properties."""

    # Every RDF property a Meeting instance is expected to expose.
    RDF_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.meeting = Meeting()

    def test_init(self):
        """Instantiation yields exactly a Meeting object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.meeting), Meeting)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12).
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.meeting, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestMixedMaterial(unittest.TestCase):
    """MixedMaterial instantiates and exposes its expected RDF properties."""

    # Every RDF property a MixedMaterial instance is expected to expose.
    RDF_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL', 'istc', 'iswc', 'label',
        'languageNote', 'mergedToForm', 'originDate', 'originPlace',
        'originalVersion', 'otherEdition', 'precedes', 'relatedTo',
        'relatedWork', 'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart', 'supplement',
        'supplementTo', 'temporalCoverageNote', 'translation',
        'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.mixedmaterial = MixedMaterial()

    def test_init(self):
        """Instantiation yields exactly a MixedMaterial object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.mixedmaterial), MixedMaterial)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12); subTest
        # reports each missing property individually.
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.mixedmaterial, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestMonograph(unittest.TestCase):
    """Monograph instantiates and exposes its expected RDF properties."""

    # Every RDF property a Monograph instance is expected to expose.
    RDF_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.monograph = Monograph()

    def test_init(self):
        """Instantiation yields exactly a Monograph object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.monograph), Monograph)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12); subTest
        # reports each missing property individually.
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.monograph, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestMovingImage(unittest.TestCase):
    """MovingImage instantiates and exposes its expected RDF properties."""

    # Every RDF property a MovingImage instance is expected to expose.
    RDF_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL', 'istc', 'iswc', 'label',
        'languageNote', 'mergedToForm', 'originDate', 'originPlace',
        'originalVersion', 'otherEdition', 'precedes', 'relatedTo',
        'relatedWork', 'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart', 'supplement',
        'supplementTo', 'temporalCoverageNote', 'translation',
        'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.movingimage = MovingImage()

    def test_init(self):
        """Instantiation yields exactly a MovingImage object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.movingimage), MovingImage)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12); subTest
        # reports each missing property individually.
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.movingimage, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestMultimedia(unittest.TestCase):
    """Multimedia instantiates and exposes its expected RDF properties."""

    # Every RDF property a Multimedia instance is expected to expose.
    RDF_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL', 'istc', 'iswc', 'label',
        'languageNote', 'mergedToForm', 'originDate', 'originPlace',
        'originalVersion', 'otherEdition', 'precedes', 'relatedTo',
        'relatedWork', 'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart', 'supplement',
        'supplementTo', 'temporalCoverageNote', 'translation',
        'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.multimedia = Multimedia()

    def test_init(self):
        """Instantiation yields exactly a Multimedia object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.multimedia), Multimedia)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12); subTest
        # reports each missing property individually.
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.multimedia, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestMultipartMonograph(unittest.TestCase):
    """MultipartMonograph instantiates and exposes its RDF properties."""

    # Every RDF property a MultipartMonograph instance must expose.
    RDF_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent', 'fingerprint',
        'formatOfMusic', 'frequency', 'frequencyNote', 'graphicScaleNote',
        'hasEquivalent', 'hdl', 'identifier', 'illustrationNote',
        'instanceOf', 'instanceTitle', 'isbn', 'isbn10', 'isbn13', 'ismn',
        'iso', 'isrc', 'issn', 'issueNumber', 'issuedWith', 'keyTitle',
        'label', 'lcOverseasAcq', 'lccn', 'legalDeposit', 'local',
        'manufacture', 'matrixNumber', 'mediaCategory', 'modeOfIssuance',
        'musicPlate', 'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber', 'strn',
        'studyNumber', 'supplementaryContentNote', 'titleStatement', 'upc',
        'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.multipartmonograph = MultipartMonograph()

    def test_init(self):
        """Instantiation yields exactly a MultipartMonograph object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.multipartmonograph), MultipartMonograph)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12); subTest
        # reports each missing property individually.
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.multipartmonograph, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestOrganization(unittest.TestCase):
    """Organization instantiates and exposes its expected RDF properties."""

    # Every RDF property an Organization instance is expected to expose.
    RDF_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.organization = Organization()

    def test_init(self):
        """Instantiation yields exactly an Organization object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.organization), Organization)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12).
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.organization, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestPerson(unittest.TestCase):
    """Person instantiates and exposes its expected RDF properties."""

    # Every RDF property a Person instance is expected to expose.
    RDF_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.person = Person()

    def test_init(self):
        """Instantiation yields exactly a Person object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.person), Person)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12).
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.person, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestPlace(unittest.TestCase):
    """Place instantiates and exposes its expected RDF properties."""

    # Every RDF property a Place instance is expected to expose.
    RDF_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        # Fresh instance for each test method.
        self.place = Place()

    def test_init(self):
        """Instantiation yields exactly a Place object."""
        # assertEqual replaces assertEquals (removed in Python 3.12).
        self.assertEqual(type(self.place), Place)

    def test_rdf_properties(self):
        """The instance exposes every expected RDF property attribute."""
        # assertTrue replaces assert_ (removed in Python 3.12).
        for prop in self.RDF_PROPERTIES:
            with self.subTest(prop=prop):
                self.assertTrue(hasattr(self.place, prop))

    def tearDown(self):
        # Nothing to clean up.
        pass
class TestPrint(unittest.TestCase):
def setUp(self):
self.print_ = Print()
def test_init(self):
self.assertEquals(type(self.print_), Print)
def test_rdf_properties(self):
self.assert_(hasattr(self.print_, 'abbreviatedTitle'))
self.assert_(hasattr(self.print_, 'ansi'))
self.assert_(hasattr(self.print_, 'arrangement'))
self.assert_(hasattr(self.print_, 'aspectRatio'))
self.assert_(hasattr(self.print_, 'authorizedAccessPoint'))
self.assert_(hasattr(self.print_, 'awardNote'))
self.assert_(hasattr(self.print_, 'carrierCategory'))
self.assert_(hasattr(self.print_, 'coden'))
self.assert_(hasattr(self.print_, 'colorContent'))
self.assert_(hasattr(self.print_, 'contentAccessibility'))
self.assert_(hasattr(self.print_, 'contentsNote'))
self.assert_(hasattr(self.print_, 'custodialHistory'))
self.assert_(hasattr(self.print_, 'dimensions'))
self.assert_(hasattr(self.print_, 'distribution'))
self.assert_(hasattr(self.print_, 'doi'))
self.assert_(hasattr(self.print_, 'duration'))
self.assert_(hasattr(self.print_, 'ean'))
self.assert_(hasattr(self.print_, 'edition'))
self.assert_(hasattr(self.print_, 'editionResponsibility'))
self.assert_(hasattr(self.print_, 'extent'))
self.assert_(hasattr(self.print_, 'fingerprint'))
self.assert_(hasattr(self.print_, 'formatOfMusic'))
self.assert_(hasattr(self.print_, 'frequency'))
self.assert_(hasattr(self.print_, 'frequencyNote'))
self.assert_(hasattr(self.print_, 'graphicScaleNote'))
self.assert_(hasattr(self.print_, 'hasEquivalent'))
self.assert_(hasattr(self.print_, 'hdl'))
self.assert_(hasattr(self.print_, 'identifier'))
self.assert_(hasattr(self.print_, 'illustrationNote'))
self.assert_(hasattr(self.print_, 'instanceOf'))
self.assert_(hasattr(self.print_, 'instanceTitle'))
self.assert_(hasattr(self.print_, 'isbn'))
self.assert_(hasattr(self.print_, 'isbn10'))
self.assert_(hasattr(self.print_, 'isbn13'))
self.assert_(hasattr(self.print_, 'ismn'))
self.assert_(hasattr(self.print_, 'iso'))
self.assert_(hasattr(self.print_, 'isrc'))
self.assert_(hasattr(self.print_, 'issn'))
self.assert_(hasattr(self.print_, 'issueNumber'))
self.assert_(hasattr(self.print_, 'issuedWith'))
self.assert_(hasattr(self.print_, 'keyTitle'))
self.assert_(hasattr(self.print_, 'label'))
self.assert_(hasattr(self.print_, 'lcOverseasAcq'))
self.assert_(hasattr(self.print_, 'lccn'))
self.assert_(hasattr(self.print_, 'legalDeposit'))
self.assert_(hasattr(self.print_, 'local'))
self.assert_(hasattr(self.print_, 'manufacture'))
self.assert_(hasattr(self.print_, 'matrixNumber'))
self.assert_(hasattr(self.print_, 'mediaCategory'))
self.assert_(hasattr(self.print_, 'modeOfIssuance'))
self.assert_(hasattr(self.print_, 'musicPlate'))
self.assert_(hasattr(self.print_, 'musicPublisherNumber'))
self.assert_(hasattr(self.print_, 'nban'))
self.assert_(hasattr(self.print_, 'nbn'))
self.assert_(hasattr(self.print_, 'notation'))
self.assert_(hasattr(self.print_, 'otherPhysicalFormat'))
self.assert_(hasattr(self.print_, 'postalRegistration'))
self.assert_(hasattr(self.print_, 'preferredCitation'))
self.assert_(hasattr(self.print_, 'production'))
self.assert_(hasattr(self.print_, 'provider'))
self.assert_(hasattr(self.print_, 'providerStatement'))
self.assert_(hasattr(self.print_, 'publication'))
self.assert_(hasattr(self.print_, 'publisherNumber'))
self.assert_(hasattr(self.print_, 'relatedInstance'))
self.assert_(hasattr(self.print_, 'relatedTo'))
self.assert_(hasattr(self.print_, 'reportNumber'))
self.assert_(hasattr(self.print_, 'reproduction'))
self.assert_(hasattr(self.print_, 'responsibilityStatement'))
self.assert_(hasattr(self.print_, 'serialFirstIssue'))
self.assert_(hasattr(self.print_, 'serialLastIssue'))
self.assert_(hasattr(self.print_, 'sici'))
self.assert_(hasattr(self.print_, 'soundContent'))
self.assert_(hasattr(self.print_, 'stockNumber'))
self.assert_(hasattr(self.print_, 'strn'))
self.assert_(hasattr(self.print_, 'studyNumber'))
self.assert_(hasattr(self.print_, 'supplementaryContentNote'))
self.assert_(hasattr(self.print_, 'titleStatement'))
self.assert_(hasattr(self.print_, 'upc'))
self.assert_(hasattr(self.print_, 'uri'))
self.assert_(hasattr(self.print_, 'urn'))
self.assert_(hasattr(self.print_, 'videorecordingNumber'))
    def tearDown(self):
        # No per-test cleanup needed; a fresh instance is built in setUp.
        pass
class TestProvider(unittest.TestCase):
    """Verify the Provider BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Provider must expose.
    EXPECTED_PROPERTIES = (
        'authorizedAccessPoint', 'copyrightDate', 'identifier', 'label',
        'providerDate', 'providerName', 'providerPlace', 'providerRole',
        'relatedTo',
    )

    def setUp(self):
        self.provider = Provider()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.provider), Provider)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message
        # names the missing property instead of failing anonymously.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.provider, prop),
                            'Provider is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestRelated(unittest.TestCase):
    """Verify the Related BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Related must expose.
    EXPECTED_PROPERTIES = (
        'authorizedAccessPoint', 'identifier', 'label', 'relatedTo',
        'relationship', 'relationshipUri',
    )

    def setUp(self):
        self.related = Related()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.related), Related)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.related, prop),
                            'Related is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
##def test_frame(bf_class):
## print_("""class Test{}(unittest.TestCase):""".format(bf_class))
## print_(""" def setUp(self):
## self.{} = {}()""".format(bf_class.lower(), bf_class))
## print_("\n")
## print_(""" def test_init(self):
## self.assertEquals(type(self.{}), {})""".format(bf_class.lower(), bf_class))
## print_("\n")
## print_(""" def test_rdf_properties(self):""")
## for row in dir(getattr(models, bf_class)):
## if type(row) == unicode:
## print_(""" self.assert_(hasattr(self.{}, '{}'))""".format(bf_class.lower(), row))
## print_("\n")
## print_(""" def tearDown(self):
## pass""")
class TestResource(unittest.TestCase):
    """Verify the Resource BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Resource must expose.
    EXPECTED_PROPERTIES = (
        'authorizedAccessPoint', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.resource = Resource()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.resource), Resource)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.resource, prop),
                            'Resource is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestReview(unittest.TestCase):
    """Verify the Review BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Review must expose.
    EXPECTED_PROPERTIES = (
        'annotates', 'annotationAssertedBy', 'annotationBody',
        'annotationSource', 'assertionDate', 'authorizedAccessPoint',
        'identifier', 'label', 'relatedTo', 'review', 'reviewOf',
        'startOfReview',
    )

    def setUp(self):
        self.review = Review()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.review), Review)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.review, prop),
                            'Review is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestSerial(unittest.TestCase):
    """Verify the Serial BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Serial must expose
    # (the common BIBFRAME Instance property set).
    EXPECTED_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent',
        'fingerprint', 'formatOfMusic', 'frequency', 'frequencyNote',
        'graphicScaleNote', 'hasEquivalent', 'hdl', 'identifier',
        'illustrationNote', 'instanceOf', 'instanceTitle', 'isbn',
        'isbn10', 'isbn13', 'ismn', 'iso',
        'isrc', 'issn', 'issueNumber', 'issuedWith',
        'keyTitle', 'label', 'lcOverseasAcq', 'lccn',
        'legalDeposit', 'local', 'manufacture', 'matrixNumber',
        'mediaCategory', 'modeOfIssuance', 'musicPlate',
        'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber',
        'strn', 'studyNumber', 'supplementaryContentNote', 'titleStatement',
        'upc', 'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        self.serial = Serial()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.serial), Serial)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias; the message
        # names the missing property instead of failing anonymously.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.serial, prop),
                            'Serial is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestStillImage(unittest.TestCase):
    """Verify the StillImage BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed StillImage must expose
    # (the common BIBFRAME Work property set).
    EXPECTED_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL',
        'istc', 'iswc', 'label', 'languageNote',
        'mergedToForm', 'originDate', 'originPlace', 'originalVersion',
        'otherEdition', 'precedes', 'relatedTo', 'relatedWork',
        'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart',
        'supplement', 'supplementTo', 'temporalCoverageNote',
        'translation', 'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        self.stillimage = StillImage()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.stillimage), StillImage)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.stillimage, prop),
                            'StillImage is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestSummary(unittest.TestCase):
    """Verify the Summary BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Summary must expose.
    EXPECTED_PROPERTIES = (
        'annotates', 'annotationAssertedBy', 'annotationBody',
        'annotationSource', 'assertionDate', 'authorizedAccessPoint',
        'identifier', 'label', 'relatedTo', 'startOfSummary', 'summary',
        'summaryOf',
    )

    def setUp(self):
        self.summary = Summary()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.summary), Summary)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.summary, prop),
                            'Summary is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestTableOfContents(unittest.TestCase):
    """Verify the TableOfContents BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed TableOfContents must expose.
    EXPECTED_PROPERTIES = (
        'annotates', 'annotationAssertedBy', 'annotationBody',
        'annotationSource', 'assertionDate', 'authorizedAccessPoint',
        'identifier', 'label', 'relatedTo', 'tableOfContents',
        'tableOfContentsFor',
    )

    def setUp(self):
        self.tableofcontents = TableOfContents()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.tableofcontents), TableOfContents)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(
                hasattr(self.tableofcontents, prop),
                'TableOfContents is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestTactile(unittest.TestCase):
    """Verify the Tactile BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Tactile must expose
    # (the common BIBFRAME Instance property set).
    EXPECTED_PROPERTIES = (
        'abbreviatedTitle', 'ansi', 'arrangement', 'aspectRatio',
        'authorizedAccessPoint', 'awardNote', 'carrierCategory', 'coden',
        'colorContent', 'contentAccessibility', 'contentsNote',
        'custodialHistory', 'dimensions', 'distribution', 'doi', 'duration',
        'ean', 'edition', 'editionResponsibility', 'extent',
        'fingerprint', 'formatOfMusic', 'frequency', 'frequencyNote',
        'graphicScaleNote', 'hasEquivalent', 'hdl', 'identifier',
        'illustrationNote', 'instanceOf', 'instanceTitle', 'isbn',
        'isbn10', 'isbn13', 'ismn', 'iso',
        'isrc', 'issn', 'issueNumber', 'issuedWith',
        'keyTitle', 'label', 'lcOverseasAcq', 'lccn',
        'legalDeposit', 'local', 'manufacture', 'matrixNumber',
        'mediaCategory', 'modeOfIssuance', 'musicPlate',
        'musicPublisherNumber', 'nban', 'nbn', 'notation',
        'otherPhysicalFormat', 'postalRegistration', 'preferredCitation',
        'production', 'provider', 'providerStatement', 'publication',
        'publisherNumber', 'relatedInstance', 'relatedTo', 'reportNumber',
        'reproduction', 'responsibilityStatement', 'serialFirstIssue',
        'serialLastIssue', 'sici', 'soundContent', 'stockNumber',
        'strn', 'studyNumber', 'supplementaryContentNote', 'titleStatement',
        'upc', 'uri', 'urn', 'videorecordingNumber',
    )

    def setUp(self):
        self.tactile = Tactile()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.tactile), Tactile)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.tactile, prop),
                            'Tactile is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestTemporal(unittest.TestCase):
    """Verify the Temporal BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Temporal must expose.
    EXPECTED_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.temporal = Temporal()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.temporal), Temporal)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.temporal, prop),
                            'Temporal is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestText(unittest.TestCase):
    """Verify the Text BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Text must expose
    # (the common BIBFRAME Work property set).
    EXPECTED_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL',
        'istc', 'iswc', 'label', 'languageNote',
        'mergedToForm', 'originDate', 'originPlace', 'originalVersion',
        'otherEdition', 'precedes', 'relatedTo', 'relatedWork',
        'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart',
        'supplement', 'supplementTo', 'temporalCoverageNote',
        'translation', 'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        self.text = Text()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.text), Text)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.text, prop),
                            'Text is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestThreeDimensionalObject(unittest.TestCase):
    """Verify the ThreeDimensionalObject BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed ThreeDimensionalObject must
    # expose (the common BIBFRAME Work property set).
    EXPECTED_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL',
        'istc', 'iswc', 'label', 'languageNote',
        'mergedToForm', 'originDate', 'originPlace', 'originalVersion',
        'otherEdition', 'precedes', 'relatedTo', 'relatedWork',
        'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart',
        'supplement', 'supplementTo', 'temporalCoverageNote',
        'translation', 'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        self.threedimensionalobject = ThreeDimensionalObject()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.threedimensionalobject),
                         ThreeDimensionalObject)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(
                hasattr(self.threedimensionalobject, prop),
                'ThreeDimensionalObject is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestTitle(unittest.TestCase):
    """Verify the Title BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Title must expose.
    EXPECTED_PROPERTIES = (
        'authorizedAccessPoint', 'identifier', 'label', 'partNumber',
        'partTitle', 'relatedTo', 'subtitle', 'titleAttribute',
        'titleQualifier', 'titleSource', 'titleType', 'titleValue',
        'titleVariationDate',
    )

    def setUp(self):
        self.title = Title()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.title), Title)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.title, prop),
                            'Title is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestTopic(unittest.TestCase):
    """Verify the Topic BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Topic must expose.
    EXPECTED_PROPERTIES = (
        'authorityAssigner', 'authoritySource', 'authorizedAccessPoint',
        'hasAuthority', 'identifier', 'label', 'relatedTo',
    )

    def setUp(self):
        self.topic = Topic()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.topic), Topic)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.topic, prop),
                            'Topic is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
class TestWork(unittest.TestCase):
    """Verify the Work BIBFRAME model exposes its expected RDF properties."""

    # Every RDF property a freshly constructed Work must expose.
    EXPECTED_PROPERTIES = (
        'absorbed', 'absorbedBy', 'absorbedInPart', 'absorbedInPartBy',
        'authorizedAccessPoint', 'classification', 'classificationDdc',
        'classificationLcc', 'classificationNlm', 'classificationUdc',
        'containedIn', 'contains', 'contentCategory', 'continuedBy',
        'continuedInPartBy', 'continues', 'continuesInPart', 'dataSource',
        'dissertationDegree', 'dissertationIdentifier',
        'dissertationInstitution', 'dissertationNote', 'dissertationYear',
        'event', 'expressionOf', 'findingAid', 'geographicCoverageNote',
        'hasDerivative', 'hasExpression', 'hasInstance', 'identifier',
        'index', 'isDerivativeOf', 'isan', 'issnL',
        'istc', 'iswc', 'label', 'languageNote',
        'mergedToForm', 'originDate', 'originPlace', 'originalVersion',
        'otherEdition', 'precedes', 'relatedTo', 'relatedWork',
        'separatedFrom', 'series', 'splitInto', 'subject',
        'subseries', 'subseriesOf', 'succeeds', 'supersededBy',
        'supersededInPartBy', 'supersedes', 'supersedesInPart',
        'supplement', 'supplementTo', 'temporalCoverageNote',
        'translation', 'translationOf', 'unionOf', 'workTitle',
    )

    def setUp(self):
        self.work = Work()

    def test_init(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); the strict type() check is kept.
        self.assertEqual(type(self.work), Work)

    def test_rdf_properties(self):
        # assertTrue replaces the deprecated assert_ alias.
        for prop in self.EXPECTED_PROPERTIES:
            self.assertTrue(hasattr(self.work, prop),
                            'Work is missing RDF property: %s' % prop)

    def tearDown(self):
        pass
# Allow running this test module directly: python test_models.py
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "805cbcb12cf24c20e77681fa3d2cb6b5",
"timestamp": "",
"source": "github",
"line_count": 2587,
"max_line_length": 102,
"avg_line_length": 49.44994201778121,
"alnum_prop": 0.6692332345790959,
"repo_name": "jermnelson/flask_bibframe",
"id": "002771f2f6b59f795e2ae78ef1c9a3b2f4b76d92",
"size": "128352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_bibframe/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132917"
}
],
"symlink_target": ""
} |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# Metadata snapshots used by sqlalchemy-migrate: pre_meta describes the
# schema before this migration, post_meta describes it afterwards.
pre_meta = MetaData()
post_meta = MetaData()
# Target state of the 'tournament' table; this migration adds the
# 'url' column (see upgrade()/downgrade() below).
tournament = Table('tournament', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('official_title', String(length=128)),
    Column('host', String(length=128)),
    Column('url', String(length=128)),
    Column('entrants', Integer),
    Column('bracket_type', String(length=128)),
    Column('game_type', String(length=128)),
    Column('date', Date),
    Column('name', String(length=128)),
    Column('tournament_type', String(length=64)),
    Column('region_name', String(length=64)),
    )
def upgrade(migrate_engine):
    """Forward migration: add the ``url`` column to ``tournament``.

    ``migrate_engine`` is supplied by sqlalchemy-migrate; bind the shared
    metadata objects to it rather than creating a new engine here.
    """
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    url_column = post_meta.tables['tournament'].columns['url']
    url_column.create()
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Drop the 'url' column added by upgrade().
    post_meta.tables['tournament'].columns['url'].drop()
| {
"content_hash": "33f75e912ee0a00a6708a32431bd0d6e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 32.542857142857144,
"alnum_prop": 0.6883230904302019,
"repo_name": "lawrluor/matchstats",
"id": "983ac84649e72e7936e07f42f090b73192d02781",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db2_repository/versions/015_migration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87079"
},
{
"name": "HTML",
"bytes": "129740"
},
{
"name": "JavaScript",
"bytes": "52833"
},
{
"name": "Python",
"bytes": "255561"
}
],
"symlink_target": ""
} |
__all__ = [
'OAuthToken',
]
class OAuthToken(object):
    """Container for the OAuth credentials handed to a crawler.

    Holds what a crawler needs to authenticate against a source via
    OAuth on behalf of a user, instead of using the user's own
    service-provider credentials. Every field is read-only after
    construction.
    """

    def __init__(self, access_token, refresh_token=None,
                 token_secret=None, consumer_key=None, expires=None):
        self.__access_token = access_token
        self.__refresh_token = refresh_token
        self.__token_secret = token_secret
        self.__consumer_key = consumer_key
        self.__expires = expires

    @property
    def access_token(self):
        """OAuth access token granted to Docido, used to reach the
        protected resources on behalf of the user.

        :rtype: string
        """
        return self.__access_token

    @property
    def refresh_token(self):
        """OAuth refresh token used to recreate the access token.

        :rtype: string
        """
        return self.__refresh_token

    @property
    def token_secret(self):
        """Secret token provided by a service when retrieving an OAuth
        token. Only set when the crawled service's authentication
        mechanism provides one and the crawler needs it to fetch data.

        :rtype: string
        """
        return self.__token_secret

    @property
    def consumer_key(self):
        """Docido consumer key; set when required by the crawler to
        fetch data.

        :rtype: string
        """
        return self.__consumer_key

    @property
    def expires(self):
        """Expiry value provided by the crawled service's authentication
        mechanism when the token was acquired.

        :rtype: string
        """
        return self.__expires
| {
"content_hash": "86dde8bd71b508c86796a8c45df3817c",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 30.32857142857143,
"alnum_prop": 0.6241168158266603,
"repo_name": "LilliJane/docido-python-sdk",
"id": "c889ddbc8ada7e6cbe076a76a714c990ea943afe",
"size": "2123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docido_sdk/oauth/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "152553"
}
],
"symlink_target": ""
} |
import re
import sys
import pgcs.core.data
import pgcs.core.diff
import pgcs.core.load
core = pgcs.core
def get_entries(named_object_list):
    """Return the ``entries`` of *named_object_list*, or [] when it is falsy."""
    if not named_object_list:
        return []
    return named_object_list.entries
def get_object(entry):
    """Return the left-hand object of a diff entry when only it exists.

    ``entry.diff.objects`` is a pair; the first element is returned when
    the second is absent (the object exists on one side only), otherwise
    None.
    """
    left, right = entry.diff.objects
    return left if (left is not None and right is None) else None
def _missing_objects(diff):
    """Yield candidate objects (possibly None) from every diff category.

    Walks languages, namespaces, and each namespace's child object
    sequences, yielding get_object() for every entry found.
    """
    for language_entry in get_entries(diff.languages):
        yield get_object(language_entry)
    for ns_entry in get_entries(diff.namespaces):
        yield get_object(ns_entry)
        child_sequences = (
            ns_entry.diff.types,
            ns_entry.diff.indexes,
            ns_entry.diff.tables,
            ns_entry.diff.views,
            ns_entry.diff.sequences,
            ns_entry.diff.functions,
        )
        for sequence in child_sequences:
            for child_entry in get_entries(sequence):
                yield get_object(child_entry)
def missing_objects(diff):
    """Yield only the objects present on exactly one side of the diff."""
    for candidate in _missing_objects(diff):
        if candidate is None:
            continue
        yield candidate
def format(object):
    """Lookup key for a non-namespaced object: its bare name."""
    name = object.get_name()
    return name
def format_ns(object):
    """Lookup key for a namespaced object: (schema name, object name)."""
    schema = object.namespace.name
    return (schema, object.get_name())
# Maps each core.data model class to (pg_restore kind keyword, formatter).
# The formatter builds the key used in the `filters` lookup table in main().
info = {
    core.data.Language: ("LANGUAGE", format),
    core.data.Namespace: ("SCHEMA", format),
    core.data.Type: ("TYPE", format_ns),
    core.data.Index: ("INDEX", format_ns),
    core.data.Table: ("TABLE", format_ns),
    core.data.View: ("VIEW", format_ns),
    core.data.Sequence: ("SEQUENCE", format_ns),
    core.data.Function: ("FUNCTION", format_ns),
}
def handle_language(filters, tokens):
    """Keep a PROCEDURAL LANGUAGE line when that language is in *filters*."""
    key = ("LANGUAGE", (tokens[5], tokens[6]))
    return filters.get(key) is not None
def handle_namespace(filters, tokens):
    """Keep a SCHEMA line when that schema is in *filters*."""
    key = (tokens[3], tokens[5])
    return filters.get(key) is not None
def handle_function(filters, tokens):
    """Keep a FUNCTION line when its normalized signature is in *filters*.

    The signature spans tokens[5:-1]; argument type names are normalized
    via transform_function_args before the lookup.
    """
    kindname = tokens[3]
    nsname = tokens[4]
    signature = transform_function_args(" ".join(tokens[5:-1]))
    return filters.get((kindname, (nsname, signature))) is not None
def transform_function_args(full_name):
    """Normalize verbose SQL type names in a function signature.

    Splits ``name(arg, arg, ...)``, maps each argument type through
    function_arg_mappings (unknown types pass through unchanged), and
    rebuilds the signature with ", " separators.
    """
    name, argstring = re.match(r"(.*)\((.*)\)", full_name).groups()
    normalized = []
    for raw_arg in argstring.split(","):
        stripped = raw_arg.strip()
        normalized.append(function_arg_mappings.get(stripped, stripped))
    return "%s(%s)" % (name, ", ".join(normalized))
# pg_restore spells argument types verbosely; the catalog comparison uses
# PostgreSQL's internal names. Map the former to the latter.
function_arg_mappings = {
    "boolean": "bool",
    "character varying": "varchar",
    "character": "bpchar",
    "character[]": "_bpchar",
    "integer": "int4",
    "timestamp without time zone": "timestamp",
}
def handle_other(filters, tokens):
    """Keep a TYPE/INDEX/TABLE/VIEW/SEQUENCE line when the object is in *filters*."""
    key = (tokens[3], (tokens[4], tokens[5]))
    return filters.get(key) is not None
# Dispatch from the pg_restore kind keyword at tokens[3] ("PROCEDURAL" for
# languages) to the handler that decides whether to keep the line.
handlers = {
    "PROCEDURAL": handle_language,
    "SCHEMA": handle_namespace,
    "TYPE": handle_other,
    "INDEX": handle_other,
    "TABLE": handle_other,
    "VIEW": handle_other,
    "SEQUENCE": handle_other,
    "FUNCTION": handle_function,
}
def main():
    # Filters a pg_restore list file (read from stdin) so it only keeps
    # entries for objects missing from one of the two databases given on
    # the command line. NOTE: this file is Python 2 (print statements).
    source, target = sys.argv[1:]
    databases = core.load.load_databases([source, target])
    diff_tree = core.diff.diff_databases(databases)
    # Index missing objects by (kind keyword, formatted name) for lookup.
    filters = {}
    for object in missing_objects(diff_tree):
        kind = type(object)
        kindname, formatter = info[kind]
        name = formatter(object)
        filters[(kindname, name)] = object
    for line in sys.stdin:
        if line.startswith(";"):
            # Comment lines pass through unchanged.
            print line,
        else:
            tokens = line.split()
            if len(tokens) >= 7:
                kind = tokens[3]
                handler = handlers.get(kind)
                if handler:
                    if handler(filters, tokens):
                        print line,
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "abea27512a8e608f7302e4508f791aa6",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 64,
"avg_line_length": 24.211267605633804,
"alnum_prop": 0.6559045956951716,
"repo_name": "somia/pgcs",
"id": "ba32d572ce029bf3f2fa6309e794a373fcc37069",
"size": "3438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgcs/tool/copy.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
class Assign:
    """Expr node for assignments.

    This type of expression can be parsed anywhere, but can only be
    resolved within function call arguments.
    """

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __eq__(self, other):
        if isinstance(other, type(self)):
            return (self.name, self.value) == (other.name, other.value)
        return False

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        value_repr = " ".join(str(self.value).splitlines(True))
        return f"Assign(name={self.name}, value={value_repr}\n)"

    def accept(self, visitor):
        return visitor.visitAssignExpr(self)
class Grouping:
    """Expr node for a parenthesized sub-expression."""

    def __init__(self, expression):
        self.expression = expression

    def __eq__(self, other):
        return (
            isinstance(other, type(self))
            and self.expression == other.expression
        )

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        inner = " ".join(str(self.expression).splitlines(True))
        return "Grouping(\n " + inner + "\n)"

    def accept(self, visitor):
        return visitor.visitGroupingExpr(self)
class Binary:
    """Expr node for a binary operation: ``left <operator> right``."""

    def __init__(self, left, operator, right):
        self.left = left
        self.operator = operator
        self.right = right

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        ours = (self.left, self.operator, self.right)
        theirs = (other.left, other.operator, other.right)
        return ours == theirs

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        left = " ".join(str(self.left).splitlines(True))
        right = " ".join(str(self.right).splitlines(True))
        string_list = ["left=" + left, "op=" + str(self.operator.lexeme), "right=" + right]
        return "Binary(\n " + ",\n ".join(string_list) + "\n)"

    def accept(self, visitor):
        return visitor.visitBinaryExpr(self)
class Unary:
    """Expr node for a unary operation: ``<operator> right``."""

    def __init__(self, operator, right):
        self.operator = operator
        self.right = right

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return (self.operator, self.right) == (other.operator, other.right)

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        right = " ".join(str(self.right).splitlines(True))
        string_list = ["op=" + str(self.operator.lexeme), "right=" + right]
        return "Unary(\n " + ", ".join(string_list) + "\n)"

    def accept(self, visitor):
        return visitor.visitUnaryExpr(self)
class Call:
    """Expr node for a function call: callee plus its argument list."""

    def __init__(self, callee, args):
        self.callee = callee
        self.args = args

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return (self.callee, self.args) == (other.callee, other.args)

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        string_list = [
            "callee=" + str(self.callee),
            "args=" + " ".join(str(self.args).splitlines(True)),
        ]
        return "Call(\n " + ",\n ".join(string_list) + "\n)"

    def accept(self, visitor):
        return visitor.visitCallExpr(self)
class Variable:
    """Expr node for a named variable reference, optionally with a level."""

    def __init__(self, name, level=None):
        self.name = name
        self.level = level

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return (self.name, self.level) == (other.name, other.level)

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        string_list = ["name=" + self.name.lexeme]
        if self.level is not None:
            string_list.append("level=" + self.level.value)
        return "Variable(" + ",\n ".join(string_list) + ")"

    def accept(self, visitor):
        return visitor.visitVariableExpr(self)
class QuotedName:
    """Expr node for back-quoted names (i.e. `@1wrid_name!!`)."""

    def __init__(self, expression):
        self.expression = expression

    def __eq__(self, other):
        return (
            isinstance(other, type(self))
            and self.expression == other.expression
        )

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        return "QuotedName(" + self.expression.lexeme + ")"

    def accept(self, visitor):
        return visitor.visitQuotedNameExpr(self)
class Literal:
    """Expr node wrapping a constant value."""

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.value == other.value

    def __repr__(self):  # pragma: no cover
        return str(self)

    def __str__(self):  # pragma: no cover
        return "Literal(" + str(self.value) + ")"

    def accept(self, visitor):
        return visitor.visitLiteralExpr(self)
| {
"content_hash": "453583e69a0dda396556e55d670facc9",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 91,
"avg_line_length": 28.80327868852459,
"alnum_prop": 0.5655473344716373,
"repo_name": "bambinos/formulae",
"id": "13b60063b333df9c762e5e6e9bf43ec3d984bfa6",
"size": "5271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "formulae/expr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "36118"
},
{
"name": "Python",
"bytes": "254943"
}
],
"symlink_target": ""
} |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Disable ling too long warnings for this file, since we are building shader
# code which is way more readable when not splitted into multiple lines.
# pylint: disable=C0301
from direct.stdpy.file import isfile, open
from rpcore.image import Image
from rpcore.rpobject import RPObject
from rpcore.loader import RPLoader
class DisplayShaderBuilder(object):

    """ Utility class to generate shaders on the fly to display texture previews
    and also buffers """

    @classmethod
    def build(cls, texture, view_width, view_height):
        """ Builds a shader to display <texture> in a view port with the size
        <view_width> * <view_height> """
        view_width, view_height = int(view_width), int(view_height)

        # The cache key encodes every property the generated source depends
        # on, so an existing file can be reused verbatim.
        cache_key = "/$$rptemp/$$TEXDISPLAY-X{}-Y{}-Z{}-TT{}-CT{}-VW{}-VH{}.frag.glsl".format(
            texture.get_x_size(),
            texture.get_y_size(),
            texture.get_z_size(),
            texture.get_texture_type(),
            texture.get_component_type(),
            view_width,
            view_height)

        # Only regenerate the file when there is no cache entry for it.
        # BUGFIX: the condition previously ended in "or True", which forced
        # a rebuild on every call and made the cache dead code.
        if not isfile(cache_key):
            fragment_shader = cls._build_fragment_shader(texture, view_width, view_height)
            with open(cache_key, "w") as handle:
                handle.write(fragment_shader)

        return RPLoader.load_shader("/$$rp/shader/default_gui_shader.vert.glsl", cache_key)

    @classmethod
    def _build_fragment_shader(cls, texture, view_width, view_height):
        """ Internal method to build a fragment shader displaying the texture """
        sampling_code, sampler_type = cls._generate_sampling_code(texture, view_width, view_height)

        # Build actual shader around the per-texture-type sampling snippet.
        built = """
        #version 400
        #pragma include "render_pipeline_base.inc.glsl"
        in vec2 texcoord;
        out vec3 result;
        uniform int mipmap;
        uniform int slice;
        uniform float brightness;
        uniform bool tonemap;
        uniform """ + sampler_type + """ p3d_Texture0;
        void main() {
            int view_width = """ + str(view_width) + """;
            int view_height = """ + str(view_height) + """;
            ivec2 display_coord = ivec2(texcoord * vec2(view_width, view_height));
            int int_index = display_coord.x + display_coord.y * view_width;
            """ + sampling_code + """
            result *= brightness;
            if (tonemap)
                result = result / (1 + result);
        }
        """

        # Strip leading/trailing whitespace per line; line-relative
        # indentation is irrelevant to the GLSL compiler.
        built = '\n'.join([i.strip() for i in built.split("\n")])
        return built

    @classmethod
    def _generate_sampling_code(cls, texture, view_width, view_height):  # pylint: disable=W0613
        """ Generates the GLSL code to sample a texture and also returns the
        GLSL sampler type """
        texture_type = texture.get_texture_type()
        comp_type = texture.get_component_type()

        # Useful snippets
        int_coord = "ivec2 int_coord = ivec2(texcoord * textureSize(p3d_Texture0, mipmap).xy);"
        slice_count = "int slice_count = textureSize(p3d_Texture0, 0).z;"

        float_types = [Image.T_float, Image.T_unsigned_byte]
        int_types = [Image.T_int, Image.T_unsigned_short, Image.T_unsigned_int_24_8]

        # Fallback: solid magenta so unhandled cases are clearly visible.
        result = "result = vec3(1, 0, 1);", "sampler2D"

        if comp_type not in float_types + int_types:
            RPObject.global_warn("DisplayShaderBuilder", "Unknown texture component type:", comp_type)

        # 2D Textures
        if texture_type == Image.TT_2d_texture:
            if comp_type in float_types:
                result = "result = textureLod(p3d_Texture0, texcoord, mipmap).xyz;", "sampler2D"
            elif comp_type in int_types:
                result = int_coord + "result = texelFetch(p3d_Texture0, int_coord, mipmap).xyz / 10.0;", "isampler2D"

        # Buffer Textures
        elif texture_type == Image.TT_buffer_texture:
            # Guard the fetch; out-of-range indices render orange.
            range_check = lambda s: "if (int_index < textureSize(p3d_Texture0)) {" + s + "} else { result = vec3(1.0, 0.6, 0.2);};"

            if comp_type in float_types:
                result = range_check("result = texelFetch(p3d_Texture0, int_index).xyz;"), "samplerBuffer"
            elif comp_type in int_types:
                result = range_check("result = texelFetch(p3d_Texture0, int_index).xyz / 10.0;"), "isamplerBuffer"

        # 3D Textures
        elif texture_type == Image.TT_3d_texture:
            if comp_type in float_types:
                result = slice_count + "result = textureLod(p3d_Texture0, vec3(texcoord, (0.5 + slice) / slice_count), mipmap).xyz;", "sampler3D"
            elif comp_type in int_types:
                result = int_coord + "result = texelFetch(p3d_Texture0, ivec3(int_coord, slice), mipmap).xyz / 10.0;", "isampler3D"

        # 2D Texture Array
        elif texture_type == Image.TT_2d_texture_array:
            if comp_type in float_types:
                result = "result = textureLod(p3d_Texture0, vec3(texcoord, slice), mipmap).xyz;", "sampler2DArray"
            elif comp_type in int_types:
                result = int_coord + "result = texelFetch(p3d_Texture0, ivec3(int_coord, slice), mipmap).xyz / 10.0;", "isampler2DArray"

        # Cubemap
        elif texture_type == Image.TT_cube_map:
            code = "vec3 sample_dir = get_cubemap_coordinate(slice, texcoord*2-1);\n"
            code += "result = textureLod(p3d_Texture0, sample_dir, mipmap).xyz;"
            result = code, "samplerCube"

        # Cubemap array: slice selects face (slice % 6) and layer (slice / 6).
        elif texture_type == Image.TT_cube_map_array:
            code = "vec3 sample_dir = get_cubemap_coordinate(slice % 6, texcoord*2-1);\n"
            code += "result = textureLod(p3d_Texture0, vec4(sample_dir, slice / 6), mipmap).xyz;"
            result = code, "samplerCubeArray"

        else:
            print("WARNING: Unhandled texture type", texture_type, "in display shader builder")

        return result
| {
"content_hash": "f0a446868411bf151028a02d8e856ff3",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 145,
"avg_line_length": 42.10857142857143,
"alnum_prop": 0.6103948975437644,
"repo_name": "croxis/SpaceDrive",
"id": "4b546d3a8a0e118d15724483ed3f687b87356e90",
"size": "7369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacedrive/renderpipeline/rpcore/util/display_shader_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "21897"
},
{
"name": "C++",
"bytes": "165025"
},
{
"name": "GLSL",
"bytes": "741524"
},
{
"name": "Groff",
"bytes": "119"
},
{
"name": "Python",
"bytes": "1523574"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
from taiga.base.api import ModelCrudViewSet
from taiga.base.api import ModelUpdateRetrieveViewSet
from taiga.base.api.mixins import BlockedByProjectMixin
from taiga.base import exceptions as exc
from taiga.base import filters
from taiga.base import response
from taiga.projects.mixins.ordering import BulkUpdateOrderMixin
from taiga.projects.history.mixins import HistoryResourceMixin
from taiga.projects.notifications.mixins import WatchedResourceMixin
from taiga.projects.occ.mixins import OCCResourceMixin
from . import models
from . import serializers
from . import permissions
from . import services
#######################################################
# Custom Attribute ViewSets
#######################################################
class UserStoryCustomAttributeViewSet(BulkUpdateOrderMixin, BlockedByProjectMixin, ModelCrudViewSet):
    """CRUD endpoint for user-story custom attribute definitions,
    with bulk reordering via BulkUpdateOrderMixin."""
    model = models.UserStoryCustomAttribute
    serializer_class = serializers.UserStoryCustomAttributeSerializer
    permission_classes = (permissions.UserStoryCustomAttributePermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
    # Payload key, permission and service used by BulkUpdateOrderMixin.
    bulk_update_param = "bulk_userstory_custom_attributes"
    bulk_update_perm = "change_userstory_custom_attributes"
    bulk_update_order_action = services.bulk_update_userstory_custom_attribute_order
class TaskCustomAttributeViewSet(BulkUpdateOrderMixin, BlockedByProjectMixin, ModelCrudViewSet):
    """CRUD endpoint for task custom attribute definitions,
    with bulk reordering via BulkUpdateOrderMixin."""
    model = models.TaskCustomAttribute
    serializer_class = serializers.TaskCustomAttributeSerializer
    permission_classes = (permissions.TaskCustomAttributePermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
    # Payload key, permission and service used by BulkUpdateOrderMixin.
    bulk_update_param = "bulk_task_custom_attributes"
    bulk_update_perm = "change_task_custom_attributes"
    bulk_update_order_action = services.bulk_update_task_custom_attribute_order
class IssueCustomAttributeViewSet(BulkUpdateOrderMixin, BlockedByProjectMixin, ModelCrudViewSet):
    """CRUD endpoint for issue custom attribute definitions,
    with bulk reordering via BulkUpdateOrderMixin."""
    model = models.IssueCustomAttribute
    serializer_class = serializers.IssueCustomAttributeSerializer
    permission_classes = (permissions.IssueCustomAttributePermission,)
    filter_backends = (filters.CanViewProjectFilterBackend,)
    filter_fields = ("project",)
    # Payload key, permission and service used by BulkUpdateOrderMixin.
    bulk_update_param = "bulk_issue_custom_attributes"
    bulk_update_perm = "change_issue_custom_attributes"
    bulk_update_order_action = services.bulk_update_issue_custom_attribute_order
#######################################################
# Custom Attributes Values ViewSets
#######################################################
class BaseCustomAttributesValuesViewSet(OCCResourceMixin, HistoryResourceMixin, WatchedResourceMixin,
                                        BlockedByProjectMixin, ModelUpdateRetrieveViewSet):
    """Common base for the custom-attributes-values viewsets below.

    Subclasses set ``content_object`` to the attribute name of the
    related object (user story, task or issue).
    """
    def get_object_for_snapshot(self, obj):
        # History snapshots are taken of the related object, not of the
        # values row itself.
        return getattr(obj, self.content_object)
class UserStoryCustomAttributesValuesViewSet(BaseCustomAttributesValuesViewSet):
    """Retrieve/update the custom attribute values of a user story."""
    model = models.UserStoryCustomAttributesValues
    serializer_class = serializers.UserStoryCustomAttributesValuesSerializer
    permission_classes = (permissions.UserStoryCustomAttributesValuesPermission,)
    lookup_field = "user_story_id"
    content_object = "user_story"

    def get_queryset(self):
        qs = self.model.objects.all()
        # Avoid extra queries when the serializer touches the user story.
        qs = qs.select_related("user_story", "user_story__project")
        return qs
class TaskCustomAttributesValuesViewSet(BaseCustomAttributesValuesViewSet):
    """Retrieve/update the custom attribute values of a task."""
    model = models.TaskCustomAttributesValues
    serializer_class = serializers.TaskCustomAttributesValuesSerializer
    permission_classes = (permissions.TaskCustomAttributesValuesPermission,)
    lookup_field = "task_id"
    content_object = "task"

    def get_queryset(self):
        qs = self.model.objects.all()
        # Avoid extra queries when the serializer touches the task.
        qs = qs.select_related("task", "task__project")
        return qs
class IssueCustomAttributesValuesViewSet(BaseCustomAttributesValuesViewSet):
    """Retrieve/update the custom attribute values of an issue."""
    model = models.IssueCustomAttributesValues
    serializer_class = serializers.IssueCustomAttributesValuesSerializer
    permission_classes = (permissions.IssueCustomAttributesValuesPermission,)
    lookup_field = "issue_id"
    content_object = "issue"

    def get_queryset(self):
        qs = self.model.objects.all()
        # Avoid extra queries when the serializer touches the issue.
        qs = qs.select_related("issue", "issue__project")
        return qs
| {
"content_hash": "98cbdf6de2e10960bc03e6ed1d78ff11",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 101,
"avg_line_length": 42.46153846153846,
"alnum_prop": 0.7409420289855072,
"repo_name": "curiosityio/taiga-docker",
"id": "a11d6e31869f1423518100c756e89c683f72167c",
"size": "5328",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taiga-back/taiga-back/taiga/projects/custom_attributes/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "186988"
},
{
"name": "JavaScript",
"bytes": "2007"
},
{
"name": "Nginx",
"bytes": "4140"
},
{
"name": "Python",
"bytes": "2793020"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_region_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vultr_region_info) instead.
short_description: Gather facts about the Vultr regions available.
description:
- Gather facts about regions available to boot servers.
version_added: "2.7"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr regions facts
local_action:
module: vultr_region_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_region_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_region_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
"vultr_region_facts": [
{
"block_storage": false,
"continent": "Europe",
"country": "GB",
"ddos_protection": true,
"id": 8,
"name": "London",
"regioncode": "LHR",
"state": ""
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrRegionFacts(Vultr):
    """Collects region facts from the Vultr API."""

    def __init__(self, module):
        super(AnsibleVultrRegionFacts, self).__init__(module, "vultr_region_facts")

        # Field mapping for the result: API field -> returned fact
        # (optionally renamed via 'key' and cast via 'convert_to').
        self.returns = {
            "DCID": dict(key='id', convert_to='int'),
            "block_storage": dict(convert_to='bool'),
            "continent": dict(),
            "country": dict(),
            "ddos_protection": dict(convert_to='bool'),
            "name": dict(),
            "regioncode": dict(),
            "state": dict()
        }

    def get_regions(self):
        # Raw API response: a dict keyed by region id.
        return self.api_query(path="/v1/regions/list")
def parse_regions_list(regions_list):
    """Flatten the API response ``{region_id: region_dict}`` to a list.

    The dict keys are dropped; each region dict already carries its own
    DCID. (Replaces a comprehension that shadowed the builtin ``id``.)
    """
    return list(regions_list.values())
def main():
    argument_spec = vultr_argument_spec()

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    region_facts = AnsibleVultrRegionFacts(module)
    # get_result() presumably maps raw fields through self.returns --
    # confirm in the Vultr base class.
    result = region_facts.get_result(parse_regions_list(region_facts.get_regions()))
    # Expose the regions both as ansible_facts and in the plain result.
    ansible_facts = {
        'vultr_region_facts': result['vultr_region_facts']
    }
    module.exit_json(ansible_facts=ansible_facts, **result)
# Ansible module entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "a516b6f46732aff693a3fab1c72253d8",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 99,
"avg_line_length": 25.872180451127818,
"alnum_prop": 0.6262714327230456,
"repo_name": "thaim/ansible",
"id": "ec0a81cadd9140f10e8bf163ef95f3b4e0f5c00a",
"size": "3633",
"binary": false,
"copies": "7",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vultr/_vultr_region_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from time import time
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from canvas.browse import frontpage_algorithms
from canvas.models import Content, Comment, Category
class Command(BaseCommand):
    # NOTE: this file is Python 2 (print statements below).
    args = ''
    help = 'Update comment scores for the front pages.'

    def handle(self, generations, *args, **options):
        # Recomputes stored scores for progressively wider sets of
        # comments; `generations` (1-3) selects how wide a net to cast.
        start = time()
        updates = 0
        epsilon = 0.01  # NOTE(review): unused here -- confirm before removing.
        generations = int(generations)
        print "Generations: %s" % generations

        def flatten(list_of_lists):
            # Union of all ids across the given id-lists, as ints.
            return set([int(item) for sublist in list_of_lists for item in sublist])

        # Generation 1: top 100 of the global popular list.
        comment_ids = flatten([Category.ALL.popular[0:100]])
        print "Gen 1", len(comment_ids)
        if generations > 1:
            # Generation 2: the full global popular list.
            comment_ids |= flatten([Category.ALL.popular[:]])
            print "Gen 2", len(comment_ids)
        if generations > 2:
            # Generation 3: the popular lists of every category.
            comment_ids |= flatten([category.popular[:] for category in Category.all_objects.all()])
            print "Gen 3", len(comment_ids)

        for comment in Comment.all_objects.in_bulk_list(comment_ids):
            updates += 1
            comment.update_score()
            # Also refresh the top replies of each selected comment.
            for child_comment in Comment.all_objects.in_bulk_list(comment.popular_replies[0:3]):
                updates += 1
                child_comment.update_score()

        print "Scores updated. Rows updated: %s Total elapsed time: %0.2fs" % (updates, (time() - start))

        if generations == 2:
            print "Running hypothesis-testing scoring functions."
            frontpage_algorithms.update_scores()
| {
"content_hash": "a5bb694ff5680f0c0b96e96ab18daa9e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 105,
"avg_line_length": 33.795918367346935,
"alnum_prop": 0.6280193236714976,
"repo_name": "canvasnetworks/canvas",
"id": "b2f8b4e1fcb06696a0b922ab4ab64f8258116b6f",
"size": "1656",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/canvas/management/commands/update_scores.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "537625"
},
{
"name": "HTML",
"bytes": "689709"
},
{
"name": "JavaScript",
"bytes": "1313262"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6659685"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "5326"
}
],
"symlink_target": ""
} |
def str2tuple(s, sep='/'):
    """
    Given the string representation of a tagged token, return the
    corresponding tuple representation. The rightmost occurrence of
    *sep* in *s* will be used to divide *s* into a word string and
    a tag string. If *sep* does not occur in *s*, return (s, None).

        >>> from nltk.tag.util import str2tuple
        >>> str2tuple('fly/NN')
        ('fly', 'NN')

    :type s: str
    :param s: The string representation of a tagged token.
    :type sep: str
    :param sep: The separator string used to separate word strings
        from tags.
    """
    pos = s.rfind(sep)
    if pos < 0:
        # No separator: the token carries no tag.
        return (s, None)
    # Tags are normalized to upper case.
    return (s[:pos], s[pos + len(sep):].upper())
def tuple2str(tagged_token, sep='/'):
    """
    Given the tuple representation of a tagged token, return the
    corresponding string representation: the word string, the
    separator, then the tag. If the tag is None, return just the
    bare word string.

        >>> from nltk.tag.util import tuple2str
        >>> tagged_token = ('fly', 'NN')
        >>> tuple2str(tagged_token)
        'fly/NN'

    :type tagged_token: tuple(str, str)
    :param tagged_token: The tuple representation of a tagged token.
    :type sep: str
    :param sep: The separator string used to separate word strings
        from tags.
    """
    word, tag = tagged_token
    if tag is None:
        # Untagged token: just the bare word.
        return word
    # The separator must not appear inside the tag, or the result
    # could not be parsed back by str2tuple().
    assert sep not in tag, 'tag may not contain sep!'
    return "%s%s%s" % (word, sep, tag)
def untag(tagged_sentence):
    """
    Given a tagged sentence, return an untagged version of that
    sentence: a list containing the first element of each tuple in
    *tagged_sentence*.

        >>> from nltk.tag.util import untag
        >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')])
        ['John', 'saw', 'Mary']
    """
    return [token for (token, _) in tagged_sentence]
# Run the doctests embedded in the docstrings above.
if __name__ == "__main__":
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| {
"content_hash": "efedda754a52d84f937d08654516ce63",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 69,
"avg_line_length": 32,
"alnum_prop": 0.6138059701492538,
"repo_name": "haya14busa/alc-etm-searcher",
"id": "6893f3602da948d035ffdcf2ab0f5143b3dbe12e",
"size": "2389",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nltk-3.0a3/build/lib/nltk/tag/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11448"
},
{
"name": "Java",
"bytes": "30518"
},
{
"name": "Python",
"bytes": "6856183"
}
],
"symlink_target": ""
} |
"""The tests for the Netatmo sensor platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.netatmo import sensor
from homeassistant.components.netatmo.sensor import MODULE_TYPE_WIND
from homeassistant.helpers import entity_registry as er
from .common import TEST_TIME, selected_platforms
async def test_weather_sensor(hass, config_entry, netatmo_auth):
    """Test weather sensor setup.

    Sets up only the sensor platform with time.time frozen to TEST_TIME,
    then checks the station's sensor states.  The expected values
    presumably come from the recorded fixture payload for "MyStation" —
    confirm against the fixtures used by the netatmo_auth fixture.
    """
    with patch("time.time", return_value=TEST_TIME), selected_platforms(["sensor"]):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    prefix = "sensor.netatmo_mystation_"
    assert hass.states.get(f"{prefix}temperature").state == "24.6"
    assert hass.states.get(f"{prefix}humidity").state == "36"
    assert hass.states.get(f"{prefix}co2").state == "749"
    assert hass.states.get(f"{prefix}pressure").state == "1017.3"
async def test_public_weather_sensor(hass, config_entry, netatmo_auth):
    """Test public weather sensor setup.

    Verifies the "max" and "avg" area sensors, then reconfigures the
    "Home avg" area through the options flow and checks that no new
    entities appear and the area now reports max-mode values.
    """
    with patch("time.time", return_value=TEST_TIME), selected_platforms(["sensor"]):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    assert len(hass.states.async_all()) > 0
    # area configured in "max" mode
    prefix = "sensor.netatmo_home_max_"
    assert hass.states.get(f"{prefix}temperature").state == "27.4"
    assert hass.states.get(f"{prefix}humidity").state == "76"
    assert hass.states.get(f"{prefix}pressure").state == "1014.4"
    # area configured in "avg" mode
    prefix = "sensor.netatmo_home_avg_"
    assert hass.states.get(f"{prefix}temperature").state == "22.7"
    assert hass.states.get(f"{prefix}humidity").state == "63.2"
    assert hass.states.get(f"{prefix}pressure").state == "1010.3"
    entities_before_change = len(hass.states.async_all())
    # options-flow payload switching "Home avg" to mode "max" with new bounds
    valid_option = {
        "lat_ne": 32.91336,
        "lon_ne": -117.187429,
        "lat_sw": 32.83336,
        "lon_sw": -117.26743,
        "show_on_map": True,
        "area_name": "Home avg",
        "mode": "max",
    }
    # three steps: pick the area, submit its options, confirm
    result = await hass.config_entries.options.async_init(config_entry.entry_id)
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={"new_area": "Home avg"}
    )
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input=valid_option
    )
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={}
    )
    await hass.async_block_till_done()
    # reconfiguring an existing area must not add entities, and the former
    # avg-prefixed sensor now reports the max-mode reading
    assert len(hass.states.async_all()) == entities_before_change
    assert hass.states.get(f"{prefix}temperature").state == "27.4"
@pytest.mark.parametrize(
    "strength, expected",
    [(50, "Full"), (60, "High"), (80, "Medium"), (90, "Low")],
)
async def test_process_wifi(strength, expected):
    """Check that a raw wifi strength value maps to the expected label."""
    label = sensor.process_wifi(strength)
    assert label == expected
@pytest.mark.parametrize(
    "strength, expected",
    [(50, "Full"), (70, "High"), (80, "Medium"), (90, "Low")],
)
async def test_process_rf(strength, expected):
    """Check that a raw radio strength value maps to the expected label."""
    label = sensor.process_rf(strength)
    assert label == expected
@pytest.mark.parametrize(
    "health, expected",
    [(4, "Unhealthy"), (3, "Poor"), (2, "Fair"), (1, "Fine"), (0, "Healthy")],
)
async def test_process_health(health, expected):
    """Check that a health index maps to the expected label."""
    label = sensor.process_health(health)
    assert label == expected
@pytest.mark.parametrize(
    "model, data, expected",
    [
        (MODULE_TYPE_WIND, 5591, "Full"),
        (MODULE_TYPE_WIND, 5181, "High"),
        (MODULE_TYPE_WIND, 4771, "Medium"),
        (MODULE_TYPE_WIND, 4361, "Low"),
        (MODULE_TYPE_WIND, 4300, "Very Low"),
    ],
)
async def test_process_battery(model, data, expected):
    """Check battery level translation for the wind module type."""
    label = sensor.process_battery(data, model)
    assert label == expected
@pytest.mark.parametrize(
    "angle, expected",
    [
        (0, "N"),
        (40, "NE"),
        (70, "E"),
        (130, "SE"),
        (160, "S"),
        (220, "SW"),
        (250, "W"),
        (310, "NW"),
        (340, "N"),
    ],
)
async def test_process_angle(angle, expected):
    """Check translation from wind angle in degrees to a compass point."""
    direction = sensor.process_angle(angle)
    assert direction == expected
@pytest.mark.parametrize(
    "angle, expected",
    [(-1, 359), (-40, 320)],
)
async def test_fix_angle(angle, expected):
    """Check that negative wind angles are normalized into [0, 360)."""
    fixed = sensor.fix_angle(angle)
    assert fixed == expected
@pytest.mark.parametrize(
    "uid, name, expected",
    [
        ("12:34:56:37:11:ca-reachable", "netatmo_mystation_reachable", "True"),
        ("12:34:56:03:1b:e4-rf_status", "netatmo_mystation_yard_radio", "Full"),
        (
            "12:34:56:05:25:6e-rf_status",
            "netatmo_valley_road_rain_gauge_radio",
            "Medium",
        ),
        (
            "12:34:56:36:fc:de-rf_status_lvl",
            "netatmo_mystation_netatmooutdoor_radio_level",
            "65",
        ),
        (
            "12:34:56:37:11:ca-wifi_status_lvl",
            "netatmo_mystation_wifi_level",
            "45",
        ),
        (
            "12:34:56:37:11:ca-wifi_status",
            "netatmo_mystation_wifi_status",
            "Full",
        ),
        (
            "12:34:56:37:11:ca-temp_trend",
            "netatmo_mystation_temperature_trend",
            "stable",
        ),
        (
            "12:34:56:37:11:ca-pressure_trend",
            "netatmo_mystation_pressure_trend",
            "down",
        ),
        ("12:34:56:05:51:20-sum_rain_1", "netatmo_mystation_yard_rain_last_hour", "0"),
        ("12:34:56:05:51:20-sum_rain_24", "netatmo_mystation_yard_rain_today", "0"),
        ("12:34:56:03:1b:e4-windangle", "netatmo_mystation_garden_direction", "SW"),
        (
            "12:34:56:03:1b:e4-windangle_value",
            "netatmo_mystation_garden_angle",
            "217",
        ),
        ("12:34:56:03:1b:e4-gustangle", "mystation_garden_gust_direction", "S"),
        (
            "12:34:56:03:1b:e4-gustangle",
            "netatmo_mystation_garden_gust_direction",
            "S",
        ),
        (
            "12:34:56:03:1b:e4-gustangle_value",
            "netatmo_mystation_garden_gust_angle_value",
            "206",
        ),
        (
            "12:34:56:03:1b:e4-guststrength",
            "netatmo_mystation_garden_gust_strength",
            "9",
        ),
        (
            "12:34:56:26:68:92-health_idx",
            "netatmo_baby_bedroom_health",
            "Fine",
        ),
    ],
)
async def test_weather_sensor_enabling(
    hass, config_entry, uid, name, expected, netatmo_auth
):
    """Test enabling of by default disabled sensors.

    Each case pre-registers an entity (by its unique id *uid*) with
    disabled_by=None before setup, then verifies that after setup the
    entity exists and reports the expected state.
    """
    with patch("time.time", return_value=TEST_TIME), selected_platforms(["sensor"]):
        states_before = len(hass.states.async_all())
        # entity must not exist until it is registered as enabled
        assert hass.states.get(f"sensor.{name}") is None
        registry = er.async_get(hass)
        # pre-create the registry entry with disabled_by=None so the
        # platform instantiates this normally-disabled sensor
        registry.async_get_or_create(
            "sensor",
            "netatmo",
            uid,
            suggested_object_id=name,
            disabled_by=None,
        )
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        assert len(hass.states.async_all()) > states_before
        assert hass.states.get(f"sensor.{name}").state == expected
| {
"content_hash": "4165e5e83d768ace989f6e5697eb712c",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 87,
"avg_line_length": 31.43404255319149,
"alnum_prop": 0.5850819006362529,
"repo_name": "aronsky/home-assistant",
"id": "bebd8e0191c487b5e5838e6d0bc0faed6ef14e17",
"size": "7387",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "tests/components/netatmo/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import os
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden, HttpResponseBadRequest
from django.contrib.sessions.models import Session
from django.conf import settings
from django.db.models import Q
from django.core.exceptions import PermissionDenied
from models import Folder, Image, Clipboard, ClipboardItem
from models import tools
from models import FolderRoot, UnfiledImages, ImagesWithMissingData
from django.contrib.auth.models import User
from django import forms
from django.contrib import admin
class NewFolderForm(forms.ModelForm):
    """Minimal form for creating a Folder; only the name is user-editable.

    Parent and owner are assigned by the view (see make_folder), not the form.
    """
    class Meta:
        model = Folder
        fields = ('name', )
def popup_status(request):
    """Return True when the request comes from an admin popup window.

    The popup state is signalled by either the ``_popup`` or ``pop``
    query/post parameter being present in ``request.REQUEST``.
    """
    # dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent on QueryDict and works on both Python versions.
    return '_popup' in request.REQUEST or 'pop' in request.REQUEST
def selectfolder_status(request):
    """Return True when the request asks for folder-selection mode
    (``select_folder`` parameter present in ``request.REQUEST``)."""
    # dict.has_key() was removed in Python 3; `in` is the portable spelling.
    return 'select_folder' in request.REQUEST
def popup_param(request):
    """Query-string suffix that preserves popup state across redirects."""
    return "?_popup=1" if popup_status(request) else ""
def _userperms(item, request):
r = []
ps = ['read', 'edit', 'add_children']
for p in ps:
attr = "has_%s_permission" % p
if hasattr(item, attr):
x = getattr(item, attr)(request)
if x:
r.append( p )
return r
@login_required
def directory_listing(request, folder_id=None, viewtype=None):
    """Render a folder's contents (subfolders and images), with search.

    *viewtype* selects one of the virtual folders ('images_with_missing_data'
    or 'unfiled_images'); otherwise *folder_id* selects a real Folder, and
    no id at all shows the root.  A ``q`` GET parameter switches to search
    mode, optionally limited to the current subtree via
    ``limit_search_to_folder``.  Results are filtered down to what the
    current user may read.
    """
    clipboard = tools.get_user_clipboard(request.user)
    # resolve which (possibly virtual) folder is being listed
    if viewtype=='images_with_missing_data':
        folder = ImagesWithMissingData()
    elif viewtype=='unfiled_images':
        folder = UnfiledImages()
    elif folder_id == None:
        folder = FolderRoot()
    else:
        folder = Folder.objects.get(id=folder_id)
    # search
    # NOTE(review): the mutable default `terms=[]` is shared across calls;
    # harmless here because it is never mutated, but worth cleaning up.
    def filter_folder(qs, terms=[]):
        # every term must match name or one of the owner's name fields
        for term in terms:
            qs = qs.filter(Q(name__icontains=term) | Q(owner__username__icontains=term) | Q(owner__first_name__icontains=term) | Q(owner__last_name__icontains=term) )
        return qs
    def filter_image(qs, terms=[]):
        # like filter_folder, plus the original filename
        for term in terms:
            qs = qs.filter( Q(name__icontains=term) | Q(original_filename__icontains=term ) | Q(owner__username__icontains=term) | Q(owner__first_name__icontains=term) | Q(owner__last_name__icontains=term) )
        return qs
    q = request.GET.get('q', None)
    if q:
        search_terms = q.split(" ")
    else:
        search_terms = []
    # checkbox submits 'on'; True kept for programmatic callers
    limit_search_to_folder = request.GET.get('limit_search_to_folder', False) in (True, 'on')
    if len(search_terms)>0:
        # search mode: either the current subtree or the whole library
        if folder and limit_search_to_folder and not folder.is_root:
            folder_qs = folder.get_descendants()
            # TODO: check how folder__in=folder.get_descendats() performs in large trees
            image_qs = Image.objects.filter(folder__in=folder.get_descendants())
        else:
            folder_qs = Folder.objects.all()
            image_qs = Image.objects.all()
        folder_qs = filter_folder(folder_qs, search_terms)
        image_qs = filter_image(image_qs, search_terms)
        show_result_count = True
    else:
        # plain listing of the current folder
        folder_qs = folder.children.all()
        image_qs = folder.image_files.all()
        show_result_count = False
    folder_qs = folder_qs.order_by('name')
    image_qs = image_qs.order_by('name')
    folder_children = []
    folder_files = []
    # keep only entries the user may read; objects without a permission
    # hook (e.g. virtual folders) are always visible
    for f in folder_qs:
        f.perms = _userperms(f, request)
        if hasattr(f, 'has_read_permission'):
            if f.has_read_permission(request):
                #print "%s has read permission for %s" % (request.user, f)
                folder_children.append(f)
            else:
                pass#print "%s has NO read permission for %s" % (request.user, f)
        else:
            folder_children.append(f)
    for f in image_qs:
        f.perms = _userperms(f, request)
        if hasattr(f, 'has_read_permission'):
            if f.has_read_permission(request):
                #print "%s has read permission for %s" % (request.user, f)
                folder_files.append(f)
            else:
                pass#print "%s has NO read permission for %s" % (request.user, f)
        else:
            folder_files.append(f)
    # NOTE(review): bare except — presumably guards virtual folders that
    # lack these permission methods, but it also hides real errors.
    try:
        permissions = {
            'has_edit_permission': folder.has_edit_permission(request),
            'has_read_permission': folder.has_read_permission(request),
            'has_add_children_permission': folder.has_add_children_permission(request),
        }
    except:
        permissions = {}
    #print admin.site.root_path
    return render_to_response('image_filer/directory_listing.html', {
            'folder':folder,
            'folder_children':folder_children,
            'folder_files':folder_files,
            'permissions': permissions,
            'permstest': _userperms(folder, request),
            'current_url': request.path,
            'title': u'Directory listing for %s' % folder.name,
            'search_string': ' '.join(search_terms),
            'show_result_count': show_result_count,
            'limit_search_to_folder': limit_search_to_folder,
            'is_popup': popup_status(request),
            'select_folder': selectfolder_status(request),
            'root_path': "/%s" % admin.site.root_path, # needed in the admin/base.html template for logout links and stuff
        }, context_instance=RequestContext(request))
@login_required
def edit_folder(request, folder_id):
    """Placeholder view for editing a folder; not implemented yet."""
    # TODO: implement edit_folder view
    folder = None
    context = {
        'folder': folder,
        'is_popup': popup_status(request),
    }
    return render_to_response('image_filer/folder_edit.html', context,
                              context_instance=RequestContext(request))
@login_required
def edit_image(request, folder_id):
    """Placeholder view for editing an image; not implemented yet."""
    # TODO: implement edit_image view
    folder = None
    context = {
        'folder': folder,
        'is_popup': popup_status(request),
    }
    return render_to_response('image_filer/image_edit.html', context,
                              context_instance=RequestContext(request))
@login_required
def make_folder(request, folder_id=None):
    """Create a new folder, optionally as a child of *folder_id*.

    GET renders the new-folder form; a valid POST saves the folder and
    returns a script that closes the popup and reloads the opener.
    Superusers may create folders anywhere; other users need the
    add-children permission on an existing parent folder.
    """
    # the parent may come from the URL or from a form parameter
    if not folder_id:
        folder_id = request.REQUEST.get('parent_id', None)
    if folder_id:
        folder = Folder.objects.get(id=folder_id)
    else:
        folder = None
    # permission gate — order matters: superuser bypasses all checks
    if request.user.is_superuser:
        pass
    elif folder == None:
        # regular users may not add root folders
        raise PermissionDenied
    elif not folder.has_add_children_permission(request):
        # the user does not have the permission to add subfolders
        raise PermissionDenied
    if request.method == 'POST':
        new_folder_form = NewFolderForm(request.POST)
        if new_folder_form.is_valid():
            # parent and owner are set here, not by the form
            new_folder = new_folder_form.save(commit=False)
            new_folder.parent = folder
            new_folder.owner = request.user
            new_folder.save()
            #print u"Saving folder %s as child of %s" % (new_folder, folder)
            # close the popup and refresh the opener window
            return HttpResponse('<script type="text/javascript">opener.dismissPopupAndReload(window);</script>')
    else:
        #print u"New Folder GET, parent %s" % folder
        new_folder_form = NewFolderForm()
    # reached on GET or when the form was invalid: (re)render the form
    return render_to_response('image_filer/include/new_folder_form.html', {
            'new_folder_form': new_folder_form,
            'is_popup': request.REQUEST.has_key('_popup') or request.REQUEST.has_key('pop'),
    }, context_instance=RequestContext(request))
class UploadFileForm(forms.ModelForm):
    """ModelForm used by ajax_upload to validate and save uploaded Images."""
    class Meta:
        model=Image
        #fields = ('file',)
from image_filer.utils.files import generic_handle_file
@login_required
def upload(request):
    """Render the file-upload page."""
    context = {
        'title': u'Upload files',
        'is_popup': popup_status(request),
    }
    return render_to_response('image_filer/upload.html', context,
                              context_instance=RequestContext(request))
def ajax_upload(request, folder_id=None):
    """
    receives an upload from the flash uploader and fixes the session
    because of the missing cookie. Receives only one file at a time,
    although it may be a zip file, that will be unpacked.
    """
    #print request.POST
    # flashcookie-hack (flash does not submit the cookie, so we send the
    # django sessionid over regular post
    try:
        # rebuild the session store from the posted session key and
        # re-authenticate the user from it
        engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
        #session_key = request.POST.get('jsessionid')
        session_key = request.POST.get('jsessionid')
        request.session = engine.SessionStore(session_key)
        request.user = User.objects.get(id=request.session['_auth_user_id'])
        #print request.session['_auth_user_id']
        #print session_key
        #print engine
        #print request.user
        #print request.session
        # upload and save the file
        if not request.method == 'POST':
            return HttpResponse("must be POST")
        original_filename = request.POST.get('Filename')
        file = request.FILES.get('Filedata')
        #print request.FILES
        #print original_filename, file
        clipboard, was_clipboard_created = Clipboard.objects.get_or_create(user=request.user)
        # a zip upload may expand into several files
        files = generic_handle_file(file, original_filename)
        file_items = []
        for ifile, iname in files:
            try:
                iext = os.path.splitext(iname)[1].lower()
            except:
                iext = ''
            #print "extension: ", iext
            # only image files are accepted; everything else is ignored
            if iext in ['.jpg','.jpeg','.png','.gif']:
                imageform = UploadFileForm({'original_filename':iname,'owner': request.user.pk}, {'file':ifile})
                if imageform.is_valid():
                    #print 'imageform is valid'
                    try:
                        image = imageform.save(commit=False)
                        image.save()
                        file_items.append(image)
                    except Exception, e:
                        print e
                    #print "save %s" % image
                    # put the saved image onto the user's clipboard
                    bi = ClipboardItem(clipboard=clipboard, file=image)
                    bi.save()
                    #sprint image
                else:
                    pass#print imageform.errors
    except Exception, e:
        print e
        raise e
    # respond with the rendered clipboard rows for the uploaded items
    return render_to_response('image_filer/include/clipboard_item_rows.html', {'items': file_items }, context_instance=RequestContext(request))
@login_required
def paste_clipboard_to_folder(request):
    """Move all clipboard files into the chosen folder, then redirect back."""
    if request.method == 'POST':
        folder = Folder.objects.get(id=request.POST.get('folder_id'))
        clipboard = Clipboard.objects.get(id=request.POST.get('clipboard_id'))
        if not folder.has_add_children_permission(request):
            raise PermissionDenied
        tools.move_files_from_clipboard_to_folder(clipboard, folder)
        tools.discard_clipboard(clipboard)
    redirect_to = request.REQUEST.get('redirect_to', '')
    return HttpResponseRedirect('%s%s' % (redirect_to, popup_param(request)))
@login_required
def discard_clipboard(request):
    """Discard the posted clipboard via tools.discard_clipboard, then redirect."""
    if request.method == 'POST':
        clipboard_id = request.POST.get('clipboard_id')
        tools.discard_clipboard(Clipboard.objects.get(id=clipboard_id))
    redirect_to = request.POST.get('redirect_to', '')
    return HttpResponseRedirect('%s%s' % (redirect_to, popup_param(request)))
@login_required
def delete_clipboard(request):
    """Delete the posted clipboard via tools.delete_clipboard, then redirect."""
    if request.method == 'POST':
        clipboard_id = request.POST.get('clipboard_id')
        tools.delete_clipboard(Clipboard.objects.get(id=clipboard_id))
    redirect_to = request.POST.get('redirect_to', '')
    return HttpResponseRedirect('%s%s' % (redirect_to, popup_param(request)))
@login_required
def move_file_to_clipboard(request):
    """Move a single Image onto the current user's clipboard (POST only).

    Requires edit permission on the image; redirects back afterwards.
    """
    # NOTE(review): stray debug output; consider removing
    print "move file"
    if request.method=='POST':
        file_id = request.POST.get("file_id", None)
        clipboard = tools.get_user_clipboard(request.user)
        if file_id:
            file = Image.objects.get(id=file_id)
            if file.has_edit_permission(request):
                tools.move_file_to_clipboard([file], clipboard)
            else:
                raise PermissionDenied
    return HttpResponseRedirect( '%s%s' % (request.POST.get('redirect_to', ''), popup_param(request) ) )
@login_required
def clone_files_from_clipboard_to_folder(request):
    """Copy the clipboard's files into the posted folder, then redirect."""
    if request.method == 'POST':
        post = request.POST
        clipboard = Clipboard.objects.get(id=post.get('clipboard_id'))
        folder = Folder.objects.get(id=post.get('folder_id'))
        tools.clone_files_from_clipboard_to_folder(clipboard, folder)
    redirect_to = request.POST.get('redirect_to', '')
    return HttpResponseRedirect('%s%s' % (redirect_to, popup_param(request)))
class ImageExportForm(forms.Form):
    """Form describing how an image should be exported (format and size)."""
    FORMAT_CHOICES = (
        ('jpg', 'jpg'),
        ('png', 'png'),
        ('gif', 'gif'),
        #('tif', 'tif'),
    )
    # output file format; one of FORMAT_CHOICES
    format = forms.ChoiceField(choices=FORMAT_CHOICES)
    # crop/upscale are passed straight through to filters.ResizeFilter
    crop = forms.BooleanField(required=False)
    upscale = forms.BooleanField(required=False)
    # target dimensions in pixels
    width = forms.IntegerField()
    height = forms.IntegerField()
import filters
@login_required
def export_image(request, image_id):
    """Export an Image in a user-chosen format and size.

    GET renders the export form, pre-filled with the image's current
    dimensions; a valid POST resizes/converts the image and streams it
    back as an attachment download.
    """
    image = Image.objects.get(id=image_id)
    if request.method=='POST':
        form = ImageExportForm(request.POST)
        if form.is_valid():
            resize_filter = filters.ResizeFilter()
            im = filters.Image.open(image.file.path)
            format = form.cleaned_data['format']
            if format=='png':
                # BUG FIX: was 'image/jpg' (copy-paste from the JPEG
                # branch); PNG output must use the PNG content type
                mimetype='image/png'
                pil_format = 'PNG'
            #elif format=='tif':
            #    mimetype='image/tiff'
            #    pil_format = 'TIFF'
            elif format=='gif':
                mimetype='image/gif'
                pil_format = 'GIF'
            else:
                # NOTE(review): the registered JPEG type is 'image/jpeg';
                # kept as-is to limit the scope of this change
                mimetype='image/jpg'
                pil_format = 'JPEG'
            im = resize_filter.render(im,
                      size_x=int(form.cleaned_data['width']),
                      size_y=int(form.cleaned_data['height']),
                      crop=form.cleaned_data['crop'],
                      upscale=form.cleaned_data['upscale']
            )
            response = HttpResponse(mimetype='%s' % mimetype)
            response['Content-Disposition'] = 'attachment; filename=exported_image.%s' % format
            im.save(response, pil_format)
            return response
    else:
        # pre-fill with the image's own dimensions
        form = ImageExportForm(initial={'crop': True, 'width': image.file.width, 'height':image.file.height})
    # GET, or POST with an invalid form: (re)render the form page
    return render_to_response('image_filer/image_export_form.html', {
        'form': form,
        'image': image
    }, context_instance=RequestContext(request))
| {
"content_hash": "f9f8dc8e20502be6a8f703a72bdb694c",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 207,
"avg_line_length": 39.91280653950954,
"alnum_prop": 0.6060212998361552,
"repo_name": "stefanfoulis/django-image-filer",
"id": "bb00df54104dc453981b100432241108ecea7185",
"size": "14648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image_filer/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "81401"
},
{
"name": "PHP",
"bytes": "886"
},
{
"name": "Python",
"bytes": "216851"
}
],
"symlink_target": ""
} |
'''
Copyright 2015 Planet Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import numpy
from osgeo import gdal
from radiometric_normalization import gimage
from radiometric_normalization import pif
def generate(candidate_path, reference_path,
             method='filter_alpha', method_options=None,
             last_band_alpha=False):
    ''' Generates psuedo invariant features as a mask

    :param str candidate_path: Path to the candidate image
    :param str reference_path: Path to the reference image
    :param str method: Which psuedo invariant feature generation method to
        use: 'filter_alpha', 'filter_PCA' or 'filter_robust'
    :param object method_options: A passthrough argument for any specific
        options for the method chosen:
        - Not applicable for 'filter_alpha'
        - The width of the filter for 'filter_PCA'
    :param bool last_band_alpha: Whether the last band of the images is an
        alpha band
    :returns: A boolean array in the same coordinate system of the
        candidate/reference image (True for the PIF)
    '''
    c_ds, c_alpha, c_band_count = _open_image_and_get_info(
        candidate_path, last_band_alpha)
    r_ds, r_alpha, r_band_count = _open_image_and_get_info(
        reference_path, last_band_alpha)
    _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count)

    # only pixels valid in both images can be PIF candidates
    combined_alpha = numpy.logical_and(c_alpha, r_alpha)

    if method == 'filter_alpha':
        pif_mask = pif.generate_alpha_band_pifs(combined_alpha)
    elif method == 'filter_PCA':
        parameters = method_options if method_options \
            else pif.DEFAULT_PCA_OPTIONS
        pif_mask = _combine_per_band_pifs(
            c_ds, r_ds, c_band_count, combined_alpha,
            pif.generate_pca_pifs, parameters)
    elif method == 'filter_robust':
        parameters = method_options if method_options \
            else pif.DEFAULT_ROBUST_OPTIONS
        pif_mask = _combine_per_band_pifs(
            c_ds, r_ds, c_band_count, combined_alpha,
            pif.generate_robust_pifs, parameters)
    else:
        raise NotImplementedError('Only "filter_alpha", "filter_PCA" and '
                                  '"filter_robust" methods are implemented.')
    return pif_mask


def _combine_per_band_pifs(c_ds, r_ds, band_count, combined_alpha,
                           band_pif_function, parameters):
    '''AND together the per-band PIF masks produced by *band_pif_function*.

    This is the loop that was previously duplicated between the
    'filter_PCA' and 'filter_robust' branches of generate().

    :param c_ds: GDAL dataset of the candidate image
    :param r_ds: GDAL dataset of the reference image
    :param int band_count: Number of bands to process
    :param combined_alpha: Boolean mask of pixels valid in both images
    :param band_pif_function: Callable of
        (candidate_band, reference_band, alpha, options) -> boolean mask
    :param parameters: Options passed through to *band_pif_function*
    :returns: Boolean PIF mask (True only where every band agrees)
    '''
    # the deprecated numpy.bool alias was removed in NumPy 1.24;
    # the builtin bool is the correct dtype
    pif_mask = numpy.ones(combined_alpha.shape, dtype=bool)
    for band_no in range(1, band_count + 1):
        logging.info('PIF: Band {}'.format(band_no))
        c_band = gimage.read_single_band(c_ds, band_no)
        r_band = gimage.read_single_band(r_ds, band_no)
        pif_band_mask = band_pif_function(
            c_band, r_band, combined_alpha, parameters)
        pif_mask = numpy.logical_and(pif_mask, pif_band_mask)

    no_total_pixels = combined_alpha.size
    no_valid_pixels = len(numpy.nonzero(pif_mask)[0])
    valid_percent = 100.0 * no_valid_pixels / no_total_pixels
    logging.info(
        'PIF: Found {} final pifs out of {} pixels ({}%) for all '
        'bands'.format(no_valid_pixels, no_total_pixels, valid_percent))
    return pif_mask
def _open_image_and_get_info(path, last_band_alpha):
    '''Open *path* with GDAL and read its alpha mask and band count.

    :param str path: Path to the image
    :param bool last_band_alpha: Whether the last band is an alpha band
    :returns: tuple of (gdal dataset, alpha band array, band count)
    '''
    gdal_ds = gdal.Open(path)
    alpha_band, band_count = gimage.read_alpha_and_band_count(
        gdal_ds, last_band_alpha=last_band_alpha)
    return gdal_ds, alpha_band, band_count
def _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count):
assert r_band_count == c_band_count
assert r_alpha.shape == c_alpha.shape
| {
"content_hash": "980b16c4cae08884617014689916f17f",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 41.90983606557377,
"alnum_prop": 0.6377860355955408,
"repo_name": "planetlabs/radiometric_normalization",
"id": "a4e971769d770b4c9e393dce212d1f22c86e641a",
"size": "5113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radiometric_normalization/wrappers/pif_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "106911"
},
{
"name": "Shell",
"bytes": "955"
}
],
"symlink_target": ""
} |
import pickle
import os
import sys
import inspect
almost_current = os.path.abspath(inspect.getfile(inspect.currentframe()))
currentdir = os.path.dirname(almost_current)
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from models.tensorflow_model import TFWord2Vec # noqa
from models.gensim_model import Gensim # noqa
def train_both_models_with_different_emb_sizes(language,
                                               window_size,
                                               emb_list,
                                               epochs_to_train,
                                               path_to_corpus,
                                               experiment_name):
    """
    Function to train the tensorflow model and the gensim model
    using different embedding sizes. After the training is done
    a pickle file is created containing the names of the models and
    the path for the respective pickle files.

    :type language: str
    :type window_size: int
    :type emb_list: list of ints
    :type epochs_to_train: int
    :type path_to_corpus: str
    :type experiment_name: str
    """
    size = len(emb_list)
    pickles = []
    names = []
    for i, emb in enumerate(emb_list):
        print("{0}/{1}: Training word embeddings of size {2}".format(i + 1,
                                                                     size,
                                                                     emb))
        print("\n====Training the official tf model====\n")
        tf_model = TFWord2Vec(language,
                              'tf',
                              window_size,
                              emb,
                              epochs_to_train)
        tf_model.train(path_to_corpus)
        pickles.append(tf_model.get_pickle())
        names.append(tf_model.short_name)
        print("\n====Training the Gensim model====\n")
        g_model = Gensim(language,
                         'g',
                         window_size,
                         emb)
        g_model.train(path_to_corpus)
        pickles.append(g_model.get_pickle())
        names.append(g_model.short_name)
    new_dict = {'names': names,
                'pickles': pickles}
    # makedirs with exist_ok avoids the check-then-create race of the old
    # `if not exists: mkdir` pair and creates the same folder used below
    pickle_folder = os.path.join(os.getcwd(), "pickles")
    os.makedirs(pickle_folder, exist_ok=True)
    file_name = os.path.join("pickles", experiment_name + ".p")
    with open(file_name, 'wb') as pkl_file:
        pickle.dump(new_dict, pkl_file)
def train_both_models_with_different_window_sizes(language,
                                                  emb_size,
                                                  window_list,
                                                  epochs_to_train,
                                                  path_to_corpus,
                                                  experiment_name):
    """
    Train the tensorflow model and the gensim model once per window size
    in *window_list*. When training finishes, a pickle file named after
    *experiment_name* is written under ./pickles, mapping the trained
    model names to the paths of their pickle files.

    :type language: str
    :type emb_size: int
    :type window_list: list of ints
    :type epochs_to_train: int
    :type path_to_corpus: str
    :type experiment_name: str
    """
    total = len(window_list)
    pickle_paths = []
    model_names = []
    for idx, window_size in enumerate(window_list):
        print("{0}/{1}: window size = {2}".format(idx + 1, total, window_size))
        # suffix distinguishes models trained with different windows
        suffix = "W" + str(window_size)
        print("\n====Training the official tf model====\n")
        tf_model = TFWord2Vec(language, 'tf' + suffix, window_size,
                              emb_size, epochs_to_train)
        tf_model.train(path_to_corpus)
        pickle_paths.append(tf_model.get_pickle())
        model_names.append(tf_model.short_name)
        print("\n====Training the Gensim model====\n")
        gensim_model = Gensim(language, 'g' + suffix, window_size, emb_size)
        gensim_model.train(path_to_corpus)
        pickle_paths.append(gensim_model.get_pickle())
        model_names.append(gensim_model.short_name)
    results = {'names': model_names,
               'pickles': pickle_paths}
    pickle_folder = os.path.join(os.getcwd(), "pickles")
    if not os.path.exists(pickle_folder):
        os.mkdir("pickles")
    file_name = os.path.join("pickles", experiment_name + ".p")
    with open(file_name, 'wb') as pkl_file:
        pickle.dump(results, pkl_file)
| {
"content_hash": "81af62f58f282feb22d0c29764c7e8a5",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 76,
"avg_line_length": 35.92248062015504,
"alnum_prop": 0.5038843331894691,
"repo_name": "LIAMF-USP/word2vec-TF",
"id": "cc36c7eeb884f8512542553174430ba8a8021171",
"size": "4634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/basic_experiment/train_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "15463"
},
{
"name": "Python",
"bytes": "79825"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
} |
"""End to end tests that run flows on actual clients."""
from grr.endtoend_tests import administrative
from grr.endtoend_tests import artifacts
from grr.endtoend_tests import base
from grr.endtoend_tests import collectors
from grr.endtoend_tests import discovery
from grr.endtoend_tests import file_finder
from grr.endtoend_tests import filesystem
from grr.endtoend_tests import fingerprint
from grr.endtoend_tests import grep
from grr.endtoend_tests import limits
from grr.endtoend_tests import memory
from grr.endtoend_tests import network
from grr.endtoend_tests import processes
from grr.endtoend_tests import registry
from grr.endtoend_tests import transfer
| {
"content_hash": "ac4141a1bfaff4946e3b4165b8939287",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 56,
"avg_line_length": 35.05263157894737,
"alnum_prop": 0.8333333333333334,
"repo_name": "pchaigno/grreat",
"id": "b04e82a6d16a12ac917ab541110ab425c986acec",
"size": "688",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "endtoend_tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36573"
},
{
"name": "JavaScript",
"bytes": "831111"
},
{
"name": "Makefile",
"bytes": "5482"
},
{
"name": "Perl",
"bytes": "483"
},
{
"name": "Python",
"bytes": "4517593"
},
{
"name": "Shell",
"bytes": "31210"
}
],
"symlink_target": ""
} |
import unittest
from quickbooks.objects.customer import Customer
class CustomerTests(unittest.TestCase):
    """Unit tests for the Customer QuickBooks object."""

    def test_unicode(self):
        """The unicode form of a customer is its display name."""
        customer = Customer()
        customer.DisplayName = "test"
        self.assertEquals(unicode(customer), "test")

    def test_to_ref(self):
        """to_ref() builds a reference carrying name, type and value."""
        customer = Customer()
        customer.Id = 100
        customer.DisplayName = "test"
        ref = customer.to_ref()
        self.assertEquals(ref.type, "Customer")
        self.assertEquals(ref.name, "test")
        self.assertEquals(ref.value, 100)
| {
"content_hash": "91772922ee0aa5e6beeb1187bb0825e8",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 52,
"avg_line_length": 24.954545454545453,
"alnum_prop": 0.6411657559198543,
"repo_name": "ferdiaz/python-quickbooks",
"id": "f3ab4d87edaca370884faef36f122cd7aa52ca60",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/objects/test_customer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "92199"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import unicode_literals
from io import open
from glob import glob
import sys
import os
import os.path
import subprocess
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
class TestProxy(object):
    """Runs a unit-test executable, optionally under valgrind."""

    def __init__(self, test_exe_path, use_valgrind=False):
        # normalize so later subprocess invocations are path-independent
        self.test_exe_path = os.path.normpath(os.path.abspath(test_exe_path))
        self.use_valgrind = use_valgrind

    def run(self, options):
        """Run the executable with *options* appended after '--test-auto'.

        Returns a (success, output) pair where output is the combined
        stdout+stderr bytes of the process.
        """
        if self.use_valgrind:
            cmd = VALGRIND_CMD.split()
        else:
            cmd = []
        cmd.extend([self.test_exe_path, '--test-auto'] + options)
        try:
            process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer intercepted; still log the command, then re-raise
            print(cmd)
            raise
        stdout = process.communicate()[0]
        if process.returncode:
            return False, stdout
        return True, stdout
def runAllTests(exe_path, use_valgrind=False):
    """List and run every test exposed by *exe_path*.

    Returns 0 when all tests pass, 1 otherwise (including when the test
    list itself cannot be obtained).
    """
    proxy = TestProxy(exe_path, use_valgrind=use_valgrind)
    ok, listing = proxy.run(['--list-tests'])
    if not ok:
        print("Failed to obtain unit tests list:\n" + listing, file=sys.stderr)
        return 1
    names = [line.strip() for line in listing.decode('utf-8').strip().split('\n')]
    failures = []
    for name in names:
        print('TESTING %s:' % name, end=' ')
        passed, output = proxy.run(['--test', name])
        if passed:
            print('OK')
        else:
            failures.append((name, output))
            print('FAILED')
    if not failures:
        print('All %d tests passed' % len(names))
        return 0
    print()
    for _name, output in failures:
        print(output)
    passed_count = len(names) - len(failures)
    print('%d/%d tests passed (%d failure(s))' % ( passed_count, len(names), len(failures)))
    return 1
def main():
    """Parse command-line options and run the whole test suite,
    exiting with the suite's status code."""
    from optparse import OptionParser
    parser = OptionParser(usage="%prog [options] <path to test_lib_json.exe>")
    parser.add_option("--valgrind",
        action="store_true", dest="valgrind", default=False,
        help="run all the tests using valgrind to detect memory leaks")
    parser.enable_interspersed_args()
    options, args = parser.parse_args()
    if len(args) != 1:
        # parser.error() prints the message and exits with status 2; the
        # old trailing sys.exit(1) after this call was unreachable dead code
        parser.error('Must provides at least path to test_lib_json executable.')
    exit_code = runAllTests(args[0], use_valgrind=options.valgrind)
    sys.exit(exit_code)

if __name__ == '__main__':
    main()
| {
"content_hash": "18b14fd039501959a70558ec6edabe51",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 109,
"avg_line_length": 33.936708860759495,
"alnum_prop": 0.6068631107795599,
"repo_name": "Huawei/eSDK_eLTE_SDK_Windows",
"id": "08850ec1a1da42eb98d2569ffd1a6c26b62454ab",
"size": "2898",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "third_party/open_src/jsoncpp-0.10.5/source/test/rununittests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "45600"
},
{
"name": "C",
"bytes": "1941730"
},
{
"name": "C++",
"bytes": "9810262"
},
{
"name": "CMake",
"bytes": "3613"
},
{
"name": "Makefile",
"bytes": "209539"
},
{
"name": "Objective-C",
"bytes": "250083"
},
{
"name": "Protocol Buffer",
"bytes": "9363"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the giggledust spice draft-schematic template object."""
    schematic = Intangible()
    schematic.template = "object/draft_schematic/spices/shared_spice_giggledust.iff"
    schematic.attribute_template_id = -1
    schematic.stfName("string_id_table","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return schematic
"content_hash": "c2e291a6ebea029387b4c2f4ccc856a9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6970684039087948,
"repo_name": "obi-two/Rebelion",
"id": "4bac8e0c8cc5f8cb3c5965ea18b6dc289d8c9a02",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/spices/shared_spice_giggledust.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.six import BytesIO
import django
# `deconstructible` exists only on Django versions with migration support;
# fall back to a no-op decorator when it is unavailable.
try:
    from django.utils.deconstruct import deconstructible
except ImportError:  # Django 1.7+ migrations
    deconstructible = lambda klass, *args, **kwargs: klass
# Storage only accepts `max_length` in 1.8+
if django.VERSION >= (1, 8):
    # 1.8+: the stock classes already accept `max_length`, use them as-is.
    from django.core.files.storage import Storage, FileSystemStorage
else:
    # Pre-1.8: import under aliases so the compat subclasses below can wrap them.
    # NOTE(review): on the 1.8+ branch DjangoStorage/DjangoFileSystemStorage are
    # never bound, yet the class definitions below reference them — confirm the
    # subclass definitions were meant to live inside this `else` branch.
    from django.core.files.storage import Storage as DjangoStorage
    from django.core.files.storage import FileSystemStorage as DjangoFileSystemStorage
class StorageMixin(object):
    """Compatibility shim that swallows the `max_length` keyword which newer
    Django passes to storage backends but older backends do not accept."""

    def get_available_name(self, name, max_length=None):
        # Drop max_length before delegating to the concrete backend.
        return super(StorageMixin, self).get_available_name(name)

    def save(self, name, content, max_length=None):
        # Drop max_length before delegating to the concrete backend.
        return super(StorageMixin, self).save(name, content)
class Storage(StorageMixin, DjangoStorage):
    """Pre-1.8 Storage that tolerates the `max_length` keyword.

    The mixin must come first in the MRO: with `DjangoStorage` listed first,
    its `save()`/`get_available_name()` (which do not accept `max_length`)
    would shadow the mixin's shims, defeating the purpose of this class.
    """
    pass
class FileSystemStorage(StorageMixin, DjangoFileSystemStorage):
    """Pre-1.8 FileSystemStorage that tolerates the `max_length` keyword.

    `StorageMixin` precedes the concrete backend so its `max_length`-stripping
    overrides are found first in the MRO.
    """
    pass
| {
"content_hash": "0627b5cf25dab46e77ce22cf57e5ac95",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 36.107142857142854,
"alnum_prop": 0.7359050445103857,
"repo_name": "torchbox/django-storages",
"id": "8bb898f15596ef9bdd5509841721304d1ca76dd4",
"size": "1011",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "storages/compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "88415"
}
],
"symlink_target": ""
} |
import rospy
from race.msg import drive_values
from race.msg import drive_param
from std_msgs.msg import Bool
# Publisher for the raw PWM drive values consumed by the serial node.
pub = rospy.Publisher('drive_pwm', drive_values, queue_size=10)
# Publisher for the emergency-stop flag.
em_pub = rospy.Publisher('eStop', Bool, queue_size=10)
# Integer range-mapping helper, equivalent to Arduino's map().
def arduino_map(x, in_min, in_max, out_min, out_max):
    """Linearly rescale *x* from [in_min, in_max] onto [out_min, out_max].

    Uses floor division, mirroring the integer behaviour of Arduino's map().
    """
    in_span = in_max - in_min
    out_span = out_max - out_min
    return out_min + (x - in_min) * out_span // in_span
# Invoked for every drive_param message: convert to PWM and republish.
def callback(data):
    """Map incoming velocity/angle ([-100, 100]) to PWM values and publish."""
    velocity = data.velocity
    angle = data.angle
    print("Velocity: ",velocity,"Angle: ",angle)
    # Both channels share the same PWM duty-cycle range.
    pwm_velocity = arduino_map(velocity, -100, 100, 6554, 13108)
    pwm_steering = arduino_map(angle, -100, 100, 6554, 13108)
    msg = drive_values()
    msg.pwm_drive = pwm_velocity
    msg.pwm_angle = pwm_steering
    pub.publish(msg)
def talker():
    """Init the ROS node, publish the initial eStop state, and relay
    drive parameters to the serial node until shutdown."""
    rospy.init_node('serial_talker', anonymous=True)
    # NOTE(review): published immediately after init_node — subscribers may not
    # be connected yet, so this initial False could be dropped; confirm.
    em_pub.publish(False)
    rospy.Subscriber("drive_parameters", drive_param, callback)
    rospy.spin()
if __name__ == '__main__':
    print("Serial talker initialized")
    talker()
| {
"content_hash": "8cf226817bd60fd2543b67d34e23fa94",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 30.02857142857143,
"alnum_prop": 0.7117031398667936,
"repo_name": "rdelfin/cyberphysical-robot-car",
"id": "d99f19290c219225e275d97b6a5e0dfec9200dd7",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "race/race/src/talker.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2210"
},
{
"name": "Python",
"bytes": "9011"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <d.linguaglossa@mseclab.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import time
import subprocess
import signal
import shlex
from subprocess import PIPE
from .pjf_executor import PJFExecutor
from .pjf_testcase_server import PJFTestcaseServer
from .errors import PJFMissingArgument ,PJFBaseException, PJFProcessExecutionError
class PJFProcessMonitor(PJFTestcaseServer, PJFExecutor):
    """ Represent a class used to start and monitor a single process """
    def __init__(self, configuration):
        """
        Init the ProcessMonitor server.

        :param configuration: configuration object; must expose the
            `debug`, `ports` and `process_to_monitor` attributes.
        :raises PJFMissingArgument: if a required configuration key is missing.
        """
        self.logger = self.init_logger()
        # NOTE(review): membership test with a *list* operand — this relies on
        # the configuration object implementing a custom __contains__ that
        # accepts a list of keys; confirm against the PJF configuration class.
        if ["debug", "ports", "process_to_monitor"] not in configuration:
            raise PJFMissingArgument()
        self.config = configuration
        self.process = None          # Popen handle of the monitored process
        self.finished = False        # set by shutdown() to stop the monitor loop
        self.testcase_count = 0      # suffix counter for saved testcase files
        if self.config.debug:
            print("[\033[92mINFO\033[0m] Starting process monitoring...")
            print("[\033[92mINFO\033[0m] Starting Testcase Server ({0})...".format(
                self.config.ports["servers"]["TCASE_PORT"]
            ))
        super(PJFProcessMonitor, self).__init__(configuration)
        self.logger.debug("[{0}] - PJFProcessMonitor successfully completed".format(time.strftime("%H:%M:%S")))
    def shutdown(self, *args):
        """
        Shutdown the running process and the monitor.

        Accepts arbitrary *args so it can double as a signal handler.
        :raises PJFBaseException: if cleanup fails for any reason.
        """
        try:
            self._shutdown()
            if self.process:
                self.process.wait()
                self.process.stdout.close()
                self.process.stdin.close()
                self.process.stderr.close()
            self.finished = True
            # Unblock got_testcase() by sending an empty testcase to our own server.
            self.send_testcase('', '127.0.0.1', self.config.ports["servers"]["TCASE_PORT"])
            self.logger.debug("[{0}] - PJFProcessMonitor successfully completed".format(time.strftime("%H:%M:%S")))
        except Exception as e:
            raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
    def save_testcase(self, testcase):
        """
        Save all testcases collected during monitoring.

        Files go into a "testcase_<binary>" directory next to the cwd, one
        JSON file per entry, numbered by self.testcase_count.
        :raises PJFBaseException: if writing a testcase fails.
        """
        try:
            if self.config.debug:
                print("[\033[92mINFO\033[0m] Saving testcase...")
            dir_name = "testcase_{0}".format(os.path.basename(shlex.split(self.config.process_to_monitor)[0]))
            try:
                os.mkdir(dir_name)
            except OSError:
                # Directory already exists — reuse it.
                pass
            for test in testcase:
                # NOTE(review): opened in binary mode but `test` may be str on
                # Python 3 — confirm the payload type (this is Python 2-era code).
                with open("{0}/testcase_{1}.json".format(dir_name, self.testcase_count), "wb") as t:
                    t.write(test)
                    t.close()
                self.testcase_count += 1
        except Exception as e:
            raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
    def run_and_monitor(self):
        """
        Run command once and check exit code.

        :returns: True if the process died with SIGSEGV.
        """
        signal.signal(signal.SIGINT, self.shutdown)
        self.spawn(self.config.process_to_monitor, timeout=0)
        return self._is_sigsegv(self.return_code)
    def start_monitor(self, standalone=True):
        """
        Run command in a loop and check exit status plus restart process when needed.

        :param standalone: when True, install a SIGINT handler for clean shutdown.
        :raises PJFProcessExecutionError: if the target binary does not exist.
        :raises PJFBaseException: on any other unexpected failure.
        """
        try:
            self.start()
            cmdline = shlex.split(self.config.process_to_monitor)
            if standalone:
                signal.signal(signal.SIGINT, self.shutdown)
            self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            while self.process and not self.finished:
                self.process.wait()
                if self._is_sigsegv(self.process.returncode):
                    if self.config.debug:
                        print("[\033[92mINFO\033[0m] Process crashed with \033[91mSIGSEGV\033[0m, waiting for testcase...")
                    # Block until the fuzzer delivers the testcase that caused the crash.
                    while not self.got_testcase():
                        time.sleep(1)
                    self.save_testcase(self.testcase[-10:])  # just take last 10 testcases
                if self.process:
                    # Restart the target and keep monitoring.
                    self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        except OSError:
            self.shutdown()
            self.process = False
            self.got_testcase = lambda: True
            raise PJFProcessExecutionError("Binary <%s> does not exist" % cmdline[0])
        except Exception as e:
            raise PJFBaseException("Unknown error please send log to author")
    def _is_sigsegv(self, return_code):
        """
        Check return code against SIGSEGV.

        :returns: True if *return_code* indicates death by SIGSEGV.
        """
        if return_code == -signal.SIGSEGV:
            return True
        return False
| {
"content_hash": "ff7fca0cbcd7b703b070abdcb6afaa0e",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 123,
"avg_line_length": 42.19259259259259,
"alnum_prop": 0.6234199438202247,
"repo_name": "mseclab/PyJFuzz",
"id": "13205ed6d7d32390b83079e07c0adc6c2888bc8b",
"size": "5696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyjfuzz/core/pjf_process_monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "20812"
},
{
"name": "JavaScript",
"bytes": "28825"
},
{
"name": "Python",
"bytes": "196691"
}
],
"symlink_target": ""
} |
"""
Utility functions for supporting the XML Schema Datatypes hierarchy
"""
from rdflib import XSD
# Set of XML Schema datatypes recognized by the SPARQL engine.
XSD_DTs = set(
    (XSD.integer, XSD.decimal, XSD.float, XSD.double, XSD.string,
     XSD.boolean, XSD.dateTime, XSD.nonPositiveInteger, XSD.negativeInteger,
     XSD.long, XSD.int, XSD.short, XSD.byte, XSD.nonNegativeInteger,
     XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte,
     XSD.positiveInteger, XSD.date))
# Maps a base datatype to its derived (restricted) subtypes.
_sub_types = {
    XSD.integer: [
        XSD.nonPositiveInteger, XSD.negativeInteger, XSD.long, XSD.int,
        XSD.short, XSD.byte, XSD.nonNegativeInteger, XSD.positiveInteger,
        XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte],
}
# Inverted view of _sub_types: subtype -> base type, built at import time.
_super_types = {}
for superdt in XSD_DTs:
    for subdt in _sub_types.get(superdt, []):
        _super_types[subdt] = superdt
# we only care about float, double, integer, decimal
_typePromotionMap = {
    XSD.float: {XSD.integer: XSD.float,
                XSD.decimal: XSD.float,
                XSD.double: XSD.double},
    XSD.double: {XSD.integer: XSD.double,
                 XSD.float: XSD.double,
                 XSD.decimal: XSD.double},
    XSD.decimal: {XSD.integer: XSD.decimal,
                  XSD.float: XSD.float,
                  XSD.double: XSD.double},
    XSD.integer: {XSD.decimal: XSD.decimal,
                  XSD.float: XSD.float,
                  XSD.double: XSD.double}
}
def type_promotion(t1, t2):
    """Return the XSD datatype resulting from combining *t1* and *t2*.

    Integer subtypes are first widened to their base type; identical types
    are returned unchanged; otherwise the numeric promotion table decides.

    :param t1: XSD datatype URI of the first operand.
    :param t2: XSD datatype URI of the second operand, or ``None`` if absent.
    :raises TypeError: if the two datatypes cannot be combined.
    """
    # `is None` rather than `== None`: identity test is the correct idiom and
    # avoids invoking URIRef equality on a non-URIRef operand.
    if t2 is None:
        return t1
    t1 = _super_types.get(t1, t1)
    t2 = _super_types.get(t2, t2)
    if t1 == t2:
        return t1  # matching super-types
    try:
        return _typePromotionMap[t1][t2]
    except KeyError:
        raise TypeError(
            'Operators cannot combine datatypes %s and %s' % (t1, t2))
| {
"content_hash": "56671dcf2291c9ad944730aefc4352bb",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 31.12280701754386,
"alnum_prop": 0.62119503945885,
"repo_name": "dbs/rdflib",
"id": "be2351e203f8499c7e28755b6e272691f8ec6591",
"size": "1774",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "rdflib/plugins/sparql/datatypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "145"
},
{
"name": "HTML",
"bytes": "120202"
},
{
"name": "Jupyter Notebook",
"bytes": "283784"
},
{
"name": "Python",
"bytes": "1470218"
},
{
"name": "Ruby",
"bytes": "28544"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
"""
This script evaluates trained models that have been saved to the filesystem.
See mnist_tutorial_picklable.py for instructions.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import tensorflow as tf
from cleverhans.compat import flags
from cleverhans.dataset import MNIST
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils import set_log_level
from cleverhans.utils_tf import model_eval, silence
from cleverhans.serial import load
# Suppress cleverhans/TensorFlow warning chatter before anything else runs.
silence()
FLAGS = flags.FLAGS
def evaluate_model(
    filepath,
    train_start=0,
    train_end=60000,
    test_start=0,
    test_end=10000,
    batch_size=128,
    testing=False,
    num_threads=None,
):
    """
    Run evaluation on a saved model
    :param filepath: path to model to evaluate
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param batch_size: size of evaluation batches
    :param testing: currently unused by this function
    :param num_threads: when truthy, intra-op parallelism is limited to 1
    """
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)
    # Set logging level to see debug information
    set_log_level(logging.INFO)
    # Create TF session
    # NOTE(review): the numeric value of num_threads is ignored — any truthy
    # value pins intra-op parallelism to 1; confirm intent.
    if num_threads:
        config_args = dict(intra_op_parallelism_threads=1)
    else:
        config_args = {}
    sess = tf.Session(config=tf.ConfigProto(**config_args))
    # Get MNIST test data
    mnist = MNIST(
        train_start=train_start,
        train_end=train_end,
        test_start=test_start,
        test_end=test_end,
    )
    x_train, y_train = mnist.get_set("train")
    x_test, y_test = mnist.get_set("test")
    # Use Image Parameters
    img_rows, img_cols, nchannels = x_train.shape[1:4]
    nb_classes = y_train.shape[1]
    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))
    eval_params = {"batch_size": batch_size}
    fgsm_params = {"eps": 0.3, "clip_min": 0.0, "clip_max": 1.0}
    def do_eval(preds, x_set, y_set, report_key, is_adv=None):
        # Measure accuracy of `preds` on (x_set, y_set); is_adv selects the
        # printed label, and None suppresses the report line entirely.
        acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
        if is_adv is None:
            report_text = None
        elif is_adv:
            report_text = "adversarial"
        else:
            report_text = "legitimate"
        if report_text:
            print("Test accuracy on %s examples: %0.4f" % (report_text, acc))
    with sess.as_default():
        model = load(filepath)
    assert len(model.get_params()) > 0
    # Initialize the Fast Gradient Sign Method (FGSM) attack object and
    # graph
    fgsm = FastGradientMethod(model, sess=sess)
    adv_x = fgsm.generate(x, **fgsm_params)
    preds_adv = model.get_logits(adv_x)
    preds = model.get_logits(x)
    # Evaluate the accuracy of the MNIST model on adversarial examples
    do_eval(preds, x_test, y_test, "train_clean_train_clean_eval", False)
    do_eval(preds_adv, x_test, y_test, "clean_train_adv_eval", True)
def main(argv=None):
    """CLI entry point: expects exactly one argument, the model filepath."""
    # argv[0] is the program name; the sole positional arg is the model path.
    _, filepath = argv
    evaluate_model(filepath=filepath)
if __name__ == "__main__":
    tf.app.run()
| {
"content_hash": "4cf5998b00897957e3bc49cbacc39568",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 29.236842105263158,
"alnum_prop": 0.660966096609661,
"repo_name": "cleverhans-lab/cleverhans",
"id": "b9de18e38f3f1c42bb5a38791ffcb4ee51b6549d",
"size": "3333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleverhans_v3.1.0/cleverhans_tutorials/evaluate_pickled_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "242"
},
{
"name": "HTML",
"bytes": "64"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "1016809"
},
{
"name": "Shell",
"bytes": "2831"
}
],
"symlink_target": ""
} |
"""
https://leetcode.com/problems/majority-element/
https://leetcode.com/submissions/detail/109296614/
"""
class Solution(object):
    """Majority element: the value appearing more than ⌊n/2⌋ times."""

    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Counts occurrences in one pass, then returns the value whose count
        exceeds half the list length (None if no majority exists).
        """
        # dict.get avoids the original's double lookup (membership test
        # followed by indexing) and the non-idiomatic `not value in dic`.
        counts = {}
        for value in nums:
            counts[value] = counts.get(value, 0) + 1
        # Strictly more than half; integer threshold is equivalent to the
        # original len(nums) / 2 comparison since counts are integers.
        threshold = len(nums) // 2
        for value, count in counts.items():
            if count > threshold:
                return value
import unittest
class Test(unittest.TestCase):
    """Sanity checks for Solution.majorityElement."""
    def test(self):
        sol = Solution()
        self.assertEqual(sol.majorityElement([1]), 1)
        self.assertEqual(sol.majorityElement([3, 2, 3]), 3)
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "7edcf4535f1213d66ad82d73d86a3cee",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 64,
"avg_line_length": 22.285714285714285,
"alnum_prop": 0.5512820512820513,
"repo_name": "vivaxy/algorithms",
"id": "81254bb8f5aeddf6db1858eab3f5fcc1bff8e48d",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problems/majority_element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "130225"
},
{
"name": "Python",
"bytes": "272982"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
from rspecs.commons import DEFAULT_XMLNS, DEFAULT_XS, DEFAULT_SCHEMA_LOCATION,\
DSL_PREFIX, PROTOGENI_PREFIX
from rspecs.commons_tn import DEFAULT_SHARED_VLAN
from rspecs.formatter_base import FormatterBase
from lxml import etree
# Space-separated xsi:schemaLocation value for TNRM v3 advertisement RSpecs.
DEFAULT_AD_SCHEMA_LOCATION = DEFAULT_SCHEMA_LOCATION
DEFAULT_AD_SCHEMA_LOCATION += DSL_PREFIX + "3/ad.xsd "
DEFAULT_AD_SCHEMA_LOCATION += DSL_PREFIX + "ext/shared-vlan/1/ad.xsd "
# NOTE(review): the protogeni prefix is appended twice (once bare, once with
# "/ad.xsd") — confirm whether the bare prefix is intentional or a typo.
DEFAULT_AD_SCHEMA_LOCATION += PROTOGENI_PREFIX
DEFAULT_AD_SCHEMA_LOCATION += PROTOGENI_PREFIX + "/ad.xsd "
class TNRMv3AdvertisementFormatter(FormatterBase):
    """Builds TNRM v3 advertisement RSpecs: nodes with shared-VLAN
    availability info, and links with capacity properties."""

    def __init__(self, xmlns=DEFAULT_XMLNS, xs=DEFAULT_XS,
                 sharedvlan=DEFAULT_SHARED_VLAN,
                 protogeni=PROTOGENI_PREFIX,
                 schema_location=DEFAULT_AD_SCHEMA_LOCATION):
        namespaces = {"sharedvlan": "%s" % (sharedvlan),
                      "protogeni": "%s" % (protogeni)}
        super(TNRMv3AdvertisementFormatter, self).__init__(
            "advertisement", schema_location,
            namespaces, xmlns, xs)
        self.__sv = sharedvlan
        self.__proto = protogeni

    def add_node(self, rspec, node, inner_call=True):
        """Append a <node> element for the dict-like *node* under *rspec*."""
        node_el = etree.SubElement(rspec, "{%s}node" % (self.xmlns))
        node_el.attrib["component_id"] = node.get("component_id")
        node_el.attrib["component_manager_id"] = node.get("component_manager_id")
        node_el.attrib["exclusive"] = node.get("exclusive")
        # The protogeni extension attribute is only emitted on inner calls.
        if inner_call and node.get("component_manager_uuid") is not None:
            node_el.attrib["{%s}component_manager_uuid" % (self.__proto)] = \
                node.get("component_manager_uuid")
        if node.get("sliver_type_name") is not None:
            sliver_el = etree.SubElement(node_el, "{%s}sliver_type" % (self.xmlns))
            sliver_el.attrib["name"] = node.get("sliver_type_name")
        for iface in node.get("interfaces"):
            iface_el = etree.SubElement(node_el, "{%s}interface" % (self.xmlns))
            iface_el.attrib["component_id"] = iface.get("component_id")
            for vlan in iface.get("vlan"):
                # Only attributes actually present on the VLAN are written.
                avail_el = etree.SubElement(iface_el,
                                            "{%s}available" % (self.__sv))
                if vlan.get("tag") is not None:
                    avail_el.attrib["localTag"] = vlan.get("tag")
                if vlan.get("name") is not None:
                    avail_el.attrib["name"] = vlan.get("name")
                if vlan.get("description") is not None:
                    avail_el.attrib["description"] = vlan.get("description")

    def node(self, node, inner_call=True):
        """Convenience wrapper: add *node* to this formatter's own rspec."""
        self.add_node(self.rspec, node, inner_call)

    def add_link(self, rspec, link, inner_call=True):
        """Append a <link> element with its manager, interface refs and
        capacity properties under *rspec*."""
        link_el = etree.SubElement(rspec, "{%s}link" % (self.xmlns))
        link_el.attrib["component_id"] = link.get("component_id")
        if inner_call and link.get("component_manager_uuid") is not None:
            link_el.attrib["{%s}component_manager_uuid" % (self.__proto)] = \
                link.get("component_manager_uuid")
        mgr_el = etree.SubElement(link_el, "{%s}component_manager" % (self.xmlns))
        mgr_el.attrib["name"] = link.get("component_manager_name")
        for ref in link.get("interface_ref"):
            ref_el = etree.SubElement(link_el, "{%s}interface_ref" % (self.xmlns))
            ref_el.attrib["component_id"] = ref.get("component_id")
        for prop in link.get("property"):
            prop_el = etree.SubElement(link_el, "{%s}property" % (self.xmlns))
            prop_el.attrib["source_id"] = prop.get("source_id")
            prop_el.attrib["dest_id"] = prop.get("dest_id")
            prop_el.attrib["capacity"] = prop.get("capacity")

    def link(self, link, inner_call=True):
        """Convenience wrapper: add *link* to this formatter's own rspec."""
        self.add_link(self.rspec, link, inner_call)
| {
"content_hash": "2820ca1ce0beb1341abaa395c47ba5bb",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 45.6,
"alnum_prop": 0.5893640350877193,
"repo_name": "ict-felix/stack",
"id": "3544c73f14c65e59510cac6cf63015e4b84ac5ea",
"size": "3648",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/resource/utilities/rspecs/tnrm/advertisement_formatter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
} |
import unittest
from zang.inboundxml.elements.refer import Refer
from zang.domain.enums.http_method import HttpMethod
from zang.inboundxml.elements.sip import Sip
class TestRefer(unittest.TestCase):
    """Unit tests for the Refer InboundXML element."""

    def test_init_with_required_values(self):
        assert Refer().xml == '<Refer></Refer>'

    def test_init_with_arguments(self):
        address = 'sip:username@example.com'
        assert Refer(address=address).xml == '<Refer>%s</Refer>' % address

    def test_init_add_element(self):
        sip_address = 'username@example.com'
        refer = Refer()
        refer.addElement(Sip(sip_address))
        assert refer.xml == '<Refer><Sip>%s</Sip></Refer>' % sip_address

    def test_init_remove_element_at_index(self):
        message = 'Hello from Avaya CPaaS!'
        refer = Refer()
        refer.addElement(Sip(message))
        assert refer.xml == '<Refer><Sip>%s</Sip></Refer>' % message
        # Removing the only child restores the empty element.
        refer.removeElementAtIndex(0)
        assert refer.xml == '<Refer></Refer>'

    def test_remove_element_at_out_of_range_index(self):
        refer = Refer()
        refer.addElement(Sip('Hello from Avaya CPaaS!'))
        bad_index = len(refer._content)
        self.assertRaises(
            IndexError, lambda: refer.removeElementAtIndex(bad_index))

    def test_init_with_optional_attributes(self):
        method = HttpMethod.GET
        assert Refer(method=method).xml == \
            '<Refer method="%s"></Refer>' % (method.value)

    def test_init_with_unsupported_attributes(self):
        self.assertRaises(TypeError, lambda: Refer(foo='bar'))

    def test_with_update_attributes(self):
        refer = Refer()
        timeout = 0
        refer.timeout = timeout
        assert refer.xml == '<Refer timeout="%s"></Refer>' % (timeout)

    def test_udefinded_method_with_primitive_type(self):
        self.assertRaises(TypeError, lambda: Refer().addElement(0.5))

    def test_udefinded_method_with_base_node(self):
        self.assertRaises(AttributeError, lambda: Refer().url)
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "dbc8053815f2531c9b8ef9059139705f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 69,
"avg_line_length": 32.42253521126761,
"alnum_prop": 0.6185925282363163,
"repo_name": "zang-cloud/zang-python",
"id": "b6738d7a05ade3803c8b518f9f81cffdd3d373fc",
"size": "2302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/inboundxml/test_refer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "353373"
}
],
"symlink_target": ""
} |
from mock import Mock
from allura.model import VotableArtifact
class TestVotableArtifact(object):
    """Behavioural tests for the VotableArtifact voting mixin."""

    def setUp(self):
        self.user1 = Mock()
        self.user1.username = 'test-user'
        self.user2 = Mock()
        self.user2.username = 'user2'

    def test_vote_up(self):
        art = VotableArtifact()
        art.vote_up(self.user1)
        assert art.votes_up == 1
        assert art.votes_up_users == [self.user1.username]
        art.vote_up(self.user2)
        assert art.votes_up == 2
        assert art.votes_up_users == [self.user1.username,
                                      self.user2.username]
        # Voting a second time toggles the user's vote off.
        art.vote_up(self.user1)
        assert art.votes_up == 1
        assert art.votes_up_users == [self.user2.username]
        assert art.votes_down == 0, 'vote_down must be 0 if we voted up only'
        assert len(art.votes_down_users) == 0

    def test_vote_down(self):
        art = VotableArtifact()
        art.vote_down(self.user1)
        assert art.votes_down == 1
        assert art.votes_down_users == [self.user1.username]
        art.vote_down(self.user2)
        assert art.votes_down == 2
        assert art.votes_down_users == [self.user1.username,
                                        self.user2.username]
        # Voting a second time toggles the user's vote off.
        art.vote_down(self.user1)
        assert art.votes_down == 1
        assert art.votes_down_users == [self.user2.username]
        assert art.votes_up == 0, 'vote_up must be 0 if we voted down only'
        assert len(art.votes_up_users) == 0

    def test_change_vote(self):
        art = VotableArtifact()
        art.vote_up(self.user1)
        # A down-vote by the same user replaces the earlier up-vote.
        art.vote_down(self.user1)
        assert art.votes_down == 1
        assert art.votes_down_users == [self.user1.username]
        assert art.votes_up == 0
        assert len(art.votes_up_users) == 0
| {
"content_hash": "49fdf58674ae81f1d2a91461504e7094",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 31.433333333333334,
"alnum_prop": 0.5885471898197243,
"repo_name": "Bitergia/allura",
"id": "cfb3bf84a0dfe17218a3c7c632cd1edeaea1d61c",
"size": "1886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Allura/allura/tests/unit/test_mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2985957"
},
{
"name": "JavaScript",
"bytes": "647110"
},
{
"name": "Perl",
"bytes": "184"
},
{
"name": "Puppet",
"bytes": "2677"
},
{
"name": "Python",
"bytes": "1990317"
},
{
"name": "Ruby",
"bytes": "4134"
},
{
"name": "Shell",
"bytes": "6672"
}
],
"symlink_target": ""
} |
def run(name, fields="+l", exclude=None, ctags="/usr/local/bin/ctags",
        creates='tags'):
    """Salt state: generate a ctags file for the tree rooted at *name*.

    name
        Directory in which to run ctags (also the cwd of the command).
    fields
        Value(s) for the ctags ``--fields`` option; a string or list.
    exclude
        Pattern(s) for ``--exclude``; a string, list, or None.
    ctags
        Path to the ctags executable.
    creates
        File (relative to *name*) whose existence makes the state a no-op.
    """
    from os.path import join

    def _flags(option, values):
        # Normalize None/str/list into repeated " --<option>=<value>" text;
        # factors out the normalization previously duplicated per option.
        if values is None:
            values = []
        elif isinstance(values, str):
            values = [values]
        return (" --%s=" % option).join([""] + values)

    cmd = "{ctags} -R {fields} {exclude} .".format(
        ctags=ctags,
        fields=_flags("fields", fields),
        exclude=_flags("exclude", exclude))
    return __states__['cmd.run'](
        name=cmd, cwd=name, creates=join(name, creates))
| {
"content_hash": "d6c6e119a197f9df5fbebca393fc332a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 33.6,
"alnum_prop": 0.5357142857142857,
"repo_name": "mdavezac/pepper",
"id": "47afac521d4894723bb6514e0aa98b9b3a7bfd4f",
"size": "672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_states/ctags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "12789"
},
{
"name": "Lua",
"bytes": "1252"
},
{
"name": "Python",
"bytes": "31883"
},
{
"name": "SaltStack",
"bytes": "25941"
},
{
"name": "Shell",
"bytes": "3067"
},
{
"name": "VimL",
"bytes": "1392"
}
],
"symlink_target": ""
} |
import hashlib
import os
import platform
import re
import signal
import sys
import tempfile
import time
# Prefer the bundled psutil from ./lib; on platforms where it is missing or
# unsupported, install a stub that keeps the name defined but fails loudly
# if process inspection is actually attempted.
try:
    sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "lib")))
    import psutil
except (ImportError, NotImplementedError):
    class psutil(object):
        class Process(object):
            def __init__(self, pid):
                raise RuntimeError("psutil not available for this platform")
_platform = platform.system()
import subprocess
if _platform == "Windows":
    # A new process group allows sending console control events to the child.
    POPEN_FLAGS = subprocess.CREATE_NEW_PROCESS_GROUP
    # NOTE(review): time.clock was removed in Python 3.8 — confirm the
    # supported interpreter range or switch to time.perf_counter.
    prof_timer = time.clock
else:
    POPEN_FLAGS = 0
    prof_timer = time.time
# Location of the bundled minimal debugger package.
PATH_DBG = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lib", "dbgmin"))
# Result codes returned by the pre-run callback (see _call_callback/run).
CB_PASS = 0
CB_FAIL = 1
CB_HANG = 2
CB_ERROR = 3
# Files queued for deferred deletion.
_delete_queue = []
# Default per-run time limit, in seconds.
DEFAULT_TIMEOUT = 60
class _Classification(object):
"""
Internal class used for classification of FuzzResults
"""
SUPPORTED_CLS = set(["EXCESS_MEMORY_USAGE", "EXPLOITABLE", "NOT_AN_EXCEPTION",
"PROBABLY_EXPLOITABLE", "PROBABLY_NOT_EXPLOITABLE", "TIMEOUT", "UNKNOWN"])
def __init__(self, description):
if isinstance(description, _Classification):
description = description.description
else:
assert description in self.SUPPORTED_CLS, "Unsupported classification: %s" % description
self.description = description
def __str__(self):
return self.description
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __bool__(self):
return self.description != "NOT_AN_EXCEPTION"
# Module-level singleton instances, one per supported classification.
EXCESS_MEMORY_USAGE = _Classification("EXCESS_MEMORY_USAGE")
EXPLOITABLE = _Classification("EXPLOITABLE")
NOT_AN_EXCEPTION = _Classification("NOT_AN_EXCEPTION")
PROBABLY_EXPLOITABLE = _Classification("PROBABLY_EXPLOITABLE")
PROBABLY_NOT_EXPLOITABLE = _Classification("PROBABLY_NOT_EXPLOITABLE")
TIMEOUT = _Classification("TIMEOUT")
UNKNOWN = _Classification("UNKNOWN")
class TargetPid(object):
    """Sentinel placeholder for the target process id in callback arguments.

    Put an instance of this class in the *callback_args* list passed to
    :func:`run` (or one of the similar functions in this module); before the
    callback is invoked, the instance is replaced with the pid as an integer.
    """
    pass
def run(target_cmd, stdin=None, callback=None, callback_args=None, env=None,
        timeout=DEFAULT_TIMEOUT, memory_limit=None, idle_limit=None):
    """
    This function runs the given target command in a sub-process, and
    returns the results. The *target_cmd* parameter should be a list
    of command line arguments, with the first argument being the target
    executable. The expected duration of the run is determined automatically.

    :func:`run` returns a :class:`FuzzResult`.

    Availability: Unix, Windows.
    """
    fail_class = NOT_AN_EXCEPTION
    if env is None:
        env = dict(os.environ)
    if _platform in ("Linux", "QNX"):
        if "ASAN_OPTIONS" not in env:
            # If the project does not set ASAN_OPTIONS use these as defaults
            env["ASAN_OPTIONS"] = "alloc_dealloc_mismatch=1 " \
                                  "allocator_may_return_null=1 " \
                                  "allow_user_poisoning=0 " \
                                  "check_initialization_order=1 " \
                                  "check_malloc_usable_size=0 " \
                                  "detect_stack_use_after_return=1 " \
                                  "disable_core=1 " \
                                  "exitcode=139 " \
                                  "handle_segv=1 " \
                                  "strict_init_order=1 " \
                                  "strict_memcmp=1"
        env["G_DEBUG"] = "gc-friendly"  # https://developer.gnome.org/glib/unstable/glib-running.html
        env["G_SLICE"] = "always-malloc"  # https://developer.gnome.org/glib/unstable/glib-running.html#G_SLICE
        env["LIBC_FATAL_STDERR_"] = "1"
    # Capture both stdout and stderr into one temp file.
    with tempfile.TemporaryFile(mode="w+t") as f:
        p = subprocess.Popen(target_cmd, stdin=stdin, stdout=f, stderr=f, env=env)
        try:
            cb_res = _call_callback(callback, callback_args, p.pid)
            if cb_res == CB_ERROR:
                raise RuntimeError("callback() returned error")
            target_mon = TargetMonitor(p.pid, idle_limit=idle_limit,
                                       memory_limit=memory_limit, time_limit=timeout)
            # Poll the child until it exits or trips one of the limits.
            while p.poll() is None:
                if target_mon.check_memory():
                    fail_class = EXCESS_MEMORY_USAGE
                    break
                if target_mon.check_idle():
                    break
                if target_mon.check_timeout():
                    fail_class = TIMEOUT
                    break
                time.sleep(0.01)
        finally:
            # Forcibly terminate the child if it is still running.
            if p.poll() is None:
                if _platform == "Windows":
                    with open(os.devnull, "w") as fp:
                        subprocess.call(["taskkill", "/pid", str(p.pid), "/f"], stdout=fp, stderr=fp)
                else:
                    os.kill(p.pid, signal.SIGKILL)
            exit_code = p.wait()
        f.seek(0, os.SEEK_SET)
        stdout = f.read()
    if exit_code != 0:
        if _platform in ("Linux", "QNX"):
            # Recover the signal number from either a negative returncode or
            # a shell-style 128+N exit code.
            if exit_code < 0:
                sig = exit_code * -1
            elif exit_code > 128:
                sig = exit_code - 128
            else:
                sig = 0
            if sig not in (signal.SIGINT, signal.SIGKILL):
                fail_class = UNKNOWN
            elif exit_code in (126, 127):
                # 126/127 are shell "cannot execute"/"not found" codes.
                raise RuntimeError("Process exited with code %d.\n%s" % (exit_code, stdout))
        elif _platform == "Windows":
            fail_class = UNKNOWN
    # The callback verdict overrides the exit-code based classification.
    if cb_res == CB_HANG:
        fail_class = TIMEOUT
    elif cb_res == CB_FAIL:
        fail_class = UNKNOWN
    return FuzzResult(classification=fail_class, text=stdout, exit_code=exit_code)
def _call_callback(callback, cb_args, pid):
    """Invoke *callback* with *cb_args*, substituting the target pid.

    Any ``TargetPid`` placeholder in *cb_args* is replaced with *pid*
    before the call.  When no callback is given, ``CB_PASS`` is returned
    so callers can treat "no callback" as success.
    """
    if callback is None:
        return CB_PASS
    # Copy the arguments so the caller's sequence is never mutated.
    args = [] if cb_args is None else list(cb_args)
    for idx, arg in enumerate(args):
        if isinstance(arg, TargetPid):
            args[idx] = pid
    return callback(*args)
def _limit_output_length(output):
    """Truncate *output* to at most ``FuzzResult.MAX_TEXT`` characters.

    Truncation drops the *beginning* of the text (the most useful crash
    information is usually at the end) and appends a marker recording the
    original size.
    """
    limit = FuzzResult.MAX_TEXT
    if len(output) <= limit:
        return output
    marker = "\n*** TRUNCATED, orig %d bytes ***\n" % len(output)
    # Keep the tail, leaving room for the marker itself.
    return "%s%s" % (output[-(limit - len(marker)):], marker)
# Combined multiline scanner for the interesting records in CERT "exploitable"
# / !exploitable debugger output.  Each top-level alternative captures one
# record type via a named group (classification, exc_type, stack frame
# module/symbol/offset, an embedded Python traceback, short_desc, or the final
# "Last event" exception code); process_exploitable_output() dispatches on
# which group matched.
_RE_PROCESS_EXP = re.compile(r'''^CLASSIFICATION:
(?P<classification>.*) |
^EXCEPTION_TYPE:
(?P<exc_type>[A-Z_]+) |
^STACK_FRAME:
((?P<module>[^!]+)!)?
(?P<symbol>[^+]+)
(\+0x(?P<offset>[0-9a-f]+))? |
^(?P<traceback>
\s*Traceback\s\(most\srecent\scall\slast\):$ # traceback in exploitable
\s+File\s"[^"]+?exploitable/exploitable\.py".*$\s+.*$ # first frame in exploitable.py
(\s+File.*$\s+.*$)* # one or more following frames
\s*.*$) | # final line (exception desc)
^SHORT_DESCRIPTION:(?P<short_desc>[A-Za-z]+) |
^Last\ event:\ .+?code\ (?P<exception>[0-9a-fA-F]+)
''', re.MULTILINE|re.VERBOSE)
def process_exploitable_output(stdout):
    '''This function is used to process the output from CERT Exploitable and !exploitable.

    Scans *stdout* with _RE_PROCESS_EXP and returns a
    (backtrace, classification, exception) tuple: a list of :class:`LSO`
    frames, a _Classification value, and the numeric exception/event code
    (0 when none was reported).
    '''
    classification = None
    exception = 0
    backtrace = []
    short_desc = None
    exc_type = None
    # Things noticed along the way.
    stack_cookie = None
    exploitable_exception = None
    # Look through the lines of debugger output for stuff.
    for m in _RE_PROCESS_EXP.finditer(stdout):
        if m.group('classification'):
            classification = _Classification(m.group('classification'))
        elif m.group('exc_type'):
            exc_type = m.group('exc_type')
        elif m.group('exception'):
            exception = int(m.group('exception'), 16)
            # 0xC0000409 is treated as inherently exploitable below
            # (presumably STATUS_STACK_BUFFER_OVERRUN / fail-fast -- confirm).
            exploitable_exception = (exception in [0xC0000409])
        elif m.group('short_desc'):
            short_desc = m.group('short_desc')
        elif m.group('symbol'):
            # A STACK_FRAME record: module!symbol+0xoffset (module and
            # offset are optional in the pattern).
            module = m.group('module')
            symbol = m.group('symbol')
            offset = m.group('offset')
            if offset is not None:
                offset = int(offset, 16)
            backtrace.append(LSO((module, symbol, offset)))
            # A stack cookie is recognized by its symbol name.
            # A stack cookie causes an exception and a handler call.
            # A stack cookie exception is considered exploitable.
            stack_cookie = symbol in ['__report_gsfailure']
            if stack_cookie:
                # Erase the handler traceback.
                backtrace = []
        elif m.group('traceback'):
            # A Python traceback here means the triage tool itself crashed,
            # not the fuzz target -- surface that loudly.
            raise RuntimeError("Crash in CERT triage tools:\n%s" % m.group('traceback'))
        else:
            # Unrecogniz(ed)(able) line from debugger
            pass
    if short_desc == "StackExhaustion":
        # Collapse runaway recursion: keep only the last 10 recorded frames.
        backtrace = backtrace[-10:]
    # Sometimes override the classification.
    if stack_cookie or exploitable_exception or \
            exc_type == "STATUS_STACK_BUFFER_OVERRUN":
        classification = EXPLOITABLE
    # Real exception codes that were classified as "not an exception" are
    # downgraded to UNKNOWN rather than silently ignored.
    if exception >= 0x40000000 and exception < 0xC0200000 and \
            classification == NOT_AN_EXCEPTION:
        classification = UNKNOWN
    # these are false positive things, like ^C
    if exception in [0x40010005, 0x80000003]:
        classification = NOT_AN_EXCEPTION
    # Issues that indicate that the target may not be configured properly
    if exception in [0xC0000417]:
        # 0xC0000417: STATUS_INVALID_CRUNTIME_PARAMETER
        classification = UNKNOWN
    if classification is None:
        classification = NOT_AN_EXCEPTION
    return (backtrace, classification, exception)
class FuzzResult(object):
    """
    A notable result from a fuzzing iteration. This should be yielded from an
    execution of :meth:`Fuzzer.do_iteration`. It will be reported to the
    ALF central server at the next check-in interval, and accessible from the ALF
    website thereafter. The result will be associated with the mutation filename
    in the :meth:`Fuzzer.do_iteration` execution that yields this object.
    """
    MAX_TEXT = 1 * 1024 * 1024
    """Maximum string length allowed for the :attr:`text` attribute."""
    MAJOR_HASH_DEPTH = 4 # Do NOT change this unless you recalculate all hashs
    """Maximum stack depth for calculating the major hash."""

    def __init__(self, classification=NOT_AN_EXCEPTION, text="", backtrace=None, exit_code=0):
        self.classification = classification
        self.text = text
        self.backtrace = backtrace if backtrace is not None else []
        self.exit_code = exit_code

    @property
    def classification(self):
        """A classification constant as defined in :mod:`alf.debug`."""
        return self._classification

    @classification.setter
    def classification(self, value):
        # Normalize through _Classification so invalid values are rejected early.
        self._classification = _Classification(value)

    @property
    def backtrace(self):
        """A list of :class:`LSO` objects representing a backtrace at the time of the crash.
        These are ordered by descending time (ie. most recent is first in the list).
        It is an error for this to be empty if :attr:`~alf.FuzzResult.classification` is
        anything other than :data:`~alf.debug.NOT_AN_EXCEPTION`."""
        return self._backtrace

    @backtrace.setter
    def backtrace(self, value):
        # Copy into a fresh list so later mutation of the caller's sequence
        # cannot affect this result.
        self._backtrace = list(value)

    @property
    def exit_code(self):
        """This is the exit code from the target process."""
        return self._exit_code

    @exit_code.setter
    def exit_code(self, exit_code):
        # BUG FIX: the previous check
        #   not isinstance(exit_code, int) and hasattr(__builtins__, "long") and ...
        # could never raise: inside an imported module __builtins__ is a dict,
        # so hasattr(__builtins__, "long") is False, making the whole condition
        # False regardless of exit_code's type.  Build the accepted integer
        # types explicitly instead.
        try:
            integer_types = (int, long)  # Python 2: exit codes may be long
        except NameError:
            integer_types = (int,)  # Python 3: int only
        if not isinstance(exit_code, integer_types):
            raise TypeError("exit_code must be an int, got %s" % type(exit_code))
        self._exit_code = exit_code

    @property
    def text(self):
        """
        A freeform string to describe the result. It is suggested this include
        the standard output and error streams from the target. Cannot exceed
        :attr:`MAX_TEXT` in length.
        """
        return self._text

    @text.setter
    def text(self, value):
        if not isinstance(value, str):
            raise TypeError("Expecting text to be a str, got a %s" % type(value))
        # Truncate (keeping the tail) so stored text never exceeds MAX_TEXT.
        self._text = _limit_output_length(value)

    def _calc_hash(self, max_depth, use_offset):
        """Hash the first *max_depth* backtrace frames into an uppercase hex digest.

        When *use_offset* is true the per-frame offset participates in the
        hash as well (used for the minor hash).
        """
        hasher = hashlib.sha224()
        for s in self.backtrace[:max_depth]:
            sym = s.get_str(include_offset=False)
            if not sym:
                sym = "Unknown"
            else:
                sym = sym.lower()
            # NOTE(review): hashlib.update() requires bytes on Python 3; this
            # module appears to target Python 2 -- confirm before porting.
            hasher.update(sym)
            if use_offset:
                # to be consistent make sure we are dealing with an int not a str
                # that could be base 10 or 16 or 0X or 0x...
                offset = s.off if s.off is not None else 0
                # Same __builtins__/"long" fix as the exit_code setter: accept
                # Python 2 longs explicitly (LSO's int() conversion can yield
                # long for large offsets on Python 2).
                try:
                    integer_types = (int, long)
                except NameError:
                    integer_types = (int,)
                assert isinstance(offset, integer_types), \
                    "Offset is %s should be int. Value: %s" % (type(offset), offset)
                hasher.update(str(offset))
        # sha224 is 224bits or 28 bytes or 56 hex chars
        return hasher.hexdigest().upper()

    @property
    def major(self):
        """This is the major hash of this result based on the backtrace for grouping."""
        return self._calc_hash(self.MAJOR_HASH_DEPTH, False)

    @property
    def minor(self):
        """This is the minor hash of this result based on the backtrace for grouping."""
        return self._calc_hash(len(self.backtrace), True)
class FuzzDeletion(object):
    """
    A file or folder created by a fuzzing iteration which should be cleaned up safely. This
    should be yielded from an execution of :meth:`Fuzzer.do_iteration`. The
    file or folder will be deleted in a safe manner after :meth:`Fuzzer.do_iteration`
    returns.
    """

    def __init__(self, path):
        # Reject non-string paths up front so the cleanup code can rely on str.
        if not isinstance(path, str):
            raise TypeError("Expecting path to be a str, got a %s" % type(path))
        # The path of the file or folder to be deleted.
        self.path = path
class LSO(object):
    """
    Representation of a resolved address in an executable image.
    *lso* is a tuple containing the library name, symbol name, and offset.
    The interpretation of offset is given in the table below.
    =========== ======== ===================
    Library     Symbol   Offset
    =========== ======== ===================
    (any value) not None relative to symbol
    not None    None     relative to library
    None        None     absolute address
    =========== ======== ===================
    This is the primary way that symbols are represented internally by ALF,
    which allows for consistent handling and formatting.
    Call stacks and backtraces are represented as lists of LSOs, organized
    head-first.  (Note that :py:meth:`list.pop` assumes tail-first by
    default.  Use ``pop(0)``.)
    """

    def __init__(self, lso):
        lib, sym, off = lso
        self.lib = lib  # library name, or None
        self.sym = sym  # symbol name, or None
        # Normalize the offset to an int whenever one is present.
        self.off = None if off is None else int(off)

    def get_str(self, include_offset=True):
        """Return a string representation of the address in the form
        'library!symbol+offset' if *include_offset* is True, otherwise in the
        form 'library!symbol'."""
        named = [part for part in (self.lib, self.sym) if part]
        result = "!".join(named) if named else "Unknown"
        if include_offset and self.off is not None:
            result = "%s+0x%x" % (result, self.off)
        return result

    def _key(self):
        # Equality and ordering are defined over the (lib, sym, off) triple.
        return (self.lib, self.sym, self.off)

    def __eq__(self, other):
        return self._key() == (other.lib, other.sym, other.off)

    def __ne__(self, other):
        return self._key() != (other.lib, other.sym, other.off)

    def __le__(self, other):
        return self._key() <= (other.lib, other.sym, other.off)

    def __ge__(self, other):
        return self._key() >= (other.lib, other.sym, other.off)

    def __lt__(self, other):
        return self._key() < (other.lib, other.sym, other.off)

    def __gt__(self, other):
        return self._key() > (other.lib, other.sym, other.off)

    def __str__(self):
        return self.get_str(True)

    def __repr__(self):
        return str(self)
def _get_delete_path():
    """Pop and return the next queued deletion path, or None when the queue is empty."""
    # EAFP: pop() is atomic under the GIL, so a check-then-pop race is avoided.
    try:
        next_path = _delete_queue.pop(0)
    except IndexError:
        next_path = None
    return next_path
def delete(path):
    """
    Delete files/folders in a safer manner than using :func:`os.remove` or
    :func:`shutil.rmtree` directly.

    The path is appended to the module's deletion queue and removed later
    by whatever drains ``_delete_queue``, rather than being deleted
    immediately.

    :param path: path of the file or folder to delete
    :raises TypeError: if *path* is not a str
    """
    # Validate with an explicit exception instead of assert (asserts are
    # stripped under "python -O"), matching the TypeError that FuzzDeletion
    # raises for the same mistake.
    if not isinstance(path, str):
        raise TypeError("Expecting path to be a str, got a %s" % type(path))
    _delete_queue.append(path)
def lib_trim(back_trace, noise, trim_bottom=False):
    """
    This can be used to remove unwanted noise from a list of :class:`LSO` objects
    representing a backtrace. It will return a list of :class:`LSO` objects.

    back_trace is a list of :class:`LSO` objects representing a backtrace.
    noise is a list of strings that represent the library of entries that will
    be removed from back_trace.
    trim_bottom will trim noise symbols off the bottom of the call stack.

    Note: *back_trace* is modified in place.
    """
    assert isinstance(back_trace, list)
    assert isinstance(noise, list)
    if trim_bottom:
        # Work from the other end by reversing, trimming, and reversing back.
        back_trace.reverse()
    # Drop leading frames whose library is in the noise list.
    while back_trace and back_trace[0].lib in noise:
        back_trace.pop(0)
    if not back_trace:
        return []
    if trim_bottom:
        back_trace.reverse()
    return back_trace
class TargetMonitor(object):
    """Poll a target process (via psutil) against idle, memory and time limits.

    Each check_*() method is rate-limited by the matching *_CHECK interval and
    returns True when the corresponding limit has been exceeded.

    NOTE(review): uses the psutil 1.x accessor names (get_cpu_percent,
    get_children, get_memory_info); psutil >= 2 renamed these without the
    "get_" prefix -- confirm the pinned psutil version.
    """
    IDLE_CHECK = 0.1 # seconds
    IDLE_THRESHOLD = 3.0 # percent
    MEMORY_CHECK = 0.1
    TIMEOUT_CHECK = 0.1

    def __init__(self, pid, idle_limit=None, memory_limit=None, time_limit=None):
        # A limit of None disables the corresponding check.
        self.limit = {"idle":idle_limit,
                      "memory":memory_limit,
                      "time":time_limit}
        # Timestamp of the last time each check actually polled (rate limiting).
        self.check = {"idle":0,
                      "memory":0,
                      "time":0}
        # Start of the current below-threshold CPU period, or None if busy.
        self.idle_start = None
        try:
            if isinstance(pid, psutil.Process):
                self.ps = pid
            else:
                self.ps = psutil.Process(pid)
            # Prime cpu_percent so later interval=0 calls return a real delta.
            self.ps.get_cpu_percent(interval=0)
        except psutil.NoSuchProcess:
            # Target already exited; every check becomes a no-op (self.ps None).
            self.ps = None

    def check_idle(self):
        """Return True once the process tree stayed below IDLE_THRESHOLD CPU
        for longer than the idle limit."""
        now = prof_timer()
        if self.limit["idle"] and self.ps and (now - self.check["idle"]) > self.IDLE_CHECK:
            self.check["idle"] = now
            try:
                cpu_time = self.ps.get_cpu_percent(interval=0) # target cpu usage
                # Use the busiest process in the tree, not the sum.
                for child in self.ps.get_children(recursive=True):
                    try:
                        c_cpu = child.get_cpu_percent(interval=0)
                        if c_cpu > cpu_time:
                            cpu_time = c_cpu
                    except psutil.NoSuchProcess:
                        pass
            except psutil.NoSuchProcess:
                return False
            if cpu_time < self.IDLE_THRESHOLD:
                if self.idle_start and (now - self.idle_start) > self.limit["idle"]:
                    return True
                if self.idle_start is None:
                    # First sample of a quiet period: start timing it.
                    self.idle_start = now
            else:
                # Activity observed; reset the idle timer.
                self.idle_start = None
        return False

    def check_memory(self):
        """Return True when the process tree's total resident memory exceeds
        the memory limit."""
        now = prof_timer()
        if self.limit["memory"] and self.ps and (now - self.check["memory"]) > self.MEMORY_CHECK:
            self.check["memory"] = now
            try:
                target_mem = self.ps.get_memory_info()[0] # target memory usage
                for child in self.ps.get_children(recursive=True):
                    try:
                        target_mem += child.get_memory_info()[0]
                    except psutil.NoSuchProcess:
                        pass
            except psutil.NoSuchProcess:
                target_mem = 0
            if target_mem > self.limit["memory"]:
                return True
        return False

    def check_timeout(self):
        """Return True when the target has been alive longer than the time limit."""
        # NOTE(review): this uses time.time() where the other checks use
        # prof_timer(), presumably because create_time() is a wall-clock
        # timestamp -- confirm.
        now = time.time()
        if self.limit["time"] and self.ps and (now - self.check["time"]) > self.TIMEOUT_CHECK:
            self.check["time"] = now
            try:
                target_time = self.ps.create_time()
            except psutil.NoSuchProcess:
                return False
            if time.time() - target_time > self.limit["time"]:
                return True
        return False
if _platform == "Windows":
    # Windows-only page-heap management via the gflags.exe debugging tool.
    TOOL_GFLAGS = os.path.join(PATH_DBG, "gflags.exe")
    # Maps target basename -> the kwargs it was last enabled with, so repeated
    # _set_gflags() calls with unchanged options can be skipped.
    _gflags_enabled = dict()
    # Default page-heap options applied when _set_gflags() receives no kwargs.
    _gflags_args = {"backwards":False, "full":True, "leaks":False, "no_sync":False,
                    "notraces":False, "protect":True, "unaligned":True}

    def _set_gflags(target, **kwargs):
        """
        Enable page heap with gflags
        backwards: Places the zone of reserved virtual memory at the beginning of an allocation,
        rather than at the end. As a result, the debugger traps overruns at the beginning of the
        buffer, instead of those at the end of the buffer. Valid only with the /full parameter.
        full: Turns on full page heap verification for the process. Full page heap verification
        places a zone of reserved virtual memory at the end of each allocation.
        leaks: Checks for heap leaks when a process ends. The /leaks parameter disables
        full page heap. When /leaks is used, the /full parameter and parameters that modify
        the /full parameter, such as /backwards, are ignored, and GFlags performs standard
        page heap verification with a leak check.
        no_sync: Checks for unsynchronized access. This parameter causes a break if it detects that
        a heap created with the HEAP_NO_SERIALIZE flag is accessed by different threads.
        Do not use this flag to debug a program that includes a customized heap manager.
        Functions that synchronize heap access cause the page heap verifier to report
        synchronization faults that do not exist.
        notraces: Specifies that run-time stack traces are not saved. This option improves
        performance slightly, but it makes debugging much more difficult. This parameter is valid,
        but its use is not recommended.
        protect: Protects heap internal structures. This test is used to detect random heap
        corruptions. It can make execution significantly slower.
        unaligned: Place allocation at the end of the page so off-by-one issues trigger an AV.
        NOTE: Some programs make assumptions about 8-byte alignment and they stop working
        correctly with the /unaligned parameter.
        More info: http://msdn.microsoft.com/en-us/library/windows/hardware/ff549566
        """
        target = os.path.basename(target)
        # NOTE(review): a second call with no kwargs stores _gflags_args here
        # but then compares against {} next time, so it re-runs gflags --
        # possibly intended, but looks like an oversight.
        if target in _gflags_enabled and _gflags_enabled[target] == kwargs:
            return # no changes necessary
        command = [TOOL_GFLAGS, "/p", "/enable", target]
        if not kwargs:
            kwargs = _gflags_args
        for arg, value in kwargs.items():
            if arg not in _gflags_args:
                raise RuntimeError("Invalid argument: %s" % arg)
            if not isinstance(value, bool):
                raise RuntimeError("Invalid type for argument '%s', should be " \
                                   "bool not %s" % (arg, type(value).__name__))
            if value:
                command.append("/%s" % arg)
        with open(os.devnull, "w") as nul:
            # NOTE(review): assert is stripped under "python -O" -- the gflags
            # exit status would then go unchecked.
            assert(subprocess.Popen(command, stderr=nul, stdout=nul).wait() == 0)
        _gflags_enabled[target] = kwargs

    def _disable_gflags(target):
        # disable page heap with gflags
        # Returns the kwargs the target was previously enabled with, or None.
        target = os.path.basename(target)
        command = [TOOL_GFLAGS, "/p", "/disable", target]
        with open(os.devnull, "w") as nul:
            assert(subprocess.Popen(command, stderr=nul, stdout=nul).wait() == 0)
        return _gflags_enabled.pop(target, None)
| {
"content_hash": "50dd5574a4d0386dc343df7ee842a566",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 123,
"avg_line_length": 39.68238993710692,
"alnum_prop": 0.5718757429273318,
"repo_name": "blackberry/ALF",
"id": "8cda15eea03fb6468a915e60159c11c93e7db6b2",
"size": "26079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alf/debug/_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "97347"
},
{
"name": "Python",
"bytes": "280755"
},
{
"name": "Shell",
"bytes": "4796"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, Integer, String, Boolean
from todo.database import Base
class Entry(Base):
    """A single todo item persisted in the "entries" table."""

    __tablename__ = "entries"

    id = Column(Integer, primary_key=True)
    title = Column(String)
    order = Column(Integer)
    completed = Column(Boolean)

    def __init__(self, title=None, order=None):
        self.title = title
        self.order = order
        # Freshly created entries always start out pending.
        self.completed = False

    def __repr__(self):
        return "<Entry: {}>".format(self.title)
| {
"content_hash": "378be16a62095a064f941c497f1b2ce7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 27.88235294117647,
"alnum_prop": 0.6329113924050633,
"repo_name": "Faerbit/todo-backend-flask",
"id": "c97869a1d23a4d2de17c6d43c5d92529ff75bedc",
"size": "474",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "todo/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20256"
}
],
"symlink_target": ""
} |
from nose.tools import ok_, eq_, raises
from flask import Flask, request
from flask.views import MethodView
from flask.ext.admin import base
class MockView(base.BaseView):
    """Minimal BaseView whose call/access/visibility behaviour tests can toggle."""

    # Flags flipped by individual tests to simulate denied calls,
    # denied access and hidden menu entries.
    allow_call = True
    allow_access = True
    visible = True

    @base.expose('/')
    def index(self):
        return 'Success!'

    @base.expose('/test/')
    def test(self):
        return self.render('mock.html')

    def _handle_view(self, name, **kwargs):
        # Short-circuit with the failure marker when calls are disabled.
        if not self.allow_call:
            return 'Failure!'
        return super(MockView, self)._handle_view(name, **kwargs)

    def is_accessible(self):
        return self.allow_access and super(MockView, self).is_accessible()

    def is_visible(self):
        return self.visible and super(MockView, self).is_visible()
class MockMethodView(base.BaseView):
    """View exposing Flask MethodView classes via expose_plugview."""

    @base.expose('/')
    def index(self):
        return 'Success!'

    @base.expose_plugview('/_api/1')
    class API1(MethodView):
        # Implements all four HTTP verbs; each renders the same template.
        def get(self, cls):
            return cls.render('method.html', request=request, name='API1')

        def post(self, cls):
            return cls.render('method.html', request=request, name='API1')

        def put(self, cls):
            return cls.render('method.html', request=request, name='API1')

        def delete(self, cls):
            return cls.render('method.html', request=request, name='API1')

    @base.expose_plugview('/_api/2')
    class API2(MethodView):
        # Only GET and POST are implemented; other verbs should yield 405.
        def get(self, cls):
            return cls.render('method.html', request=request, name='API2')

        def post(self, cls):
            return cls.render('method.html', request=request, name='API2')

    @base.expose_plugview('/_api/3')
    @base.expose_plugview('/_api/4')
    class DoubleExpose(MethodView):
        # Exposed twice: reachable at both /_api/3 and /_api/4.
        def get(self, cls):
            return cls.render('method.html', request=request, name='API3')
def test_baseview_defaults():
    # A view that was never registered with an Admin has no wiring at all.
    view = MockView()
    for attr in ('name', 'category', 'endpoint', 'url',
                 'static_folder', 'admin', 'blueprint'):
        eq_(getattr(view, attr), None)


def test_base_defaults():
    # Default construction: built-in name/url/endpoint, no app bound yet.
    admin = base.Admin()
    eq_(admin.name, 'Admin')
    eq_(admin.url, '/admin')
    eq_(admin.endpoint, 'admin')
    eq_(admin.app, None)
    ok_(admin.index_view is not None)
    eq_(admin.index_view._template, 'admin/index.html')
    # The default index view is registered automatically.
    eq_(len(admin._views), 1)
    eq_(admin._views[0], admin.index_view)


def test_custom_index_view():
    # A custom index view replaces the default one and overrides the
    # admin's endpoint and URL.
    index = base.AdminIndexView(name='a', category='b', endpoint='c',
                                url='/d', template='e')
    admin = base.Admin(index_view=index)
    eq_(admin.endpoint, 'c')
    eq_(admin.url, '/d')
    ok_(admin.index_view is index)
    eq_(index.name, 'a')
    eq_(index.category, 'b')
    eq_(index._template, 'e')
    eq_(len(admin._views), 1)
    eq_(admin._views[0], index)
def test_base_registration():
    # Passing an app to the constructor binds it immediately.
    app = Flask(__name__)
    admin = base.Admin(app)
    eq_(admin.app, app)
    ok_(admin.index_view.blueprint is not None)


def test_admin_customizations():
    # Custom name, mount point and static path must all be honoured.
    app = Flask(__name__)
    admin = base.Admin(app, name='Test', url='/foobar',
                       static_url_path='/static/my/admin')
    eq_(admin.name, 'Test')
    eq_(admin.url, '/foobar')
    eq_(admin.index_view.blueprint.static_url_path, '/static/my/admin')
    # The index page is served from the custom mount point.
    resp = app.test_client().get('/foobar/')
    eq_(resp.status_code, 200)
def test_baseview_registration():
    admin = base.Admin()
    view = MockView()
    bp = view.create_blueprint(admin)

    # Registration fills in the base properties...
    eq_(view.admin, admin)
    ok_(view.blueprint is not None)
    # ...and derives endpoint/url/name from the class name.
    eq_(view.endpoint, 'mockview')
    eq_(view.url, '/admin/mockview')
    eq_(view.name, 'Mock View')
    # The generated blueprint mirrors the view's properties.
    eq_(bp.name, view.endpoint)
    eq_(bp.url_prefix, view.url)
    eq_(bp.template_folder, 'templates')
    eq_(bp.static_folder, view.static_folder)

    # Explicit name/endpoint are preserved verbatim.
    view = MockView(name='Test', endpoint='foobar')
    view.create_blueprint(base.Admin())
    eq_(view.name, 'Test')
    eq_(view.endpoint, 'foobar')
    eq_(view.url, '/admin/foobar')

    # A relative URL is mounted under the admin root...
    view = MockView(url='test')
    view.create_blueprint(base.Admin())
    eq_(view.url, '/admin/test')
    # ...while an absolute URL is used as-is.
    view = MockView(url='/test/test')
    view.create_blueprint(base.Admin())
    eq_(view.url, '/test/test')

    # An admin mounted at '/' keeps view URLs at the top level.
    view = MockView(endpoint='test')
    view.create_blueprint(base.Admin(url='/'))
    eq_(view.url, '/test')

    # A custom static path is propagated to the blueprint.
    view = MockView(static_url_path='/static/my/test')
    view.create_blueprint(base.Admin())
    eq_(view.blueprint.static_url_path, '/static/my/test')
def test_baseview_urls():
    # MockView exposes exactly two routes ('/' and '/test/').
    app = Flask(__name__)
    admin = base.Admin(app)
    view = MockView()
    admin.add_view(view)
    eq_(len(view._urls), 2)


@raises(Exception)
def test_no_default():
    # A plain BaseView exposes no index and cannot be registered.
    admin = base.Admin(Flask(__name__))
    admin.add_view(base.BaseView())
def test_call():
    app = Flask(__name__)
    admin = base.Admin(app)
    view = MockView()
    admin.add_view(view)
    client = app.test_client()

    # Index and both exposed routes respond while calls are allowed.
    resp = client.get('/admin/')
    eq_(resp.status_code, 200)
    resp = client.get('/admin/mockview/')
    eq_(resp.data, b'Success!')
    resp = client.get('/admin/mockview/test/')
    eq_(resp.data, b'Success!')

    # When _handle_view denies the call, its failure body is returned.
    view.allow_call = False
    resp = client.get('/admin/mockview/')
    eq_(resp.data, b'Failure!')


def test_permissions():
    app = Flask(__name__)
    admin = base.Admin(app)
    view = MockView()
    admin.add_view(view)
    client = app.test_client()

    # An inaccessible view behaves as if the route does not exist.
    view.allow_access = False
    resp = client.get('/admin/mockview/')
    eq_(resp.status_code, 404)
# NOTE(review): this function is named "get_visibility", so nose never
# collects it as a test (test functions must start with "test_"); it was
# presumably meant to be "test_visibility".  Renaming would change the
# public name, so it is only flagged here.
def get_visibility():
    app = Flask(__name__)
    admin = base.Admin(app)
    view = MockView(name='TestMenuItem')
    # A hidden view must not appear in the rendered menu.
    view.visible = False
    admin.add_view(view)
    client = app.test_client()
    rv = client.get('/admin/mockview/')
    ok_('TestMenuItem' not in rv.data.decode('utf-8'))
def test_submenu():
    app = Flask(__name__)
    admin = base.Admin(app)
    admin.add_view(MockView(name='Test 1', category='Test', endpoint='test1'))
    # The second view in the same category is made inaccessible.
    hidden = MockView(name='Test 2', category='Test', endpoint='test2')
    hidden.allow_access = False
    admin.add_view(hidden)

    ok_('Test' in admin._menu_categories)
    # Menu holds the index view plus the 'Test' category with two children.
    eq_(len(admin._menu), 2)
    eq_(admin._menu[1].name, 'Test')
    eq_(len(admin._menu[1]._children), 2)
    # Categories don't have URLs and they're not accessible themselves;
    # only the accessible child is reported.
    eq_(admin._menu[1].get_url(), None)
    eq_(admin._menu[1].is_accessible(), False)
    eq_(len(admin._menu[1].get_children()), 1)


def test_delayed_init():
    # Views added before init_app() must work once the app is bound.
    app = Flask(__name__)
    admin = base.Admin()
    admin.add_view(MockView())
    admin.init_app(app)
    resp = app.test_client().get('/admin/mockview/')
    eq_(resp.data, b'Success!')
def test_multi_instances_init():
    # Two Admin instances may share one app when their endpoints differ.
    app = Flask(__name__)
    _ = base.Admin(app)

    class ManageIndex(base.AdminIndexView):
        pass

    _ = base.Admin(app, index_view=ManageIndex(url='/manage', endpoint='manage'))


@raises(Exception)
def test_double_init():
    # Binding the same Admin instance to an app twice is an error.
    app = Flask(__name__)
    admin = base.Admin(app)
    admin.init_app(app)
def test_nested_flask_views():
    """Exercise MethodView classes exposed through expose_plugview."""
    app = Flask(__name__)
    admin = base.Admin(app)
    admin.add_view(MockMethodView())
    client = app.test_client()

    # API1 implements all four HTTP verbs.
    rv = client.get('/admin/mockmethodview/_api/1')
    # (leftover debug print of rv.data removed)
    eq_(rv.data, b'GET - API1')
    rv = client.put('/admin/mockmethodview/_api/1')
    eq_(rv.data, b'PUT - API1')
    rv = client.post('/admin/mockmethodview/_api/1')
    eq_(rv.data, b'POST - API1')
    rv = client.delete('/admin/mockmethodview/_api/1')
    eq_(rv.data, b'DELETE - API1')

    # API2 implements only GET and POST; other verbs are 405.
    rv = client.get('/admin/mockmethodview/_api/2')
    eq_(rv.data, b'GET - API2')
    rv = client.post('/admin/mockmethodview/_api/2')
    eq_(rv.data, b'POST - API2')
    rv = client.delete('/admin/mockmethodview/_api/2')
    eq_(rv.status_code, 405)
    rv = client.put('/admin/mockmethodview/_api/2')
    eq_(rv.status_code, 405)

    # A doubly-exposed MethodView answers on both URLs.
    rv = client.get('/admin/mockmethodview/_api/3')
    eq_(rv.data, b'GET - API3')
    rv = client.get('/admin/mockmethodview/_api/4')
    eq_(rv.data, b'GET - API3')
def test_root_mount():
    # An admin mounted at '/' serves its views from the top level.
    app = Flask(__name__)
    admin = base.Admin(app, url='/')
    admin.add_view(MockView())
    resp = app.test_client().get('/mockview/')
    eq_(resp.data, b'Success!')


def test_menu_links():
    # Both endpoint-based and absolute menu links must appear on the index page.
    app = Flask(__name__)
    admin = base.Admin(app)
    admin.add_link(base.MenuLink('TestMenuLink1', endpoint='.index'))
    admin.add_link(base.MenuLink('TestMenuLink2', url='http://python.org/'))
    body = app.test_client().get('/admin/').data.decode('utf-8')
    ok_('TestMenuLink1' in body)
    ok_('TestMenuLink2' in body)
| {
"content_hash": "623cb6b439c08baa67adcf32a91ccc94",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 91,
"avg_line_length": 25.49438202247191,
"alnum_prop": 0.6067650947553989,
"repo_name": "michaelBenin/flask-admin",
"id": "ffa6c936cfc45b2ff004409b881fd03c2b8754f9",
"size": "9076",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_admin/tests/test_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import osgtest.library.core as core
import osgtest.library.files as files
import osgtest.library.service as service
import osgtest.library.osgunittest as osgunittest
class TestStopCondor(osgunittest.OSGTestCase):
    """Stop the personal HTCondor instance started earlier in the test suite."""

    def test_01_stop_condor(self):
        # Only meaningful when condor is installed and this suite started it.
        core.skip_ok_unless_installed('condor')
        self.skip_ok_unless(core.state['condor.started-service'], 'did not start server')
        service.check_stop('condor')
        # Restore the configuration file backed up during personal-condor setup.
        files.restore(core.config['condor.personal_condor'], 'condor')
        core.state['condor.running-service'] = False
| {
"content_hash": "78eae05ea1ccb8124e685fb09e50b477",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 42.23076923076923,
"alnum_prop": 0.7340619307832422,
"repo_name": "efajardo/osg-test",
"id": "b1e805af745b5fb421dd69a189d796075c9e2115",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osgtest/tests/test_890_condor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4437"
},
{
"name": "Python",
"bytes": "247612"
},
{
"name": "Shell",
"bytes": "6439"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
import unittest
from dnc.controller import BaseController
class DummyController(BaseController):
    """Feed-forward test controller: a single dense layer with 64 units."""

    def network_vars(self):
        # Weight matrix and bias of the dense layer.
        self.W = tf.Variable(tf.truncated_normal([self.nn_input_size, 64]))
        self.b = tf.Variable(tf.zeros([64]))

    def network_op(self, X):
        projected = tf.matmul(X, self.W)
        return projected + self.b
class DummyRecurrentController(BaseController):
    """Recurrent test controller: a single 64-unit BasicLSTMCell."""

    def network_vars(self):
        self.lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(64)
        # Non-trainable variables that carry the LSTM state across steps.
        self.state = tf.Variable(tf.zeros([self.batch_size, 64]), trainable=False)
        self.output = tf.Variable(tf.zeros([self.batch_size, 64]), trainable=False)

    def network_op(self, X, state):
        return self.lstm_cell(tf.convert_to_tensor(X), state)

    def update_state(self, new_state):
        # new_state is an (output, state) pair; assign both in one group op.
        assign_output = self.output.assign(new_state[0])
        assign_state = self.state.assign(new_state[1])
        return tf.group(assign_output, assign_state)

    def get_state(self):
        return (self.output, self.state)
class DNCControllerTest(unittest.TestCase):
def test_construction(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 1)
self.assertFalse(controller.has_recurrent_nn)
self.assertEqual(controller.nn_input_size, 20)
self.assertEqual(controller.interface_vector_size, 38)
self.assertEqual(controller.interface_weights.get_shape().as_list(), [64, 38])
self.assertEqual(controller.nn_output_weights.get_shape().as_list(), [64, 10])
self.assertEqual(controller.mem_output_weights.get_shape().as_list(), [10, 10])
self.assertTrue(rcontroller.has_recurrent_nn)
self.assertEqual(rcontroller.nn_input_size, 20)
self.assertEqual(rcontroller.interface_vector_size, 38)
self.assertEqual(rcontroller.interface_weights.get_shape().as_list(), [64, 38])
self.assertEqual(rcontroller.nn_output_weights.get_shape().as_list(), [64, 10])
self.assertEqual(rcontroller.mem_output_weights.get_shape().as_list(), [10, 10])
def test_get_nn_output_size(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as Session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 1)
self.assertEqual(controller.get_nn_output_size(), 64)
self.assertEqual(rcontroller.get_nn_output_size(), 64)
def test_parse_interface_vector(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
zeta = np.random.uniform(-2, 2, (2, 38)).astype(np.float32)
read_keys = np.reshape(zeta[:, :10], (-1, 5, 2))
read_strengths = 1 + np.log(np.exp(np.reshape(zeta[:, 10:12], (-1, 2, ))) + 1)
write_key = np.reshape(zeta[:, 12:17], (-1, 5, 1))
write_strength = 1 + np.log(np.exp(np.reshape(zeta[:, 17], (-1, 1))) + 1)
erase_vector = 1.0 / (1 + np.exp(-1 * np.reshape(zeta[:, 18:23], (-1, 5))))
write_vector = np.reshape(zeta[:, 23:28], (-1, 5))
free_gates = 1.0 / (1 + np.exp(-1 * np.reshape(zeta[:, 28:30], (-1, 2))))
allocation_gate = 1.0 / (1 + np.exp(-1 * zeta[:, 30, np.newaxis]))
write_gate = 1.0 / (1 + np.exp(-1 * zeta[:, 31, np.newaxis]))
read_modes = np.reshape(zeta[:, 32:], (-1, 3, 2))
read_modes = np.transpose(read_modes, [0, 2, 1])
read_modes = np.reshape(read_modes, (-1, 3))
read_modes = np.exp(read_modes) / np.sum(np.exp(read_modes), axis=-1, keepdims=True)
read_modes = np.reshape(read_modes, (2, 2, 3))
read_modes = np.transpose(read_modes, [0, 2, 1])
op = controller.parse_interface_vector(zeta)
session.run(tf.initialize_all_variables())
parsed = session.run(op)
self.assertTrue(np.allclose(parsed['read_keys'], read_keys))
self.assertTrue(np.allclose(parsed['read_strengths'], read_strengths))
self.assertTrue(np.allclose(parsed['write_key'], write_key))
self.assertTrue(np.allclose(parsed['write_strength'], write_strength))
self.assertTrue(np.allclose(parsed['erase_vector'], erase_vector))
self.assertTrue(np.allclose(parsed['write_vector'], write_vector))
self.assertTrue(np.allclose(parsed['free_gates'], free_gates))
self.assertTrue(np.allclose(parsed['allocation_gate'], allocation_gate))
self.assertTrue(np.allclose(parsed['write_gate'], write_gate))
self.assertTrue(np.allclose(parsed['read_modes'], read_modes))
def test_process_input(self):
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
controller = DummyController(10, 10, 2, 5)
rcontroller = DummyRecurrentController(10, 10, 2, 5, 2)
input_batch = np.random.uniform(0, 1, (2, 10)).astype(np.float32)
last_read_vectors = np.random.uniform(-1, 1, (2, 5, 2)).astype(np.float32)
v_op, zeta_op = controller.process_input(input_batch, last_read_vectors)
rv_op, rzeta_op, rs_op = rcontroller.process_input(input_batch, last_read_vectors, rcontroller.get_state())
session.run(tf.initialize_all_variables())
v, zeta = session.run([v_op, zeta_op])
rv, rzeta, rs = session.run([rv_op, rzeta_op, rs_op])
self.assertEqual(v.shape, (2, 10))
self.assertEqual(np.concatenate([np.reshape(val, (2, -1)) for _,val in zeta.iteritems()], axis=1).shape, (2, 38))
self.assertEqual(rv.shape, (2, 10))
self.assertEqual(np.concatenate([np.reshape(val, (2, -1)) for _,val in rzeta.iteritems()], axis=1).shape, (2, 38))
self.assertEqual([_s.shape for _s in rs], [(2, 64), (2, 64)])
def test_final_output(self):
    """final_output must map (pre-output, read vectors) to a (2, 10) batch."""
    g = tf.Graph()
    with g.as_default(), tf.Session(graph=g) as sess:
        ctrl = DummyController(10, 10, 2, 5)
        pre_output = np.random.uniform(0, 1, (2, 10)).astype(np.float32)
        reads = np.random.uniform(-1, 1, (2, 5, 2)).astype(np.float32)
        final_op = ctrl.final_output(pre_output, reads)
        sess.run(tf.initialize_all_variables())
        y = sess.run(final_op)
        self.assertEqual(y.shape, (2, 10))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| {
"content_hash": "1bcf2e088d5ea4e0fb0795830e8f04d5",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 130,
"avg_line_length": 45.12658227848101,
"alnum_prop": 0.5706872370266479,
"repo_name": "nazoking/DNC-tensorflow",
"id": "7f3136630f59871d4ac2e8ff679a685ec95fc61b",
"size": "7130",
"binary": false,
"copies": "2",
"ref": "refs/heads/ja",
"path": "unit-tests/controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "230003"
},
{
"name": "Python",
"bytes": "79013"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
"""Execute the tests for snp_store.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable, so the
# seqan.app_tests helper imported below is found from a source checkout.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
                                    '..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
    """Run the snp_store app tests.

    source_base and binary_base are the source and build root directories
    (the ones containing the 'projects' directory).  Returns True when at
    least one test failed, so the result can be used as a process exit code.
    """
    print 'Executing test for snp_store'
    print '========================='
    print
    ph = app_tests.TestPathHelper(
        source_base, binary_base,
        'core/apps/snp_store/tests')  # tests dir
    # ============================================================
    # Auto-detect the binary path.
    # ============================================================
    path_to_program = app_tests.autolocateBinary(
        binary_base, 'core/apps/snp_store', 'snp_store')
    # ============================================================
    # Build TestConf list.
    # ============================================================
    # Add TestConf objects to conf_list, analogously to how the output
    # was generated in generate_outputs.sh.
    conf_list = []
    # We prepare a list of transforms to apply to the output files.  This is
    # used to strip the input/output paths from the programs' output to
    # make it more canonical and host independent.
    ph.outFile('-')  # To ensure that the out path is set.
    transforms = [
        app_tests.RegexpReplaceTransform("#.*snp_store.exe", "#snp_store"),
        app_tests.RegexpReplaceTransform("#[^ ]+snp_store", "#snp_store"),
        app_tests.ReplaceTransform(ph.inFile(''), ''),
        app_tests.ReplaceTransform(ph.outFile(''), ''),
    ]
    # ============================================================
    # First Section.
    # ============================================================
    # Add TestConf objects to conf_list, just like this for each
    # test you want to run.
    # default invocation: no extra flags
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_default.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.gff'),
              '-o', ph.outFile('snps_default.vcf'),
              '-id', ph.outFile('indels_default.gff'),],
        to_diff=[(ph.inFile('snp_store_default.stdout'),
                  ph.outFile('snp_store_default.stdout')),
                 (ph.inFile('snps_default.vcf'),
                  ph.outFile('snps_default.vcf'),
                  transforms),
                 (ph.inFile('indels_default.gff'),
                  ph.outFile('indels_default.gff',))])
    conf_list.append(conf)
    # test 2: SAM input with realignment (-re)
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.sam'),
              '-re',
              '-o', ph.outFile('snps_realign.vcf'),
              '-id', ph.outFile('indels_realign.gff')],
        to_diff=[(ph.inFile('snp_store_realign.stdout'),
                  ph.outFile('snp_store_realign.stdout')),
                 (ph.inFile('snps_realign.vcf'),
                  ph.outFile('snps_realign.vcf'),
                  transforms),
                 (ph.inFile('indels_realign.gff'),
                  ph.outFile('indels_realign.gff'))])
    conf_list.append(conf)
    # test 3: realignment with maq model and quality options
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign_m1mp1oa.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.sam'),
              '-it', str(1), '-re', '-oa', '-mp', str(1), '-m', 'maq', '-hq',
              '-o', ph.outFile('snps_realign_m1mp1oa.vcf'),
              '-id', ph.outFile('indels_realign_m1mp1oa.gff')],
        to_diff=[(ph.inFile('snp_store_realign_m1mp1oa.stdout'),
                  ph.outFile('snp_store_realign_m1mp1oa.stdout')),
                 (ph.inFile('snps_realign_m1mp1oa.vcf'),
                  ph.outFile('snps_realign_m1mp1oa.vcf'),
                  transforms),
                 (ph.inFile('indels_realign_m1mp1oa.gff'),
                  ph.outFile('indels_realign_m1mp1oa.gff'))])
    conf_list.append(conf)
    # test 4: GFF input, indel threshold 2
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign_m0mp1oa.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.gff'),
              '-it', str(2), '-re', '-oa', '-mp', str(1), '-hq',
              '-o', ph.outFile('snps_realign_m0mp1oa.vcf'),
              '-id', ph.outFile('indels_realign_m0mp1oa.gff')],
        to_diff=[(ph.inFile('snp_store_realign_m0mp1oa.stdout'),
                  ph.outFile('snp_store_realign_m0mp1oa.stdout')),
                 (ph.inFile('snps_realign_m0mp1oa.vcf'),
                  ph.outFile('snps_realign_m0mp1oa.vcf'),
                  transforms),
                 (ph.inFile('indels_realign_m0mp1oa.gff'),
                  ph.outFile('indels_realign_m0mp1oa.gff'))])
    conf_list.append(conf)
    # test 5: indel percentage threshold 0.1 and orientation-aware output
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign_m0mp1oa_it1ipt01.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.sam'),
              '-it', str(1), '-ipt', str(0.1), '-osc', '-re', '-oa', '-hq',
              '-o', ph.outFile('snps_realign_m0mp1oa_it1ipt01.vcf'),
              '-id', ph.outFile('indels_realign_m0mp1oa_it1ipt01.gff')],
        to_diff=[(ph.inFile('snp_store_realign_m0mp1oa_it1ipt01.stdout'),
                  ph.outFile('snp_store_realign_m0mp1oa_it1ipt01.stdout')),
                 (ph.inFile('snps_realign_m0mp1oa_it1ipt01.vcf'),
                  ph.outFile('snps_realign_m0mp1oa_it1ipt01.vcf'),
                  transforms),
                 (ph.inFile('indels_realign_m0mp1oa_it1ipt01.gff'),
                  ph.outFile('indels_realign_m0mp1oa_it1ipt01.gff'))])
    conf_list.append(conf)
    # ============================================================
    # Execute the tests.
    # ============================================================
    failures = 0
    for conf in conf_list:
        res = app_tests.runTest(conf)
        # Output to the user.
        print ' '.join(['snp_store'] + conf.args),
        if res:
            print 'OK'
        else:
            failures += 1
            print 'FAILED'
    # Cleanup.
    ph.deleteTempDir()
    print '=============================='
    print ' total tests: %d' % len(conf_list)
    print ' failed tests: %d' % failures
    print 'successful tests: %d' % (len(conf_list) - failures)
    print '=============================='
    # Compute and return return code.
    return failures != 0
# Delegate to the shared app_tests driver, which parses the command line
# and invokes main() with the source and binary root paths.
if __name__ == '__main__':
    sys.exit(app_tests.main(main))
| {
"content_hash": "a8481fa13db1af684279ccbc700ba657",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 39.84153005464481,
"alnum_prop": 0.5185845563022905,
"repo_name": "h-2/seqan",
"id": "b0841e1bdb75c30ed4f780b1062c188f8cb01613",
"size": "7313",
"binary": false,
"copies": "3",
"ref": "refs/heads/feature/lambda",
"path": "core/apps/snp_store/tests/run_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "10606"
},
{
"name": "Batchfile",
"bytes": "8403"
},
{
"name": "C",
"bytes": "251129"
},
{
"name": "C++",
"bytes": "22918154"
},
{
"name": "CMake",
"bytes": "401552"
},
{
"name": "CSS",
"bytes": "465104"
},
{
"name": "Cuda",
"bytes": "29155"
},
{
"name": "GLSL",
"bytes": "1140"
},
{
"name": "Groff",
"bytes": "449001"
},
{
"name": "HTML",
"bytes": "419293"
},
{
"name": "JavaScript",
"bytes": "173341"
},
{
"name": "Makefile",
"bytes": "518"
},
{
"name": "Objective-C",
"bytes": "386955"
},
{
"name": "PHP",
"bytes": "11330877"
},
{
"name": "Perl",
"bytes": "6909"
},
{
"name": "Python",
"bytes": "1951362"
},
{
"name": "R",
"bytes": "34940"
},
{
"name": "Shell",
"bytes": "87752"
},
{
"name": "Tcl",
"bytes": "3861"
},
{
"name": "TeX",
"bytes": "8613"
}
],
"symlink_target": ""
} |
from .MusicWebsiteParser import MusicWebsiteParser
from ..FileDownload import FileDownload
from BeautifulSoup import BeautifulSoup
class YoutubeParser(MusicWebsiteParser):
    """Parser that turns a song-name word list into a YouTube video URL."""

    def Parse(self, song_name):
        """Return the URL of the first YouTube search result for *song_name*.

        *song_name* is a sequence of words; they are joined with '+' to form
        the search query.
        """
        query = '+'.join(song_name)
        search_url = "https://www.youtube.com/results?search_query=" + query
        html = FileDownload().get_html_response(search_url)
        soup = BeautifulSoup(html)
        # First result anchor of the search listing.  NOTE: the class string
        # (including its trailing space) is copied verbatim from the page markup.
        first_hit = soup.find('a', attrs={'class': 'yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 yt-uix-sessionlink spf-link '})
        return 'https://www.youtube.com' + first_hit.get('href')
| {
"content_hash": "705df82acfb05e9bdb6ad4b3efe2aa1a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 132,
"avg_line_length": 34.21052631578947,
"alnum_prop": 0.7492307692307693,
"repo_name": "ankitmathur3193/song-cli",
"id": "ddecf390e4ece1a245d12bf534070c064cbfc12b",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "song/commands/MusicWebsiteParser/YoutubeParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15641"
}
],
"symlink_target": ""
} |
"""
Copyright 2020 Google LLC
Copyright 2020 PerfectVIPs Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import math
from pygen_src.riscv_instr_pkg import (privileged_reg_t, satp_mode_t,
riscv_instr_group_t, mtvec_mode_t,
privileged_mode_t)
# -----------------------------------------------------------------------------
# Processor feature configuration
# -----------------------------------------------------------------------------
# XLEN
XLEN = 32
# set to BARE if address translation is not supported
SATP_MODE = satp_mode_t.BARE
# Supported Privileged mode
supported_privileged_mode = [privileged_mode_t.MACHINE_MODE]
# Unsupported instructions
unsupported_instr = []
# ISA supported by the processor
supported_isa = [riscv_instr_group_t.RV32I, riscv_instr_group_t.RV32M,
                 riscv_instr_group_t.RV32C, riscv_instr_group_t.RV32F,
                 riscv_instr_group_t.RV32FC, riscv_instr_group_t.RV32D,
                 riscv_instr_group_t.RV32DC, riscv_instr_group_t.RV32A]
# Interrupt mode support
supported_interrupt_mode = [mtvec_mode_t.DIRECT, mtvec_mode_t.VECTORED]
# The number of interrupt vectors to be generated, only used if VECTORED
# interrupt mode is supported
max_interrupt_vector_num = 16
# Physical memory protection support
support_pmp = 0
# Debug mode support
support_debug_mode = 0
# Support delegate trap to user mode
support_umode_trap = 0
# Support sfence.vma instruction
support_sfence = 0
# Support unaligned load/store
support_unaligned_load_store = 1
# GPR Setting: number of float / integer / vector registers
NUM_FLOAT_GPR = 32
NUM_GPR = 32
NUM_VEC_GPR = 32
# -----------------------------------------------------------------------------
# Vector extension configuration
# -----------------------------------------------------------------------------
# Parameter for vector extension
VECTOR_EXTENSION_ENABLE = 0
VLEN = 512
# Maximum size of a single vector element
ELEN = 32
# Minimum size of a sub-element, which must be at most 8-bits.
SELEN = 8
# Maximum size of a single vector element (encoded in vsew format).
# Computed with math.log2 directly: the previous form
# int(math.log(ELEN) // math.log(2)) - 3 floor-divides two floats, which can
# round one step too low when the quotient lands just below an integer.
VELEN = int(math.log2(ELEN)) - 3
# Maximum LMUL supported by the core
MAX_LMUL = 8
# -----------------------------------------------------------------------------
# Multi-harts configuration
# -----------------------------------------------------------------------------
# Number of harts
NUM_HARTS = 1
# -----------------------------------------------------------------------------
# Privileged CSR implementation
# -----------------------------------------------------------------------------
# Implemented privileged CSR list
implemented_csr = [privileged_reg_t.MVENDORID,  # Vendor ID
                   privileged_reg_t.MARCHID,  # Architecture ID
                   privileged_reg_t.MIMPID,  # Implementation ID
                   privileged_reg_t.MHARTID,  # Hardware thread ID
                   privileged_reg_t.MSTATUS,  # Machine status
                   privileged_reg_t.MISA,  # ISA and extensions
                   privileged_reg_t.MIE,  # Machine interrupt-enable register
                   privileged_reg_t.MTVEC,  # Machine trap-handler base address
                   privileged_reg_t.MCOUNTEREN,  # Machine counter enable
                   privileged_reg_t.MSCRATCH,  # Scratch register for machine trap handlers
                   privileged_reg_t.MEPC,  # Machine exception program counter
                   privileged_reg_t.MCAUSE,  # Machine trap cause
                   privileged_reg_t.MTVAL,  # Machine bad address or instruction
                   privileged_reg_t.MIP  # Machine interrupt pending
                   ]
# Implementation-specific custom CSRs
custom_csr = []
| {
"content_hash": "805e1e1800a1e268767028d17b7bbdbf",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 91,
"avg_line_length": 34.64705882352941,
"alnum_prop": 0.5745816153286442,
"repo_name": "google/riscv-dv",
"id": "5de8125c66b8c67c4c36fe054faa3ed35ff43b82",
"size": "4123",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pygen/pygen_src/target/rv32imafdc/riscv_core_setting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "113"
},
{
"name": "C",
"bytes": "41"
},
{
"name": "D",
"bytes": "773247"
},
{
"name": "Filebench WML",
"bytes": "403"
},
{
"name": "Forth",
"bytes": "1652"
},
{
"name": "Makefile",
"bytes": "9429"
},
{
"name": "Python",
"bytes": "1379201"
},
{
"name": "Shell",
"bytes": "3154"
},
{
"name": "SystemVerilog",
"bytes": "760029"
},
{
"name": "Tcl",
"bytes": "79"
}
],
"symlink_target": ""
} |
import sys
import django
from django.conf import settings
# Minimal Django settings used only to run the macaddress test suite:
# in-memory SQLite database and the bare set of contrib apps the tests need.
SETTINGS = {
    'DATABASES': {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:',  # throwaway in-memory database
        }
    },
    'INSTALLED_APPS': (
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.messages',
        'django.contrib.admin',
        'macaddress.tests',
        'macaddress',
    ),
    'SITE_ID': 1,
    'SECRET_KEY': 'this-is-just-for-tests-so-not-that-secret',
    'TEMPLATES': [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'APP_DIRS': True,
            'OPTIONS': {
                'context_processors': [
                    'django.template.context_processors.debug',
                    'django.template.context_processors.request',
                    'django.contrib.auth.context_processors.auth',
                    'django.contrib.messages.context_processors.messages',
                ]
            }
        }
    ],
}
# Select the middleware setting name based on the running Django version:
# the legacy MIDDLEWARE_CLASSES key for Django < 2.2, MIDDLEWARE otherwise.
if django.VERSION < (2, 2):
    SETTINGS['MIDDLEWARE_CLASSES'] = (
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
    )
else:
    SETTINGS['MIDDLEWARE'] = (
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
    )

# Apply the settings once, unless the environment configured Django already.
if not settings.configured:
    settings.configure(**SETTINGS)
from django.test.utils import get_runner
def runtests():
    """Run the macaddress test suite and exit with the failure count."""
    # django.setup() only exists on newer Django versions; call when available.
    if hasattr(django, 'setup'):
        django.setup()
    runner_cls = get_runner(settings)
    runner = runner_cls(verbosity=1, interactive=True, failfast=False)
    sys.exit(runner.run_tests(['macaddress', ]))
# Run the suite when invoked as a script.
if __name__ == '__main__':
    runtests()
| {
"content_hash": "622da2a849ad21c6543d1217a9591981",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 75,
"avg_line_length": 26.84,
"alnum_prop": 0.5946348733233979,
"repo_name": "tubaman/django-macaddress",
"id": "e94ea25fe1d8c54dd0c0615eac9ad857dd4408d8",
"size": "2035",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10358"
}
],
"symlink_target": ""
} |
"""
A module that contains the interfaces which describing our Logger API
"""
from zope.interface import Interface, Attribute
__author__ = 'dimd'
class ILogger(Interface):
    """
    A base logger interface: the contract every Logger implementation
    of this package provides.
    """

    default = Attribute("default logging module")
    origin = Attribute("Current module")

    def info(msg):
        """
        Log *msg* at info level.

        :param msg: info message
        """

    def debug(msg):
        """
        Log *msg* at debug level.

        :param msg: debug message
        """

    def error(msg):
        """
        Log *msg* at error level.

        :param msg: error message
        :return:
        """

    def warning(msg):
        """
        Log *msg* at warning level.

        :param msg: warning message
        :return:
        """

    def critical(msg):
        """
        Log *msg* at critical level.

        :param msg: critical message
        :return:
        """
| {
"content_hash": "72acc1fa587f66896674fe90bfc63020",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 69,
"avg_line_length": 18.604166666666668,
"alnum_prop": 0.5274356103023516,
"repo_name": "dimddev/NetCatKS",
"id": "1b7121f3454dfcb00022a4c37b6127faeeb4a515",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NetCatKS/Logger/api/interfaces/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "182697"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from datetime import datetime
from gocept.runner import once
from logging import getLogger
from pprint import pformat
from zeit.cms.interfaces import ICMSContent
import zeit.find.search
log = getLogger(__name__)
def convert(conditions):
    """Yield (name, value) pairs parsed from 'name:value' condition strings.

    Values of the temporal conditions ``from_`` and ``until`` are parsed
    into ``datetime`` objects; all other values pass through verbatim.
    """
    temporal = ('from_', 'until')
    for raw in conditions:
        key, text = raw.split(':', 1)
        if key in temporal:
            yield key, datetime.strptime(text, '%Y-%m-%dT%H:%M:%S')
        else:
            yield key, text
def parse():
    """Parse command-line arguments.

    Returns the parsed namespace together with a dict of search
    conditions built via convert().
    """
    cli = ArgumentParser(description='Elasticsearch debug client')
    cli.add_argument('conditions', nargs='+', help='Search conditions')
    cli.add_argument('-v', '--verbose', action='store_true',
                     help='Report query & results')
    cli.add_argument('-p', '--payload', action='store_true',
                     help='Dump result payload')
    namespace = cli.parse_args()
    return namespace, dict(convert(namespace.conditions))
@once(principal='zope.manager')
def search_elastic():
    """Run one elasticsearch query from the command line and log the results.

    Registered as a gocept.runner entry point; runs once under the
    'zope.manager' principal.  Conditions come from the CLI via parse().
    """
    args, conditions = parse()
    query = zeit.find.search.query(**conditions)
    if args.verbose:
        log.info('using query: {}'.format(query))
    response = zeit.find.search.search(query, include_payload=args.payload)
    log.info('got {} results'.format(response.hits))
    if args.verbose:
        for idx, item in enumerate(response):
            info = '#{}: {}'.format(idx, ICMSContent(item).uniqueId)
            if args.payload:
                # Append the raw result payload for debugging.
                info += '\n' + pformat(item)
            log.info(info)
| {
"content_hash": "33b8c223ac73e292b272b10ed095cdf1",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 33.422222222222224,
"alnum_prop": 0.648936170212766,
"repo_name": "ZeitOnline/zeit.find",
"id": "cd9c1211f13afeabca3b750fcc93e14015f37d0c",
"size": "1504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/find/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5885"
},
{
"name": "JavaScript",
"bytes": "13540"
},
{
"name": "Python",
"bytes": "45203"
}
],
"symlink_target": ""
} |
import kid
import sys
import time
import webob
from webob import exc as web_exc
from mint import shimclient
from mint import userlevels
from mint.helperfuncs import getProjectText, weak_signature_call
from mint.mint_error import MintError
from mint.web import fields
from mint.web.admin import AdminHandler
from mint.web.site import SiteHandler
from mint.web.webhandler import WebHandler, normPath
# called from hooks.py if an exception was not caught
class ErrorHandler(WebHandler):
    """Fallback handler used when a request raises an unhandled exception."""

    def handle(self, context):
        # Adopt every entry of the request context as an instance attribute,
        # then hand back the page renderer.
        vars(self).update(context)
        return self.errorPage

    def errorPage(self, *args, **kwargs):
        return self._write('error', error = ' An unknown error occurred while handling your request. Site maintainers have been notified.')
class MintApp(WebHandler):
    """Top-level request dispatcher for the mint web UI.

    Authenticates the request, routes it to the site/admin/error handler,
    and renders the result through the shared WebHandler machinery.
    """

    # Defaults until a project / user is resolved for the request.
    project = None
    userLevel = userlevels.NONMEMBER
    user = None
    responseFactory = webob.Response

    def __init__(self, req, cfg, repServer = None, db=None, session=None,
                 authToken=None, reposShim=None):
        """Capture per-request state; no network or database I/O here."""
        self.req = req
        self.cfg = cfg
        self.db = db
        self.reposShim = reposShim

        # always send html-strict; xhtml FTL
        # The default behavior of kid changed between 0.9.1 and 0.9.6
        # in 0.9.1 html-strict produced upper case tags and HTML-strict did not
        # exist. in 0.9.6 HTML-strict produces upper case tags and html-strict
        # produces lower case tags. we want upper case tags.
        if 'HTML-strict' in kid.output_methods:
            self.output = 'HTML-strict'
        else:
            self.output = 'html-strict'
        self.content_type = 'text/html; charset=utf-8'
        self.response = self.responseFactory(content_type=self.content_type)

        self.fields = req.params.mixed()
        self.basePath = normPath(req.script_name)

        if session is None:
            session = {}
        self.session = session
        self.authToken = authToken

        self.siteHandler = SiteHandler()
        self.adminHandler = AdminHandler()
        self.errorHandler = ErrorHandler()

    def _handle(self):
        """Authenticate, dispatch and render one request.

        Returns either a webob Response or an HTTPMethodNotAllowed
        exception object for unsupported HTTP methods.
        """
        method = self.req.method.upper()
        allowed = ['GET', 'POST', 'PUT']
        if method not in allowed:
            return web_exc.HTTPMethodNotAllowed(allow=allowed)

        if not self.authToken:
            self.authToken = ('anonymous', 'anonymous')

        # open up a new client with the retrieved authToken
        self.client = shimclient.ShimMintClient(self.cfg, self.authToken,
                self.db)

        self.auth = self.client.checkAuth()
        if self.auth.authorized:
            self.user = self.client.getUser(self.auth.userId)
        self.auth.setToken(self.authToken)

        method = self._getHandler()

        d = self.fields.copy()
        d['auth'] = self.auth

        def logTraceback():
            # Format the in-flight exception for optional display on the
            # error page (only shown when cfg.debugMode is set).
            import traceback
            e_type, e_value, e_tb = sys.exc_info()
            formatted = ''.join(traceback.format_exception(
                e_type, e_value, e_tb))
            return formatted

        try:
            output = weak_signature_call(method, **d)
        except MintError, e:
            tb = logTraceback()
            err_name = sys.exc_info()[0].__name__
            output = self._write("error", shortError = err_name, error = str(e),
                traceback = self.cfg.debugMode and tb or None)
        except fields.MissingParameterError, e:
            tb = logTraceback()
            output = self._write("error", shortError = "Missing Parameter", error = str(e))
        except fields.BadParameterError, e:
            tb = logTraceback()
            output = self._write("error", shortError = "Bad Parameter", error = str(e),
                traceback = self.cfg.debugMode and tb or None)
        else:
            # Only successful renders get a fresh Last-Modified stamp.
            self.response.last_modified = time.time()
        self.response.body = output
        self._clearAllMessages()
        return self.response

    def _getHandler(self):
        """Resolve the request path to a handler method.

        Builds the context dict passed to the chosen URL handler and
        strips the leading path component for admin/error routes.
        """
        self.baseUrl = self.req.application_url + '/'
        self.httpsUrl = self.req.application_url.replace('http://', 'https://') + '/'
        self.hostName = self.req.host.rsplit(':', 1)[0]
        self.SITE = self.req.host + '/'
        self.siteHost = self.cfg.siteHost

        self.isOwner = self.userLevel == userlevels.OWNER or self.auth.admin

        # Handle messages stashed in the session
        self.infoMsg = self.session.setdefault('infoMsg', '')
        self.searchType = self.session.setdefault('searchType', getProjectText().title()+"s")
        self.searchTerms = ''
        self.errorMsgList = self._getErrors()

        # a set of information to be passed into the next handler
        context = {
            'auth': self.auth,
            'authToken': self.auth.getToken(),
            'client': self.client,
            'cfg': self.cfg,
            'db': self.db,
            'fields': self.fields,
            'req': self.req,
            'response': self.response,
            'session': self.session,
            'siteHost': self.cfg.siteHost,
            'searchType': self.searchType,
            'searchTerms': '',
            'toUrl': self.req.url,
            'baseUrl': self.baseUrl,
            'basePath': self.basePath,
            'httpsUrl': self.httpsUrl,
            'hostName': self.hostName,
            'project': None,
            'SITE': self.SITE,
            'userLevel': self.userLevel,
            'user': self.user,
            'isOwner': self.isOwner,
            'infoMsg': self.infoMsg,
            'errorMsgList': self.errorMsgList,
            'output': self.output,
            'remoteIp': self.req.client_addr,
            'reposShim': self.reposShim,
        }

        # match the requested url to the right url handler
        for match, urlHandler in [
                ('admin', self.adminHandler),
                ('administer', self.adminHandler),
                ('unknownError', self.errorHandler),
                ]:
            if self.req.path_info_peek() == match:
                self.req.path_info_pop()
                break
        else:
            # No special prefix matched: serve through the main site handler.
            urlHandler = self.siteHandler

        context['cmd'] = self.req.path_info
        return urlHandler.handle(context)
| {
"content_hash": "7ea04a41caea5ec070a353e38c725cf9",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 139,
"avg_line_length": 37.55232558139535,
"alnum_prop": 0.5647933116581514,
"repo_name": "sassoftware/mint",
"id": "1a2fcf7cb5e06e69de07884ce6917862b2db8626",
"size": "7046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mint/web/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50165"
},
{
"name": "Genshi",
"bytes": "58741"
},
{
"name": "HTML",
"bytes": "2814"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Makefile",
"bytes": "92418"
},
{
"name": "NASL",
"bytes": "582"
},
{
"name": "PLpgSQL",
"bytes": "5358"
},
{
"name": "Puppet",
"bytes": "17914"
},
{
"name": "Python",
"bytes": "3239135"
},
{
"name": "Ruby",
"bytes": "9268"
},
{
"name": "Shell",
"bytes": "24834"
}
],
"symlink_target": ""
} |
from __future__ import division
import os
import sys
import socket
import signal
import functools
import atexit
import tempfile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from time import sleep
try:
import simplejson as json
except ImportError:
import json
from .exceptions import CommandError, TimeoutWaitingFor
# Ports handed out by find_unused_port() so far (see track= parameter).
USED_PORTS = set()
ON_POSIX = 'posix' in sys.builtin_module_names

# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))

# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
    os.path.join(CURRENT_DIR, "..", "..", "src")
)

# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
    os.path.join(CURRENT_DIR, "..", "test_certs")
)

# Default location of test hooks
DEFAULT_HOOK_PATH = os.path.abspath(
    os.path.join(CURRENT_DIR, "..", "test_hooks")
)

# Environment flags to control skipping of task and taskd tests
TASKW_SKIP = os.environ.get("TASKW_SKIP", False)
TASKD_SKIP = os.environ.get("TASKD_SKIP", False)

# Environment flags to control use of PATH or in-tree binaries
TASK_USE_PATH = os.environ.get("TASK_USE_PATH", False)
TASKD_USE_PATH = os.environ.get("TASKD_USE_PATH", False)

# Matches one canonical 8-4-4-4-12 hexadecimal UUID.
UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")
def task_binary_location(cmd="task"):
    """Resolve the location of a task binary.

    Honours the TASK_USE_PATH environment flag: when set, the bare command
    name is returned so the shell PATH decides; otherwise the in-tree
    ../src/ build is used.
    """
    return binary_location(cmd, TASK_USE_PATH)
def taskd_binary_location(cmd="taskd"):
    """Resolve the location of a taskd binary.

    Honours the TASKD_USE_PATH environment flag: when set, the bare command
    name is returned so the shell PATH decides; otherwise the in-tree
    ../src/ build is used.
    """
    return binary_location(cmd, TASKD_USE_PATH)
def binary_location(cmd, USE_PATH=False):
    """Resolve *cmd* either via the shell PATH or the in-tree src/ folder.

    When USE_PATH is truthy the bare command name is returned (letting the
    PATH lookup happen later); otherwise *cmd* is joined onto BIN_PREFIX.
    """
    return cmd if USE_PATH else os.path.join(BIN_PREFIX, cmd)
def wait_condition(cond, timeout=1, sleeptime=.01):
    """Poll *cond* until it returns something other than None.

    Returns the first non-None value produced by *cond*, or None if the
    timeout elapsed first.
    """
    # NOTE Increasing sleeptime can dramatically increase testsuite runtime
    # It also reduces CPU load significantly
    if timeout is None:
        timeout = 1

    if timeout < sleeptime:
        print("Warning, timeout cannot be smaller than", sleeptime)
        timeout = sleeptime

    # Max number of attempts until giving up
    attempts = int(timeout / sleeptime)

    value = None
    for _ in range(attempts):
        value = cond()
        if value is not None:
            return value
        sleep(sleeptime)

    return value
def wait_process(pid, timeout=None):
    """Wait until process *pid* is gone, or *timeout* elapses.

    Returns True when the process no longer exists, None on timeout.
    """
    def gone():
        # Signal 0 performs permission/existence checking only;
        # OSError means there is no such process any more.
        try:
            os.kill(pid, 0)
        except OSError:
            return True
        return None

    return wait_condition(gone, timeout)
def _queue_output(arguments, pidq, outputq):
"""Read/Write output/input of given process.
This function is meant to be executed in a thread as it may block
"""
kwargs = arguments["process"]
input = arguments["input"]
try:
proc = Popen(**kwargs)
except OSError as e:
# pid None is read by the main thread as a crash of the process
pidq.put(None)
outputq.put((
"",
("Unexpected exception caught during execution of taskw: '{0}' . "
"If you are running out-of-tree tests set TASK_USE_PATH=1 or "
"TASKD_USE_PATH=1 in shell env before execution and add the "
"location of the task(d) binary to the PATH".format(e)),
255)) # false exitcode
return
# Put the PID in the queue for main process to know.
pidq.put(proc.pid)
# Send input and wait for finish
out, err = proc.communicate(input)
if sys.version_info > (3,):
out, err = out.decode('utf-8'), err.decode('utf-8')
# Give the output back to the caller
outputq.put((out, err, proc.returncode))
def _retrieve_output(thread, timeout, queue, thread_error):
"""Fetch output from taskw subprocess queues
"""
# Try to join the thread on failure abort
thread.join(timeout)
if thread.isAlive():
# Join should have killed the thread. This is unexpected
raise TimeoutWaitingFor(thread_error + ". Unexpected error")
# Thread died so we should have output
try:
# data = (stdout, stderr, exitcode)
data = queue.get(timeout=timeout)
except Empty:
data = TimeoutWaitingFor("streams from TaskWarrior")
return data
def _get_output(arguments, timeout=None):
    """Collect output from the subprocess without blocking the main process
    if the subprocess hangs.

    Launches _queue_output in a daemon thread, waits for the child to exit,
    and escalates through SIGABRT/SIGTERM/SIGKILL when it stops responding.
    Returns the (stdout, stderr, exitcode) tuple from the worker queues.
    """
    # NOTE Increase this value if tests fail with None being received as
    # stdout/stderr instead of the expected content
    output_timeout = 0.1  # seconds

    pidq = Queue()
    outputq = Queue()

    t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
    t.daemon = True
    t.start()

    try:
        pid = pidq.get(timeout=timeout)
    except Empty:
        pid = None

    # Process crashed or timed out for some reason
    if pid is None:
        return _retrieve_output(t, output_timeout, outputq,
                                "TaskWarrior to start")

    # Wait for process to finish (normal execution)
    state = wait_process(pid, timeout)
    if state:
        # Process finished
        return _retrieve_output(t, output_timeout, outputq,
                                "TaskWarrior thread to join")

    # If we reach this point we assume the process got stuck or timed out
    for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
        # Start with lower signals and escalate if process ignores them
        try:
            # BUGFIX: the loop previously always sent SIGABRT, so the
            # escalation to SIGTERM and SIGKILL never actually happened.
            os.kill(pid, sig)
        except OSError as e:
            # errno 3 (ESRCH) means the process finished/died between the
            # last check and now
            if e.errno != 3:
                raise

        # Wait for process to finish (should die/exit after signal)
        state = wait_process(pid, timeout)
        if state:
            # Process finished
            return _retrieve_output(t, output_timeout, outputq,
                                    "TaskWarrior to die")

    # This should never happen but in case something goes really bad
    raise OSError("TaskWarrior stopped responding and couldn't be killed")
def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
                 merge_streams=False, env=os.environ, timeout=None):
    """Run a subprocess and wait for it to finish.

    Returns (exitcode, stdout) when merge_streams is set, otherwise
    (exitcode, stdout, stderr).  Raises CommandError on non-zero exit.
    NOTE(review): the *stderr* argument is always overridden below,
    matching the historical behaviour — confirm no caller relies on it.
    """
    stdin = None if input is None else PIPE
    # merge_streams folds stderr into the stdout pipe
    stderr = STDOUT if merge_streams else PIPE

    arguments = {
        "process": {
            "args": cmd,
            "stdin": stdin,
            "stdout": stdout,
            "stderr": stderr,
            "bufsize": 1,
            "close_fds": ON_POSIX,
            "env": env,
        },
        "input": input,
    }
    out, err, exit = _get_output(arguments, timeout)

    if merge_streams:
        if exit != 0:
            raise CommandError(cmd, exit, out)
        return exit, out

    if exit != 0:
        raise CommandError(cmd, exit, out, err)
    return exit, out, err
def run_cmd_wait_nofail(*args, **kwargs):
    """Same as run_cmd_wait but swallow CommandError, returning its data
    as a (code, out, err) tuple instead of raising."""
    try:
        return run_cmd_wait(*args, **kwargs)
    except CommandError as e:
        return e.code, e.out, e.err
def get_IPs(hostname):
    """Map address family (AF_INET / AF_INET6) to the IP *hostname* resolves to.

    When a family resolves to several addresses, the last one wins, matching
    the original accumulation order.
    """
    addrinfo = socket.getaddrinfo(hostname, 0, 0, 0, socket.IPPROTO_TCP)
    return {entry[0]: entry[4][0] for entry in addrinfo}
def port_used(addr="localhost", port=None):
    "Return True if port is in use, False otherwise"
    if port is None:
        raise TypeError("Argument 'port' may not be None")
    # If we got an address name, resolve it both to IPv6 and IPv4.
    IPs = get_IPs(addr)
    # Taskd seems to prefer IPv6 so we do it first
    for family in (socket.AF_INET6, socket.AF_INET):
        try:
            ip = IPs[family]
        except KeyError:
            # Host has no address in this family; try the next one.
            continue
        s = socket.socket(family, socket.SOCK_STREAM)
        try:
            result = s.connect_ex((ip, port))
        finally:
            s.close()
        if result == 0:
            # connection was successful
            return True
        # FIX: previously a failed connect on the first resolved family
        # returned False immediately, so a port bound only on IPv4 was
        # reported free whenever IPv6 resolution succeeded. Keep probing
        # the remaining families instead.
    return False
def find_unused_port(addr="localhost", start=53589, track=True):
    """Find an unused port starting at `start` port

    If track=False the returned port will not be marked as in-use and the code
    will rely entirely on the ability to connect to addr:port as detection
    mechanism. Note this may cause problems if ports are assigned but not used
    immediately

    Raises ValueError when no port in [start, maxport) is available.
    """
    maxport = 65535
    unused = None
    # FIX: range() replaces the Python-2-only xrange(), which raises
    # NameError on Python 3.
    for port in range(start, maxport):
        if not port_used(addr, port):
            if track and port in USED_PORTS:
                # Already handed out by a previous call; keep looking.
                continue
            unused = port
            break
    if unused is None:
        raise ValueError("No available port in the range {0}-{1}".format(
            start, maxport))
    if track:
        USED_PORTS.add(unused)
    return unused
def release_port(port):
    """Forget that the given port was marked as in-use."""
    # discard() is a no-op when the port was never tracked, matching the
    # original try/remove/except-KeyError behavior.
    USED_PORTS.discard(port)
def memoize(obj):
    """Cache *obj*'s results in memory, keyed on its call arguments.

    The cache dict is exposed as the ``cache`` attribute of both the
    original function and the returned wrapper.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def wrapper(*args, **kwargs):
        key = str(args) + str(kwargs)
        try:
            return cache[key]
        except KeyError:
            cache[key] = obj(*args, **kwargs)
            return cache[key]
    return wrapper
try:
    # Python >= 3.3: reuse the stdlib implementation, memoized so repeated
    # lookups of the same command avoid rescanning PATH.
    from shutil import which
    which = memoize(which)
except ImportError:
    # NOTE: This is shutil.which backported from python-3.3.3
    @memoize
    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        """Given a command, mode, and a PATH string, return the path which
        conforms to the given mode on the PATH, or None if there is no such
        file.
        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
        of os.environ.get("PATH"), or can be overridden with a custom search
        path.
        """
        # Check that a given file can be accessed with the correct mode.
        # Additionally check that `file` is not a directory, as on Windows
        # directories pass the os.access check.
        def _access_check(fn, mode):
            return (os.path.exists(fn) and os.access(fn, mode) and
                    not os.path.isdir(fn))
        # If we're given a path with a directory part, look it up directly
        # rather than referring to PATH directories. This includes checking
        # relative to the current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd
            return None
        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None
        path = path.split(os.pathsep)
        if sys.platform == "win32":
            # The current directory takes precedence on Windows.
            if os.curdir not in path:
                path.insert(0, os.curdir)
            # PATHEXT is necessary to check on Windows.
            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
            # See if the given file matches any of the expected path
            # extensions. This will allow us to short circuit when given
            # "python.exe". If it does match, only test that one, otherwise we
            # have to try others.
            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
                files = [cmd]
            else:
                files = [cmd + ext for ext in pathext]
        else:
            # On other platforms you don't have things like PATHEXT to tell you
            # what file suffixes are executable, so just pass on cmd as-is.
            files = [cmd]
        # Deduplicate PATH entries (by normalized case) so each directory is
        # only scanned once, preserving the original search order.
        seen = set()
        for dir in path:
            normdir = os.path.normcase(dir)
            if normdir not in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name
        return None
def parse_datafile(file):
    """Parse .data files on the client and server, treating lines as JSON.

    Lines of the form ``[ ... ]`` are rewritten to ``{ ... }`` so they parse
    as JSON objects; lines that then start with ``{`` are decoded, any other
    line is kept as a raw string.
    """
    entries = []
    with open(file) as fh:
        for raw in fh:
            raw = raw.rstrip("\n")
            # Turn [] strings into {} to be treated properly as JSON hashes
            if raw.startswith('[') and raw.endswith(']'):
                raw = '{' + raw[1:-1] + '}'
            entries.append(json.loads(raw) if raw.startswith("{") else raw)
    return entries
def mkstemp(data):
    """Write *data* to a temporary file and return its name.

    The file is scheduled for removal when the interpreter exits.
    """
    def _cleanup(path):
        try:
            os.remove(path)
        except OSError:
            # Already gone (or not removable) — nothing to do.
            pass

    tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
        tmp.write(data)
    finally:
        tmp.close()
    # Ensure removal at end of python session
    atexit.register(_cleanup, tmp.name)
    return tmp.name
def mkstemp_exec(data):
    """Create a temporary executable file that is removed at process exit."""
    path = mkstemp(data)
    # rwxr-xr-x so the file can be executed directly.
    os.chmod(path, 0o755)
    return path
# vim: ai sts=4 et sw=4
| {
"content_hash": "0be41311196cd2ec3d3a619665d8589a",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 79,
"avg_line_length": 28.804979253112034,
"alnum_prop": 0.6011956208585422,
"repo_name": "tbabej/task",
"id": "d41ac09ccc792f5dd4e33aaf8e554891ccfeebf8",
"size": "13908",
"binary": false,
"copies": "3",
"ref": "refs/heads/2.5.0",
"path": "test/basetest/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "567753"
},
{
"name": "C++",
"bytes": "1864645"
},
{
"name": "CMake",
"bytes": "17102"
},
{
"name": "Perl",
"bytes": "1100043"
},
{
"name": "Python",
"bytes": "902996"
},
{
"name": "Shell",
"bytes": "39090"
},
{
"name": "VimL",
"bytes": "12757"
}
],
"symlink_target": ""
} |
import sys
import contextlib
import click
from ..consts import DEFAULT_APP_PORT
from ...client import Client
import __main__
@contextlib.contextmanager
def _client_ctx(app_name, service, host, port):
pass
@click.command()
@click.option('-h', '--host', default='localhost',
              help='service host(default localhost)')
@click.option('-p', '--port', default=DEFAULT_APP_PORT, help='service port')
@click.option('-r', '--rpc', default=False, is_flag=True,
              help='if use rpc connect')
@click.option('--profile', default=False, is_flag=True,
              help='Profile all apis calls')
def shell(host, port, rpc, profile):
    """
    # run a client in ipython
    """
    # NOTE(review): the `rpc` and `profile` flags are accepted but not yet
    # used in this body — confirm whether they should alter the Client.
    user_ns = __main__.__dict__
    c = Client(host, port)

    def embed_with_cli(client):
        import IPython
        # FIX: sys.exitfunc only exists on Python 2; guard the deletion so
        # Python 3 does not raise AttributeError before embedding.
        try:
            del sys.exitfunc
        except AttributeError:
            pass
        # Expose the connected client as `c` inside the IPython session.
        user_ns.update(dict(c=client))
        IPython.embed(user_ns=user_ns)
    embed_with_cli(c)
| {
"content_hash": "b82b5f71104df841bdf1af4ba5837dd7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 26.742857142857144,
"alnum_prop": 0.6282051282051282,
"repo_name": "MrKiven/REST_ARCH",
"id": "ee1d7484252adae0280664d01d7c9a00ae58ee7d",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_arch/skt/cmds/shell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1945"
},
{
"name": "Makefile",
"bytes": "595"
},
{
"name": "Python",
"bytes": "69071"
}
],
"symlink_target": ""
} |
import os
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Everyone, Authenticated, Allow
from pyramid.session import SignedCookieSessionFactory
from passlib.apps import custom_app_context as context
class MyRoot(object):
    """Traversal root resource; holds the request and declares the ACL."""

    # Everyone may view; only authenticated users get the 'secret' permission.
    __acl__ = [
        (Allow, Everyone, 'view'),
        (Allow, Authenticated, 'secret'),
    ]

    def __init__(self, request):
        """Takes the request as an argument for initialization."""
        self.request = request
def includeme(config):
    """Wire authentication, authorization, sessions and CSRF into *config*."""
    # Secrets come from the environment; empty-string fallbacks keep startup
    # from crashing when they are unset.
    authn = AuthTktAuthenticationPolicy(
        secret=os.environ.get('AUTH_SECRET', ''),
        hashalg='sha512'
    )
    config.set_authentication_policy(authn)
    config.set_authorization_policy(ACLAuthorizationPolicy())
    config.set_root_factory(MyRoot)
    cookie_factory = SignedCookieSessionFactory(
        os.environ.get('SESSION_SECRET', ''))
    config.set_session_factory(cookie_factory)
    config.set_default_csrf_options(require_csrf=True)
def check_credentials(username, password):
    """Return True when username/password match the stored credentials."""
    stored_username = os.environ.get('AUTH_USERNAME', '')
    stored_password = os.environ.get('AUTH_PASSWORD', '')
    # Missing configuration means nobody can authenticate.
    if not (stored_username and stored_password):
        return False
    if username != stored_username:
        return False
    # stored_password is a passlib hash; verify() does the salted comparison.
    return bool(context.verify(password, stored_password))
| {
"content_hash": "efd389cad3458c63b922f8ecc9073362",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 64,
"avg_line_length": 33.708333333333336,
"alnum_prop": 0.7033374536464772,
"repo_name": "Casey0Kane/pyramid-learning-journal",
"id": "859f0bafbbf2c9f37604513caddee2d3e8184eaa",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learning_journal/learning_journal/security.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3935"
},
{
"name": "JavaScript",
"bytes": "805"
},
{
"name": "Python",
"bytes": "23723"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
from ehb_client.requests.base import JsonRequestBase, RequestBase, IdentityBase
import json
from ehb_client.requests.exceptions import InvalidArguments
from ehb_client.requests.subject_request_handler import Subject
from ehb_client.requests.external_record_request_handler import ExternalRecord
class Group(IdentityBase):
    """Client-side representation of an eHB Group identity."""

    def __init__(self, name, description, is_locking, client_key, ehb_key=None,
                 modified=None, created=None, id=-1):
        '''
        Represents a Group in the eHB system. Note that client_key is NEVER
        provided by a response as the eHB only stores salted and hashed versions
        of client_key. A value of client_key should be provided when
        1. creating a new Group (required)
        2. updating an existing Group (optional, not needed if keeping existing client_key)
        When updating a Group using the request handler's update method it is
        necessary to have called the 'current_client_key' method on this Group
        '''
        self.name = name
        self.description = description
        # Locking groups require the client key for membership mutations.
        self.is_locking = is_locking
        self.client_key = client_key
        self.ehb_key = ehb_key
        self.modified = modified
        self.created = created
        # Set via current_client_key(); included in the update payload only
        # when present (see json_from_identity).
        self._current_client_key = None
        self.id = id

    def current_client_key(self, key):
        # Record the key currently stored server-side; required before update().
        self._current_client_key = key

    @staticmethod
    def findIdentity(searchTermsDict, *identities):
        # Search first by id, then by name; returns None when nothing matches.
        gid = searchTermsDict.get('id')
        if gid:
            for grp in identities:
                if grp.id == int(gid):
                    return grp
        gname = searchTermsDict.get('name')
        if gname:
            for grp in identities:
                if grp.name == gname:
                    return grp

    identityLabel = 'group'

    @staticmethod
    def identity_from_json(groupJsonString):
        # Convenience wrapper: parse the JSON string then delegate.
        return Group.identity_from_jsonObject(json.loads(groupJsonString))

    @staticmethod
    def identity_from_jsonObject(jsonObj):
        # Build a Group from a server response; client_key is never returned
        # by the eHB, hence ck = None.
        n = jsonObj.get('name')
        des = jsonObj.get('description')
        lm = RequestBase.dateTimeFromJsonString(jsonObj.get('modified'))
        c = RequestBase.dateTimeFromJsonString(jsonObj.get('created'))
        gid = int(jsonObj.get('id'))
        il = jsonObj.get('is_locking', False)
        ek = jsonObj.get('ehb_key')
        ck = None
        return Group(name=n, description=des, is_locking=il, ehb_key=ek,
                     client_key=ck, modified=lm, created=c, id=gid)

    @staticmethod
    def json_from_identity(grp):
        # Serialize the mutable/request fields; current_client_key is only
        # attached when it has been set (needed for update requests).
        obj = {
            'name': grp.name,
            'description': grp.description,
            'is_locking': grp.is_locking,
            'client_key': grp.client_key,
        }
        if grp._current_client_key:
            obj['current_client_key'] = grp._current_client_key
        return json.dumps(obj)
class GroupRequestHandler(JsonRequestBase):
    """CRUD and membership operations for Groups on the eHB service.

    Membership helpers send the GROUP-CLIENT-KEY header whenever the group
    is locking, mirroring the eHB API's locking semantics.
    """

    def __init__(self, host, root_path='', secure=False, api_key=None):
        RequestBase.__init__(self, host, '{0}/api/group/'.format(root_path), secure, api_key)

    def _process_by_id_or_name(self, func, **id_or_name):
        """Call *func* with a query path built from `id` or, failing that, `name`.

        Raises InvalidArguments when neither keyword is provided.
        """
        gid = id_or_name.pop('id', None)
        if gid:
            path = self.root_path + '?id=' + str(gid)
            return func(path)
        gname = id_or_name.pop('name', None)
        if gname:
            path = self.root_path + '?name=' + gname
            return func(path)
        raise InvalidArguments('id OR name')

    def get(self, **id_or_name):
        """Fetch a single Group by `id` or `name` keyword argument."""
        def func(path):
            return Group.identity_from_json(self.processGet(path))
        return self._process_by_id_or_name(func, **id_or_name)

    def get_subjects(self, group):
        '''
        Attempts to get all the subjects, if any associated with this group
        '''
        return self._get_x_in_group(group, Subject, '/subjects/')

    def add_subjects(self, group, subjects):
        '''
        Attempts to add each subject to the group. These subjects must already be in the eHB
        '''
        return self._add_x_to_group(group, Subject, '/subjects/', subjects)

    def remove_subject(self, group, subject):
        '''
        Attempts to remove subject from the group. The subject must already be in the eHB
        '''
        return self._remove_x_from_group(group, '/subjects/', subject)

    def get_records(self, group):
        '''
        Attempts to get all the externalRecords, if any, associated with this group
        '''
        return self._get_x_in_group(group, ExternalRecord, '/records/')

    def add_records(self, group, external_records):
        '''
        Attempts to add each record to the group. These records must already be in the eHB
        '''
        return self._add_x_to_group(group, ExternalRecord, '/records/', external_records)

    def remove_record(self, group, external_record):
        '''
        Attempts to remove external_record from the group. The ExternalRecord must already be in the eHB
        '''
        return self._remove_x_from_group(group, '/records/', external_record)

    def _add_x_to_group(self, group, X, xpath, xs):
        '''
        Attempts to add each x of type X to the group. These xs must already be in the eHB
        '''
        ehb_service_path = self.root_path + 'id/' + str(group.id) + xpath
        headers = {'Content-Type': 'application/json'}
        if group.is_locking:
            # Locking groups require the client key on every mutation.
            headers = {'GROUP-CLIENT-KEY': group.client_key, 'Content-Type': 'application/json'}
        # FIX: build the id list with json.dumps. The previous manual string
        # concatenation produced the malformed body "]" when xs was empty.
        body = json.dumps([x.id for x in xs])
        response = self.processPost(ehb_service_path, body, headers)
        return json.loads(response)

    def _get_x_in_group(self, group, X, xpath):
        '''
        Attempts to get objects of type X, if any, associated with this group
        '''
        ehb_service_path = self.root_path + 'id/' + str(group.id) + xpath
        headers = {'Accept': 'application/json'}
        if group.is_locking:
            headers = {'GROUP-CLIENT-KEY': group.client_key, 'Accept': 'application/json'}
        response = self.processGet(ehb_service_path, headers)
        return [X.identity_from_jsonObject(o) for o in json.loads(response)]

    def _remove_x_from_group(self, group, xpath, x):
        '''
        Attempts to remove x from the group. The x must already be in the eHB
        '''
        ehb_service_path = self.root_path + 'id/' + str(group.id) + xpath + 'id/' + str(x.id) + '/'
        headers = {'Accept': 'application/json'}
        if group.is_locking:
            headers = {'GROUP-CLIENT-KEY': group.client_key, 'Accept': 'application/json'}
        return self.processDelete(ehb_service_path, headers)

    def delete(self, **kwargs):
        '''
        Delete a Group. kwargs MUST include the following:
        client_key : current value of the Group's client_key
        id : the Group's id
        OR
        name : the Group's name
        '''
        cck = kwargs.get('client_key')
        if not cck:
            raise InvalidArguments('client_key')

        def func(path):
            return self.processDelete(path, {'GROUP-CLIENT-KEY': cck, 'Accept': 'application/json'})
        return self._process_by_id_or_name(func, **kwargs)

    def create(self, *groups):
        '''
        Given an arbitrary number of Group objects, this method attempts to
        create the groups in the eHB server database.
        '''
        def onSuccess(grp, o):
            # Copy server-assigned identifiers/timestamps back onto the
            # local Group object.
            grp.id = int(o.get('id'))
            grp.created = RequestBase.dateTimeFromJsonString(o.get('created'))
            grp.modified = RequestBase.dateTimeFromJsonString(o.get('modified'))
            grp.ehb_key = o.get('ehb_key')
        return self.standardCreate(Group, onSuccess, *groups)

    def update(self, *groups):
        '''
        This method will fail if 'current_client_key' method has not been
        called on each Group.
        If successful this method will update the current_client_key to the
        value of client_key
        '''
        def onSuccess(g, o):
            g.current_client_key(g.client_key)
            g.modified = RequestBase.dateTimeFromJsonString(o.get('modified'))
        return self.standardUpdate(Group, onSuccess, *groups)
| {
"content_hash": "f75d665959a479b083dda9144b0312b1",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 104,
"avg_line_length": 38.742990654205606,
"alnum_prop": 0.6046315281630684,
"repo_name": "chop-dbhi/ehb-client",
"id": "357caea0f2fb14167bb133a4325ceed14f6deb0e",
"size": "8291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ehb_client/requests/group_request_handler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "179924"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
class Algorithm(object, metaclass=ABCMeta):
    """Abstract base class for paging algorithms.

    FIX: the original set ``__meta__ = ABCMeta``, which is a typo for the
    metaclass hook (Python 3 uses the ``metaclass=`` keyword; Python 2 used
    ``__metaclass__``). ``__meta__`` is read by neither, so ``@abstractmethod``
    was never enforced and the class could be instantiated directly.
    """

    def __init__(self, data, cache_size):
        # data: the sequence of page references to process
        self.data = data
        # cache_size: number of page frames available
        self.cache_size = cache_size

    @abstractmethod
    def compute(self):
        """Run the algorithm; must be implemented by concrete subclasses."""
        pass
| {
"content_hash": "602c1f219e20dc2aedcb31c6fcc7e2ca",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 41,
"avg_line_length": 19.384615384615383,
"alnum_prop": 0.6150793650793651,
"repo_name": "vtemian/uni-west",
"id": "3d6bd7087d3b295d609e9f8e8d566d66c1b2a5cc",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "second_year/os/exams/round2/paging/algorithms/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "59211"
},
{
"name": "C++",
"bytes": "67296"
},
{
"name": "CLIPS",
"bytes": "10727"
},
{
"name": "CMake",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "182870"
},
{
"name": "Java",
"bytes": "231703"
},
{
"name": "JavaScript",
"bytes": "1238"
},
{
"name": "Lua",
"bytes": "820"
},
{
"name": "Makefile",
"bytes": "419"
},
{
"name": "Python",
"bytes": "29914"
},
{
"name": "Racket",
"bytes": "31132"
},
{
"name": "Shell",
"bytes": "192"
}
],
"symlink_target": ""
} |
import typer
def main(file: typer.FileBinaryWrite = typer.Option(...)):
    """Demonstrate writing both encoded str and raw bytes to a binary file."""
    header = "some settings\n"
    # You cannot write str directly to a binary file, you have to encode it
    # to get bytes first.
    file.write(header.encode("utf-8"))
    # A bytes literal (note the b"" prefix) can be written as-is.
    file.write(b"la cig\xc3\xbce\xc3\xb1a trae al ni\xc3\xb1o")
    print("Binary file written")


if __name__ == "__main__":
    typer.run(main)
| {
"content_hash": "77e728f5ed583e6b44cbb296069173d2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 88,
"avg_line_length": 32.1764705882353,
"alnum_prop": 0.6654478976234004,
"repo_name": "tiangolo/typer",
"id": "1fa3ae825c9f23b4e0ab3db5ab39759fe5de2f5f",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/parameter_types/file/tutorial004.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "380062"
},
{
"name": "Shell",
"bytes": "2257"
}
],
"symlink_target": ""
} |
"""FederatedData backed by SQLite."""
from typing import Callable, Iterable, Iterator, Optional, Tuple, List
import zlib
from fedjax.core import client_datasets
from fedjax.core import federated_data
from fedjax.core import serialization
import numpy as np
import sqlite3
def decompress_and_deserialize(data: bytes):
    """Inflate a zlib-compressed blob and decode its msgpack payload."""
    return serialization.msgpack_deserialize(zlib.decompress(data))
class SQLiteFederatedData(federated_data.FederatedData):
    """Federated dataset backed by SQLite.

    The SQLite database should contain a table named "federated_data" created with
    the following command::

      CREATE TABLE federated_data (
        client_id BLOB NOT NULL PRIMARY KEY,
        data BLOB NOT NULL,
        num_examples INTEGER NOT NULL
      );

    where,

    - `client_id` is the bytes client id.
    - `data` is the serialized client dataset examples.
    - `num_examples` is the number of examples in the client dataset.

    By default we use zlib compressed msgpack blobs for `data` (see
    decompress_and_deserialize()).
    """

    @staticmethod
    def new(
        path: str,
        parse_examples: Callable[
            [bytes], client_datasets.Examples] = decompress_and_deserialize
    ) -> 'SQLiteFederatedData':
        """Opens a federated dataset stored as an SQLite3 database.

        Args:
          path: Path to the SQLite database file.
          parse_examples: Function for deserializing client dataset examples.

        Returns:
          SQLite3DataSource.
        """
        connection = sqlite3.connect(path)
        return SQLiteFederatedData(connection, parse_examples)

    def __init__(self,
                 connection: sqlite3.Connection,
                 parse_examples: Callable[[bytes], client_datasets.Examples],
                 start: Optional[federated_data.ClientId] = None,
                 stop: Optional[federated_data.ClientId] = None,
                 preprocess_client: federated_data
                 .ClientPreprocessor = federated_data.NoOpClientPreprocessor,
                 preprocess_batch: client_datasets
                 .BatchPreprocessor = client_datasets.NoOpBatchPreprocessor):
        # start/stop bound the half-open client id range [start, stop);
        # None means unbounded on that side (see _range_where()).
        self._connection = connection
        self._parse_examples = parse_examples
        self._start = start
        self._stop = stop
        self._preprocess_client = preprocess_client
        self._preprocess_batch = preprocess_batch

    def slice(
        self,
        start: Optional[federated_data.ClientId] = None,
        stop: Optional[federated_data.ClientId] = None) -> 'SQLiteFederatedData':
        """Returns a view restricted to the intersected client id range."""
        start, stop = federated_data.intersect_slice_ranges(self._start, self._stop,
                                                            start, stop)
        # Shares the underlying connection; only the range bounds change.
        return SQLiteFederatedData(self._connection, self._parse_examples, start,
                                   stop, self._preprocess_client,
                                   self._preprocess_batch)

    def preprocess_client(
        self, fn: Callable[[federated_data.ClientId, client_datasets.Examples],
                           client_datasets.Examples]
    ) -> 'SQLiteFederatedData':
        """Returns a view with *fn* appended to the client preprocessor chain."""
        return SQLiteFederatedData(self._connection, self._parse_examples,
                                   self._start, self._stop,
                                   self._preprocess_client.append(fn),
                                   self._preprocess_batch)

    def preprocess_batch(
        self, fn: Callable[[client_datasets.Examples], client_datasets.Examples]
    ) -> 'SQLiteFederatedData':
        """Returns a view with *fn* appended to the batch preprocessor chain."""
        return SQLiteFederatedData(self._connection, self._parse_examples,
                                   self._start, self._stop, self._preprocess_client,
                                   self._preprocess_batch.append(fn))

    def _range_where(self) -> str:
        """Builds appropriate WHERE clauses for start/stop ranges."""
        # Uses named parameters :start/:stop supplied by the callers below;
        # '(1)' is an always-true clause when the range is unbounded.
        if self._start is None and self._stop is None:
            return '(1)'
        elif self._start is not None and self._stop is not None:
            return '(:start <= client_id AND client_id < :stop)'
        elif self._start is None:
            return '(client_id < :stop)'
        else:
            return '(:start <= client_id)'

    def num_clients(self) -> int:
        """Number of clients within the current range."""
        cursor = self._connection.execute(
            f'SELECT COUNT(*) FROM federated_data WHERE {self._range_where()};', {
                'start': self._start,
                'stop': self._stop
            })
        return cursor.fetchone()[0]

    def client_ids(self) -> Iterator[federated_data.ClientId]:
        """Yields client ids within range, in table insertion (rowid) order."""
        cursor = self._connection.execute(
            f'SELECT client_id FROM federated_data WHERE {self._range_where()} ORDER BY rowid;',
            {
                'start': self._start,
                'stop': self._stop
            })
        while True:
            result = cursor.fetchone()
            if result is None:
                break
            yield result[0]

    def client_sizes(self) -> Iterator[Tuple[federated_data.ClientId, int]]:
        """Yields (client_id, num_examples) pairs within range."""
        cursor = self._connection.execute(
            f'SELECT client_id, num_examples FROM federated_data WHERE {self._range_where()} ORDER BY rowid;',
            {
                'start': self._start,
                'stop': self._stop
            })
        while True:
            result = cursor.fetchone()
            if result is None:
                break
            yield tuple(result)

    def client_size(self, client_id: federated_data.ClientId) -> int:
        """Number of examples for *client_id*; KeyError when outside range or absent."""
        if ((self._start is None or self._start <= client_id) and
            (self._stop is None or client_id < self._stop)):
            cursor = self._connection.execute(
                'SELECT num_examples FROM federated_data WHERE client_id = ?',
                [client_id])
            result = cursor.fetchone()
            if result is not None:
                return result[0]
        raise KeyError

    def clients(
        self
    ) -> Iterator[Tuple[federated_data.ClientId, client_datasets.ClientDataset]]:
        """Yields (client_id, preprocessed ClientDataset) pairs within range."""
        for k, v in self._read_clients():
            yield k, self._client_dataset(k, v)

    def _read_clients(self):
        # Yields raw (client_id, serialized data) rows for the current range.
        cursor = self._connection.execute(
            f'SELECT client_id, data FROM federated_data WHERE {self._range_where()} ORDER BY rowid;',
            {
                'start': self._start,
                'stop': self._stop
            })
        while True:
            result = cursor.fetchone()
            if result is None:
                break
            yield tuple(result)

    def shuffled_clients(
        self,
        buffer_size: int,
        seed: Optional[int] = None
    ) -> Iterator[Tuple[federated_data.ClientId, client_datasets.ClientDataset]]:
        """Yields buffer-shuffled clients, cycling over the dataset forever."""
        rng = np.random.RandomState(seed)
        # Intentionally an infinite stream: restart the table scan each pass.
        while True:
            for k, v in client_datasets.buffered_shuffle(self._read_clients(),
                                                         buffer_size, rng):
                yield k, self._client_dataset(k, v)

    def get_clients(
        self, client_ids: Iterable[federated_data.ClientId]
    ) -> Iterator[Tuple[federated_data.ClientId, client_datasets.ClientDataset]]:
        """Yields (client_id, ClientDataset) for each requested id, in order."""
        for client_id in client_ids:
            yield client_id, self.get_client(client_id)

    def get_client(
        self,
        client_id: federated_data.ClientId) -> client_datasets.ClientDataset:
        """Looks up a single client; KeyError when outside range or absent."""
        if ((self._start is None or self._start <= client_id) and
            (self._stop is None or client_id < self._stop)):
            cursor = self._connection.execute(
                'SELECT data FROM federated_data WHERE client_id = ?', [client_id])
            result = cursor.fetchone()
            if result is not None:
                return self._client_dataset(client_id, result[0])
        raise KeyError

    def _client_dataset(self, client_id: federated_data.ClientId,
                        data: bytes) -> client_datasets.ClientDataset:
        # Deserialize, run the client preprocessor chain, then wrap with the
        # batch preprocessor for lazy per-batch processing.
        examples = self._preprocess_client(client_id, self._parse_examples(data))
        return client_datasets.ClientDataset(examples, self._preprocess_batch)
class SQLiteFederatedDataBuilder(federated_data.FederatedDataBuilder):
    """Builds SQLite files from a python dictionary containing an arbitrary mapping of client IDs to NumPy examples."""

    def __init__(self, path: str):
        """Initializes SQLiteBuilder by opening a connection and setting up the database with columns.

        Args:
          path: Path of file to write to (e.g. /tmp/sqlite_federated_data.sqlite).
        """
        self._connection = sqlite3.connect(path)
        # Creates the schema expected by SQLiteFederatedData. NOTE(review):
        # CREATE TABLE without IF NOT EXISTS — presumably this builder is
        # always pointed at a fresh file; it errors on an existing table.
        self._connection.execute(""" CREATE TABLE federated_data (
                                 client_id BLOB NOT NULL PRIMARY KEY,
                                 data BLOB NOT NULL,
                                 num_examples INTEGER NOT NULL );""")
        self._connection.commit()

    def __enter__(self):
        # Context-manager support so callers can use `with ... as builder:`.
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Close the connection regardless of whether an exception occurred.
        self._connection.close()

    def add_many(self,
                 client_ids_examples: Iterable[Tuple[bytes,
                                                     client_datasets.Examples]]):
        """Inserts (client_id, examples) pairs, serialized and compressed."""
        def prepare_parameters(ce):
            # Maps one (client_id, examples) pair to the row tuple
            # (client_id, compressed-msgpack data, num_examples).
            client_id = ce[0]
            examples = ce[1]
            num_examples = client_datasets.num_examples(examples, validate=True)
            data = zlib.compress(serialization.msgpack_serialize(examples))
            return client_id, data, num_examples

        # map() keeps the pipeline lazy; executemany consumes it row by row.
        client_ids_datas_num_examples = map(prepare_parameters, client_ids_examples)
        self._connection.executemany('INSERT INTO federated_data VALUES (?, ?, ?);',
                                     client_ids_datas_num_examples)
        self._connection.commit()
class TFFSQLiteClientsIterator:
    """Iterator over clients stored in TensorFlow Federated (TFF) SQLite tables.

    TFF uses a "1 example per record" format, where each row in the SQLite table
    is a key-value pair from the client id to a protocol buffer message of a
    single example. A custom `parse_examples` function is thus necessary for
    different datasets in order to parse different example formats.

    The TFF SQLite database should contain two tables named "examples" and
    "client_metadata" created with the following command::

      CREATE TABLE examples (
        split_name TEXT NOT NULL,
        client_id TEXT NOT NULL,
        serialized_example_proto BLOB NOT NULL);
      CREATE INDEX idx_examples_client_id
        ON examples (client_id);
      CREATE INDEX idx_examples_client_id_split
        ON examples (split_name, client_id);
      CREATE TABLE client_metadata (
        client_id TEXT NOT NULL,
        split_name TEXT NOT NULL,
        num_examples INTEGER NOT NULL);
      CREATE INDEX idx_metadata_client_id
        ON client_metadata (client_id);

    where,

    - `split_name` is the split name (e.g. "train" or "test").
    - `client_id` is the client id.
    - `serialized_example_proto` is a single serialized `tf.train.Example`
      protocol buffer message.
    - `num_examples` is the number of examples in the client dataset.
    """

    def __init__(self, path: str,
                 parse_examples: Callable[[List[bytes]],
                                          client_datasets.Examples],
                 split_name: str):
        # One connection per iterator, closed in __del__.
        self._connection = sqlite3.connect(path)
        self._parse_examples = parse_examples
        self._split_name = split_name
        # Cursor over client ids of the requested split in sorted order;
        # __next__ consumes it one row at a time.
        self._client_ids_cursor = self._connection.execute(
            'SELECT client_id FROM client_metadata WHERE split_name = ? ORDER BY client_id;',
            [split_name])

    def __del__(self):
        # Best-effort cleanup when the iterator is garbage collected.
        self._connection.close()

    def __iter__(
        self
    ) -> Iterator[Tuple[federated_data.ClientId, client_datasets.ClientDataset]]:
        return self

    def __next__(
        self) -> Tuple[federated_data.ClientId, client_datasets.ClientDataset]:
        client_ids_result = self._client_ids_cursor.fetchone()
        if client_ids_result is None:
            raise StopIteration
        client_id = client_ids_result[0]
        # Collect every single-example record for this client in insertion
        # order and hand the raw serialized protos to parse_examples.
        examples_cursor = self._connection.execute(
            'SELECT serialized_example_proto FROM examples WHERE split_name = ? AND client_id = ? ORDER BY rowid;',
            [self._split_name, client_id])
        examples = [r[0] for r in examples_cursor.fetchall()]
        return client_id, client_datasets.ClientDataset(
            self._parse_examples(examples))
| {
"content_hash": "7af53ce179d101dd4e3e28f35b71d661",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 117,
"avg_line_length": 36.19749216300941,
"alnum_prop": 0.6417251234086776,
"repo_name": "google/fedjax",
"id": "caec2883cd1114da3938caeb7c132361a0675bc3",
"size": "12122",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fedjax/core/sqlite_federated_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "64566"
},
{
"name": "Python",
"bytes": "596793"
},
{
"name": "Shell",
"bytes": "6541"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
from numba import cuda
import unittest
# Feature flags for optional third-party test dependencies.
try:
    import git  # noqa: F401 from gitpython package
    has_gitpython = True
except ImportError:
    has_gitpython = False

try:
    import yaml  # from pyyaml package
    has_pyyaml = True
except ImportError:
    has_pyyaml = False
class TestCase(unittest.TestCase):
"""These test cases are meant to test the Numba test infrastructure itself.
Therefore, the logic used here shouldn't use numba.testing, but only the
upstream unittest, and run the numba test suite only in a subprocess."""
def get_testsuite_listing(self, args, *, subp_kwargs=None):
"""
Use `subp_kwargs` to pass extra argument to `subprocess.check_output`.
"""
subp_kwargs = subp_kwargs or {}
cmd = [sys.executable, '-m', 'numba.runtests', '-l'] + list(args)
out_bytes = subprocess.check_output(cmd, **subp_kwargs)
lines = out_bytes.decode('UTF-8').splitlines()
lines = [line for line in lines if line.strip()]
return lines
def check_listing_prefix(self, prefix):
listing = self.get_testsuite_listing([prefix])
for ln in listing[:-1]:
errmsg = '{!r} not startswith {!r}'.format(ln, prefix)
self.assertTrue(ln.startswith(prefix), msg=errmsg)
    def check_testsuite_size(self, args, minsize):
        """
        Check that the reported numbers of tests are at least *minsize*.

        Returns the full listing so callers can run further checks on it.
        """
        lines = self.get_testsuite_listing(args)
        # The listing ends with a summary such as "<N> tests found".
        last_line = lines[-1]
        self.assertTrue('tests found' in last_line)
        number = int(last_line.split(' ')[0])
        # There may be some "skipped" messages at the beginning,
        # so do an approximate check.
        self.assertIn(len(lines), range(number + 1, number + 10))
        self.assertGreaterEqual(number, minsize)
        return lines
    def check_all(self, ids):
        """Check a full-suite listing: size, CUDA tests, and a subpackage."""
        lines = self.check_testsuite_size(ids, 5000)
        # CUDA should be included by default
        self.assertTrue(any('numba.cuda.tests.' in line for line in lines))
        # As well as subpackage
        self.assertTrue(any('numba.tests.npyufunc.test_' in line
                            for line in lines),)
    def test_default(self):
        """The default (no-argument) listing covers the whole suite."""
        self.check_all([])
    def test_all(self):
        """Listing 'numba.tests' explicitly also covers the whole suite."""
        self.check_all(['numba.tests'])
    def test_cuda(self):
        """The CUDA subsuite lists tests even without CUDA hardware."""
        # Even without CUDA enabled, there is at least one test
        # (in numba.cuda.tests.nocuda)
        minsize = 100 if cuda.is_available() else 1
        self.check_testsuite_size(['numba.cuda.tests'], minsize)
    @unittest.skipIf(not cuda.is_available(), "NO CUDA")
    def test_cuda_submodules(self):
        """Each CUDA test submodule lists ids under its own prefix."""
        self.check_listing_prefix('numba.cuda.tests.cudadrv')
        self.check_listing_prefix('numba.cuda.tests.cudapy')
        self.check_listing_prefix('numba.cuda.tests.nocuda')
        self.check_listing_prefix('numba.cuda.tests.cudasim')
    def test_module(self):
        """Single and multiple module selection yields the expected counts."""
        self.check_testsuite_size(['numba.tests.test_storeslice'], 2)
        self.check_testsuite_size(['numba.tests.test_nested_calls'], 10)
        # Several modules
        self.check_testsuite_size(['numba.tests.test_nested_calls',
                                   'numba.tests.test_storeslice'], 12)
    def test_subpackage(self):
        """Selecting a subpackage lists its tests."""
        self.check_testsuite_size(['numba.tests.npyufunc'], 50)
    def test_random(self):
        """Random sub-selection (--random RATIO) still lists tests."""
        self.check_testsuite_size(
            ['--random', '0.1', 'numba.tests.npyufunc'], 5)
    def test_include_exclude_tags(self):
        """--tags and --exclude-tags partition the suite exactly."""
        def get_count(arg_list):
            # Parse the "<N> tests found" summary line into an int.
            lines = self.get_testsuite_listing(arg_list)
            self.assertIn('tests found', lines[-1])
            count = int(lines[-1].split()[0])
            self.assertTrue(count > 0)
            return count

        tags = ['long_running', 'long_running, important']
        for tag in tags:
            # Included + excluded must add up to the whole suite.
            total = get_count(['numba.tests'])
            included = get_count(['--tags', tag, 'numba.tests'])
            excluded = get_count(['--exclude-tags', tag, 'numba.tests'])
            self.assertEqual(total, included + excluded)
            # check syntax with `=` sign in
            total = get_count(['numba.tests'])
            included = get_count(['--tags=%s' % tag, 'numba.tests'])
            excluded = get_count(['--exclude-tags=%s' % tag, 'numba.tests'])
            self.assertEqual(total, included + excluded)
def test_check_slice(self):
tmp = self.get_testsuite_listing(['-j','0,5,1'])
l = [x for x in tmp if x.startswith('numba.')]
self.assertEqual(len(l), 5)
def test_check_slicing_equivalent(self):
def filter_test(xs):
return [x for x in xs if x.startswith('numba.')]
full = filter_test(self.get_testsuite_listing([]))
sliced = []
for i in range(3):
subset = self.get_testsuite_listing(['-j', '{},None,3'.format(i)])
sliced.extend(filter_test(subset))
# The tests must be equivalent
self.assertEqual(sorted(full), sorted(sliced))
    @unittest.skipUnless(has_gitpython, "Requires gitpython")
    def test_gitdiff(self):
        """Check the -g (git diff) listing modes; requires git and gitpython."""
        # Check for git
        try:
            subprocess.call("git",
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
        except FileNotFoundError:
            self.skipTest("no git available")
        # default
        outs = self.get_testsuite_listing(['-g'])
        self.assertNotIn("Git diff by common ancestor", outs)
        # using ancestor
        outs = self.get_testsuite_listing(['-g=ancestor'])
        self.assertIn("Git diff by common ancestor", outs)
        # misspelled ancestor
        # An invalid -g value must make the listing subprocess fail.
        subp_kwargs = dict(stderr=subprocess.DEVNULL)
        with self.assertRaises(subprocess.CalledProcessError):
            self.get_testsuite_listing(['-g=ancest'], subp_kwargs=subp_kwargs)
    @unittest.skipUnless(has_pyyaml, "Requires pyyaml")
    def test_azure_config(self):
        """Check the Azure CI sharding configuration for consistency:
        TEST_START_INDEX values must be unique, form a complete 0-based
        range, and their count must match the declared TEST_COUNT."""
        from yaml import Loader
        base_path = os.path.dirname(os.path.abspath(__file__))
        azure_pipe = os.path.join(base_path, '..', '..', 'azure-pipelines.yml')
        with open(os.path.abspath(azure_pipe), 'rt') as f:
            data = f.read()
        pipe_yml = yaml.load(data, Loader=Loader)
        templates = pipe_yml['jobs']
        # first look at the items in the first two templates, this is osx/linux
        start_indexes = []
        for tmplt in templates[:2]:
            matrix = tmplt['parameters']['matrix']
            for setup in matrix.values():
                start_indexes.append(setup['TEST_START_INDEX'])
        # next look at the items in the windows only template
        winpath = ['..', '..', 'buildscripts', 'azure', 'azure-windows.yml']
        azure_windows = os.path.join(base_path, *winpath)
        with open(os.path.abspath(azure_windows), 'rt') as f:
            data = f.read()
        windows_yml = yaml.load(data, Loader=Loader)
        # There's only one template in windows and its keyed differently to the
        # above, get its matrix.
        matrix = windows_yml['jobs'][0]['strategy']['matrix']
        for setup in matrix.values():
            start_indexes.append(setup['TEST_START_INDEX'])
        # sanity checks
        # 1. That the TEST_START_INDEX is unique
        self.assertEqual(len(start_indexes), len(set(start_indexes)))
        # 2. That the TEST_START_INDEX is a complete range
        lim_start_index = max(start_indexes) + 1
        expected = [*range(lim_start_index)]
        self.assertEqual(sorted(start_indexes), expected)
        # 3. That the number of indexes matches the declared test count
        self.assertEqual(lim_start_index, pipe_yml['variables']['TEST_COUNT'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "4fda8e3d5c864ac9998864bf1cba7d2b",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 79,
"avg_line_length": 39.01990049751244,
"alnum_prop": 0.6033405584597731,
"repo_name": "stonebig/numba",
"id": "e99bd486d76fb4f2cf363a036ea65a8117586bfa",
"size": "7843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/tests/test_runtests.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "228078"
},
{
"name": "C++",
"bytes": "18847"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "2965893"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
import logging
#import logging.config
from logging.handlers import RotatingFileHandler
import sys
#logging.config.fileConfig('logging.conf')
# Log destination; see the To-do note at the bottom of this file about
# embedding the sensor name in the file name.
LOG_FILENAME = "/home/tinyos/devel/BerePi/logs/berelogger.log"
#LOG_FILENAME = "/Users/tinyos/devel/BerePi/logs/berelogger.log"
# Module-level logger shared by berelog(); configured once at import time.
logger = logging.getLogger('BereLogger')
logger.setLevel(logging.DEBUG)
# Choose TimeRoatatingFileHandler or RotatingFileHandler
#handler = logging.handlers.TimedRotatingFileHandler(filename=LOG_FILENAME, when="midnight", interval=1, encoding="utf-8")
# Rotate at ~200 kB, keeping up to 9 backup files.
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, mode='a', maxBytes=200000, backupCount=9)
handler.formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger.addHandler(handler)
# API for outside
def berelog(msg_name, value=None):
    """Write *msg_name* (optionally with *value*) to the BerePi log file.

    When *value* is given, the entry is logged as "<msg_name> ==> <value>".
    """
    print ("logging to", LOG_FILENAME, 'log file name')
    # 'is None' instead of '== None' (identity test), and a plain else
    # instead of the redundant 'elif value != None'.
    if value is None:
        logger.info(msg_name)
    else:
        # str() guards against non-string values; the original concatenation
        # raised TypeError for e.g. numbers.
        logger.info(msg_name + ' ==> ' + str(value))
def args_proc():
    """Return a copy of sys.argv, exiting when no argument was supplied.

    Exits with a message when the script was called without any argument
    (sys.argv then only holds the script name).
    """
    num_of_args = len(sys.argv)
    if num_of_args < 2:
        print('current number of args --> ', num_of_args )
        exit("[bye] you have to write input args ")
    # A plain copy (rather than the original element-by-element loop) keeps
    # callers free to mutate the result without touching sys.argv.
    return list(sys.argv)
if __name__ == "__main__":
    # CLI usage: berepi_logger.py <value>; logs args[1] as the value.
    args = args_proc()
    berelog('logging cpu temp', args[1])
# 'application' code
#logger.debug('debug message')
#logger.info('info message')
#logger.warn('warn message')
#logger.error('error message')
#logger.critical('critical message')
"""
if you want to use, this berepi_logger
import logging, and use berelog('*****')
"""
'''
To do:
LOG_FILENAME has to have sensor name on the file name
'''
| {
"content_hash": "7d20a06664fb2c634f2c995bdb72ec14",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 122,
"avg_line_length": 29.092307692307692,
"alnum_prop": 0.6583818085668959,
"repo_name": "jeonghoonkang/BerePi",
"id": "dedfc984dcbf39ba7eb26058b4d6abca9623be30",
"size": "1959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/logger/berepi_logger.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "122"
},
{
"name": "C",
"bytes": "5328"
},
{
"name": "CSS",
"bytes": "12303"
},
{
"name": "Dockerfile",
"bytes": "175"
},
{
"name": "HTML",
"bytes": "24280"
},
{
"name": "Java",
"bytes": "2177"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Jupyter Notebook",
"bytes": "933789"
},
{
"name": "Makefile",
"bytes": "111"
},
{
"name": "PHP",
"bytes": "893"
},
{
"name": "Python",
"bytes": "456082"
},
{
"name": "Roff",
"bytes": "415"
},
{
"name": "Shell",
"bytes": "57370"
},
{
"name": "Vim Script",
"bytes": "26865"
},
{
"name": "Visual Basic .NET",
"bytes": "923"
}
],
"symlink_target": ""
} |
import itertools
import unittest
from distutils.version import StrictVersion
import numpy as np
import pandas as pd
import pytest
from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
from coremltools.models.utils import (_is_macos, _macos_version,
evaluate_classifier)
if _HAS_SKLEARN:
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestClassifier
from coremltools.converters import sklearn as skl_converter
@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
class RandomForestClassificationBostonHousingScikitNumericTest(unittest.TestCase):
    """Base class: train a scikit-learn random forest, convert it to Core ML,
    and check that the converted model makes identical predictions.

    Subclasses provide the dataset via setUpClass (self.X, self.target,
    self.feature_names, self.output_name).
    """

    def _check_metrics(self, metrics, params=None):
        """Assert the converted model made no prediction errors.

        *params* (optional dict) is only used to label the failure message.
        """
        # NOTE: the signature previously used a mutable default ``params={}``;
        # ``None`` avoids the shared-mutable-default pitfall while producing
        # the same message for callers that pass nothing.
        if params is None:
            params = {}
        self.assertEqual(
            metrics["num_errors"],
            0,
            msg="Failed case %s. Results %s" % (params, metrics),
        )

    def _train_convert_evaluate_assert(self, **scikit_params):
        """Train with *scikit_params*, convert to Core ML, and (on
        macOS >= 10.13, where prediction is available) compare against
        scikit-learn's own predictions."""
        scikit_model = RandomForestClassifier(random_state=1, **scikit_params)
        scikit_model.fit(self.X, self.target)
        # Convert the model
        spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name)
        if _is_macos() and _macos_version() >= (10, 13):
            # Get predictions
            df = pd.DataFrame(self.X, columns=self.feature_names)
            df["target"] = scikit_model.predict(self.X)
            # Evaluate it
            metrics = evaluate_classifier(spec, df, verbose=False)
            self._check_metrics(metrics, scikit_params)
@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
class RandomForestBinaryClassifierBostonHousingScikitNumericTest(
    RandomForestClassificationBostonHousingScikitNumericTest
):
    """Binary-classification flavour: the continuous Boston target is
    thresholded at its mean to yield two classes."""

    @classmethod
    def setUpClass(cls):
        """
        Set up the unit test by loading the dataset and training a model.
        """
        # Load data and train model
        scikit_data = load_boston()
        cls.X = scikit_data.data.astype("f").astype(
            "d"
        )  ## scikit-learn downcasts data
        # Binarize the target around its mean value.
        cls.target = 1 * (scikit_data["target"] > scikit_data["target"].mean())
        cls.feature_names = scikit_data.feature_names
        cls.output_name = "target"
        cls.scikit_data = scikit_data

    def test_simple_binary_classifier(self):
        self._train_convert_evaluate_assert(max_depth=13)

    @pytest.mark.slow
    def test_binary_classifier_stress_test(self):
        """Exercise the converter over the cartesian product of options."""
        options = dict(
            n_estimators=[1, 5, 10],
            max_depth=[1, 5, None],
            min_samples_split=[2, 10, 0.5],
            min_samples_leaf=[1, 5],
            min_weight_fraction_leaf=[0.0, 0.5],
            max_leaf_nodes=[None, 20],
        )
        if _SKLEARN_VERSION >= StrictVersion("0.19"):
            options["min_impurity_decrease"] = [1e-07, 0.1]
        # Make a cartesian product of all options
        product = itertools.product(*options.values())
        args = [dict(zip(options.keys(), p)) for p in product]
        print("Testing a total of %s cases. This could take a while" % len(args))
        # The enumerate index was unused; iterate the cases directly.
        for arg in args:
            self._train_convert_evaluate_assert(**arg)
@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
class RandomForestMultiClassClassificationBostonHousingScikitNumericTest(
    RandomForestClassificationBostonHousingScikitNumericTest
):
    """Multi-class flavour: the continuous Boston target is binned into
    three classes with a histogram."""

    @classmethod
    def setUpClass(cls):
        """Load the dataset and derive a 3-class target."""
        # Load data and train model
        scikit_data = load_boston()
        cls.X = scikit_data.data.astype("f").astype(
            "d"
        )  ## scikit-learn downcasts data
        t = scikit_data.target
        num_classes = 3
        # Bin the continuous target into num_classes 0-based labels.
        target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1
        # Save the data and the model
        cls.scikit_data = scikit_data
        cls.target = target
        cls.feature_names = scikit_data.feature_names
        cls.output_name = "target"

    def test_simple_multiclass(self):
        self._train_convert_evaluate_assert()

    @pytest.mark.slow
    def test_multiclass_stress_test(self):
        """Exercise the converter over the cartesian product of options."""
        options = dict(
            n_estimators=[1, 5, 10],
            max_depth=[1, 5, None],
            min_samples_split=[2, 10, 0.5],
            min_samples_leaf=[1, 5],
            min_weight_fraction_leaf=[0.0, 0.5],
            max_leaf_nodes=[None, 20],
        )
        if _SKLEARN_VERSION >= StrictVersion("0.19"):
            options["min_impurity_decrease"] = [1e-07, 0.1]
        # Make a cartesian product of all options
        product = itertools.product(*options.values())
        args = [dict(zip(options.keys(), p)) for p in product]
        print("Testing a total of %s cases. This could take a while" % len(args))
        # The enumerate index was unused; iterate the cases directly.
        for arg in args:
            self._train_convert_evaluate_assert(**arg)
| {
"content_hash": "9f6840fb0547f24fd9ae3c90e80734a6",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 88,
"avg_line_length": 35.580882352941174,
"alnum_prop": 0.623269270510436,
"repo_name": "apple/coremltools",
"id": "b1be9b5440f5f63119f3f330bc5a29df79a8b1db",
"size": "5055",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "79917"
},
{
"name": "C++",
"bytes": "1420033"
},
{
"name": "CMake",
"bytes": "20418"
},
{
"name": "Makefile",
"bytes": "4258"
},
{
"name": "Mustache",
"bytes": "2676"
},
{
"name": "Objective-C",
"bytes": "4061"
},
{
"name": "Objective-C++",
"bytes": "28933"
},
{
"name": "Python",
"bytes": "5004520"
},
{
"name": "Shell",
"bytes": "19662"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
from django.urls import reverse
from rest_framework import status
from mezzanine.blog.models import BlogCategory
from tests.utils import TestCase
class TestCategoryViewSet(TestCase):
    """
    Test the API resources for categories
    """

    def setUp(self):
        """Create a category fixture used by the retrieval tests."""
        super(TestCategoryViewSet, self).setUp()
        self.category = BlogCategory.objects.create(title='Fitness')

    def tearDown(self):
        """Remove the category fixture."""
        self.category.delete()

    def test_list(self):
        """GET list returns HTTP 200 and counts the single fixture."""
        resp = self.client.get(reverse('blogcategory-list'), format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data['count'], 1)

    def test_retrieve(self):
        """GET detail returns HTTP 200 with the fixture's title."""
        detail_url = '/api/categories/{}'.format(self.category.pk)
        resp = self.client.get(detail_url, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data['title'], self.category.title)

    def test_create_as_superuser_token(self):
        """POST create succeeds (201) with a superuser OAuth2 token."""
        payload = {'title': 'my new category 1'}
        resp = self.client.post('/api/categories', payload, format='json',
                                HTTP_AUTHORIZATION=self.auth_valid)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        created = BlogCategory.objects.get(pk=resp.data['id'])
        self.assertEqual(created.title, payload['title'])

    def test_create_as_superuser(self):
        """POST create succeeds (201) for an authenticated superuser."""
        payload = {'title': 'my new category 2'}
        self.client.force_authenticate(user=self.superuser)
        resp = self.client.post('/api/categories', payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        created = BlogCategory.objects.get(pk=resp.data['id'])
        self.assertEqual(created.title, payload['title'])

    def test_create_as_user(self):
        """POST create is forbidden (403) for a standard user."""
        payload = {'title': 'my category'}
        self.client.force_authenticate(user=self.user)
        resp = self.client.post('/api/categories', payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

    def test_create_as_guest(self):
        """POST create is rejected (401) for an unauthenticated guest."""
        payload = {'title': 'my category'}
        self.client.force_authenticate(user=None)
        resp = self.client.post('/api/categories', payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_update_as_superuser_token(self):
        """PUT update succeeds (200) with a superuser OAuth2 token."""
        payload = {'title': 'my updated category'}
        detail_url = '/api/categories/{}'.format(self.category.pk)
        resp = self.client.put(detail_url, payload, format='json',
                               HTTP_AUTHORIZATION=self.auth_valid)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated = BlogCategory.objects.get(pk=self.category.pk)
        self.assertEqual(updated.title, payload['title'])

    def test_update_as_user(self):
        """PUT update is forbidden (403) for a standard user."""
        payload = {'title': 'my updated category'}
        detail_url = '/api/categories/{}'.format(self.category.pk)
        self.client.force_authenticate(user=self.user)
        resp = self.client.put(detail_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

    def test_update_as_guest(self):
        """PUT update is rejected (401) for an unauthenticated guest."""
        payload = {'title': 'my updated category'}
        detail_url = '/api/categories/{}'.format(self.category.pk)
        self.client.force_authenticate(user=None)
        resp = self.client.put(detail_url, payload, format='json')
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
| {
"content_hash": "7822a68e14095bf773dff10987b6dc76",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 102,
"avg_line_length": 36.4390243902439,
"alnum_prop": 0.6329763498438197,
"repo_name": "gcushen/mezzanine-api",
"id": "18bea6c60ee4709df27c23aeef2e4008e587511f",
"size": "4482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_category.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "3929"
},
{
"name": "Python",
"bytes": "69248"
}
],
"symlink_target": ""
} |
import collections
import datetime
import random
import re
from dashie_sampler import DashieSampler
import requests
class JenkinsSampler(DashieSampler):
    """Dashing sampler that polls a Jenkins view and reports the status of
    each (filtered) job, labelled and ranked by severity."""

    # Key paths into each Jenkins job JSON object.
    JOBS_KEY = ['name']
    STATUS_KEY = ['color']
    # Jenkins "color" -> severity rank consumed by the dashboard widget.
    SEVERITY_MAP = {
        'red': '1',
        'notbuilt': '2',
        # Was missing: a never-built job currently building has color
        # 'notbuilt_anime' and raised KeyError in _parseRequest.
        'notbuilt_anime': '2',
        'blue_anime': '11',
        'blue': '11',
        'disabled': '5',
        'yellow': '6',
        'red_anime': '7',
        'aborted': '9',
        'yellow_anime': '6',
    }
    # Jenkins "color" -> human-readable status label.
    SEVERITY_LABEL_MAP = {
        'red': 'Failed',
        'notbuilt': 'Not Built',
        'blue_anime': 'Building',
        'blue': 'Built',
        'disabled': 'Disabled',
        'yellow': 'Unstable',
        'red_anime': 'Failed-In Progress',
        'notbuilt_anime': 'Not Built-In Progress',
        'aborted': 'Aborted',
        'yellow_anime': 'Unstable-In Progress',
    }
    # Substring filter applied to job names ('' keeps every job).
    JOB_FILTER = ''

    def name(self):
        """Widget identifier used by the dashboard."""
        return 'jenkins'

    def __init__(self, *args, **kwargs):
        DashieSampler.__init__(self, *args, **kwargs)

    def _findByKey(self, val, keys):
        """Walk the nested mapping *val* along the *keys* path; return the leaf."""
        if len(keys) == 0:
            return val
        return self._findByKey(val[keys[0]], keys[1:])

    def _jobFilter(self, job):
        """Return True when the job's name contains JOB_FILTER."""
        jobName = self._findByKey(job, self.JOBS_KEY)
        return self.JOB_FILTER in jobName

    def _parseRequest(self, json):
        """Convert one Jenkins job JSON object into a widget item dict."""
        status = self._findByKey(json, self.STATUS_KEY)
        jobName = self._findByKey(json, self.JOBS_KEY)
        return {
            'label': self.SEVERITY_LABEL_MAP[status],
            # Reuse the lookup above (the name was previously re-extracted).
            'value': jobName,
            'importanceLabel': self.SEVERITY_LABEL_MAP[status],
            'importanceValue': self.SEVERITY_MAP[status],
        }

    def sample(self):
        """Fetch the Jenkins view and return the filtered, parsed job list."""
        # SECURITY: credentials are hard-coded in source; they should be moved
        # to configuration / environment rather than committed here.
        r = requests.get('http://nhss-aux.bjss.co.uk:8080/view/Main%20Builds/api/json?pretty=true', auth=('pi.dashboard', 'vertebrae'))
        jobs = r.json()['jobs']
        return {'items': [self._parseRequest(job) for job in jobs if self._jobFilter(job)]}
"content_hash": "27c1feabf096f000161db7ed5ff19d0d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 135,
"avg_line_length": 26.766233766233768,
"alnum_prop": 0.5327510917030568,
"repo_name": "edhiley/pydashie",
"id": "362244d6873c9585c9e712fd5178efe08123a920",
"size": "2061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydashie/notcurrentlyused/jenkins_all_main_priority_list.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50959"
},
{
"name": "CoffeeScript",
"bytes": "10251"
},
{
"name": "HTML",
"bytes": "11619"
},
{
"name": "JavaScript",
"bytes": "557243"
},
{
"name": "Python",
"bytes": "58200"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from traits.api import Any, Bool
from pychron.envisage.tasks.base_editor import BaseTraitsEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
class LaserEditor(BaseTraitsEditor):
    """Editor driving a laser execution; tracks completion state."""

    component = Any
    _execute_thread = None
    was_executed = False
    _laser_manager = Any
    completed = Bool(False)

    def stop(self):
        """Hook for subclasses; the default implementation does nothing."""
        pass

    def do_execute(self, lm):
        """Reset the completion flag, remember the laser manager *lm*, and
        delegate to the subclass hook _do_execute()."""
        self.completed = False
        self._laser_manager = lm
        return self._do_execute()

    def _do_execute(self):
        """Subclass hook; the default implementation does nothing."""
        pass

    def block(self):
        """Wait for a running execution thread (if any), then mark complete."""
        thread = self._execute_thread
        if thread:
            thread.join()
        self.completed = True
# ============= EOF =============================================
| {
"content_hash": "92aab2461247fd4d0c6f91f0cc872c60",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 65,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.5311750599520384,
"repo_name": "NMGRL/pychron",
"id": "ffa6179ac61a6a0660521f009d78da53e0ac489a",
"size": "1636",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/lasers/tasks/editors/laser_editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
import http.server
import socketserver
import argparse
import sys
import os
# Command-line options for the docs server.
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', dest='port', default=8080, type=int, help='Serving port.')
# 'default' was True, which made this store_true flag a no-op; with False the
# "-m" switch can actually toggle the option. (The value is currently unused
# below — "make html" always runs.)
parser.add_argument('-m', '--make', dest='make', default=False, action='store_true', help='Run the makefile.')
port = parser.parse_args().port
if __name__ == '__main__':
    # Build the Sphinx docs, then serve the generated HTML forever.
    dirname = os.path.dirname(sys.argv[0])
    docpath = os.path.realpath(os.path.join(dirname, '../', 'docs'))
    os.chdir(docpath)
    # Fixed message typo: "Chaning" -> "Changing".
    print('Changing directory to {}'.format(docpath))
    os.system('make html')
    docpath = os.path.realpath(os.path.join(dirname, '../', 'docs', '_build', 'html'))
    os.chdir(docpath)
    print('Changing directory to {}'.format(docpath))
    Handler = http.server.SimpleHTTPRequestHandler
    print('Serving at port {}.'.format(port))
    httpd = socketserver.TCPServer(("", port), Handler)
    httpd.serve_forever()
| {
"content_hash": "0be90db97e252fad5a9f306421056479",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 109,
"avg_line_length": 33.57142857142857,
"alnum_prop": 0.6627659574468086,
"repo_name": "vacancy/TensorArtist",
"id": "1794447fd56c312c7fe83928f5b5f9595332dd52",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/docs-server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "497134"
},
{
"name": "Shell",
"bytes": "630"
}
],
"symlink_target": ""
} |
__version__ = "1.0.0"
"""
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
"""
| {
"content_hash": "ee842022d20738f8daaaf2493433d0b4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 59,
"avg_line_length": 24.75,
"alnum_prop": 0.5909090909090909,
"repo_name": "nmante/image_deduplication",
"id": "ef50b94b7c5be23d16aa867b45a45e97616fa20e",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedup/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "623"
},
{
"name": "Python",
"bytes": "31546"
},
{
"name": "Shell",
"bytes": "1641"
}
],
"symlink_target": ""
} |
import sys
import unittest
from supervisor.web import MeldView, StatusView
from supervisor.xmlrpc import RPCError
from unittest.mock import call, patch, Mock
from supvisors.tests.base import ProcessInfoDatabase
class ViewProcAddressTest(unittest.TestCase):
    """ Test case for the viewprocaddress module. """
    def setUp(self):
        """ Build the ProcAddressView under test, after forcing StatusView
        to inherit from the Supvisors ViewHandler. """
        # apply the forced inheritance done in supvisors.plugin
        from supvisors.tests.base import DummyHttpContext
        from supvisors.viewhandler import ViewHandler
        from supervisor.web import StatusView
        StatusView.__bases__ = (ViewHandler,)
        # create the instance to be tested
        from supvisors.viewprocaddress import ProcAddressView
        self.view = ProcAddressView(DummyHttpContext('ui/procaddress.html'))
    def test_init(self):
        """ Test the values set at construction. """
        # test instance inheritance
        from supvisors.viewhandler import ViewHandler
        from supvisors.viewsupstatus import SupvisorsAddressView
        from supvisors.webutils import PROC_ADDRESS_PAGE
        for klass in [SupvisorsAddressView, StatusView, ViewHandler, MeldView]:
            self.assertIsInstance(self.view, klass)
        # test default page name
        self.assertEqual(PROC_ADDRESS_PAGE, self.view.page_name)
    # get_process_data is called once per write_contents call below; the
    # side_effect list therefore feeds the four successive scenarios.
    @patch('supvisors.viewhandler.ViewHandler.write_process_statistics')
    @patch('supvisors.viewprocaddress.ProcAddressView.write_process_table')
    @patch('supvisors.viewprocaddress.ProcAddressView.get_process_data',
           side_effect=([{'namespec': 'dummy'}], [{'namespec': 'dummy'}],
                        [{'namespec': 'dummy_proc'}], [{'namespec': 'dummy_proc'}]))
    def test_write_contents(self, mocked_data, mocked_table, mocked_stats):
        """ Test the write_contents method. """
        from supvisors.viewcontext import PROCESS
        # patch context
        self.view.view_ctx = Mock(parameters={PROCESS: None}, local_address='10.0.0.1',
                                  **{'get_process_status.return_value': None})
        # patch the meld elements
        mocked_root = Mock()
        # test call with no process selected
        self.view.write_contents(mocked_root)
        self.assertEqual([call()], mocked_data.call_args_list)
        self.assertEqual([call(mocked_root, [{'namespec': 'dummy'}])], mocked_table.call_args_list)
        self.assertEqual([call(mocked_root, {})], mocked_stats.call_args_list)
        mocked_data.reset_mock()
        mocked_table.reset_mock()
        mocked_stats.reset_mock()
        # test call with process selected and no corresponding status
        # (the selection must be cleared)
        self.view.view_ctx.parameters[PROCESS] = 'dummy_proc'
        self.view.write_contents(mocked_root)
        self.assertEqual([call()], mocked_data.call_args_list)
        self.assertEqual([call(mocked_root, [{'namespec': 'dummy'}])], mocked_table.call_args_list)
        self.assertEqual('', self.view.view_ctx.parameters[PROCESS])
        self.assertEqual([call(mocked_root, {})], mocked_stats.call_args_list)
        mocked_data.reset_mock()
        mocked_table.reset_mock()
        mocked_stats.reset_mock()
        # test call with process selected but not running on considered address
        # (the selection must be cleared too)
        self.view.view_ctx.parameters[PROCESS] = 'dummy_proc'
        self.view.view_ctx.get_process_status.return_value = Mock(addresses={'10.0.0.2'})
        self.view.write_contents(mocked_root)
        self.assertEqual([call()], mocked_data.call_args_list)
        self.assertEqual([call(mocked_root, [{'namespec': 'dummy_proc'}])], mocked_table.call_args_list)
        self.assertEqual('', self.view.view_ctx.parameters[PROCESS])
        self.assertEqual([call(mocked_root, {})], mocked_stats.call_args_list)
        mocked_data.reset_mock()
        mocked_table.reset_mock()
        mocked_stats.reset_mock()
        # test call with process selected and running
        # (the selection is kept and its data passed to the statistics writer)
        self.view.view_ctx.parameters[PROCESS] = 'dummy_proc'
        self.view.view_ctx.get_process_status.return_value = Mock(addresses={'10.0.0.1'})
        self.view.write_contents(mocked_root)
        self.assertEqual([call()], mocked_data.call_args_list)
        self.assertEqual([call(mocked_root, [{'namespec': 'dummy_proc'}])], mocked_table.call_args_list)
        self.assertEqual('dummy_proc', self.view.view_ctx.parameters[PROCESS])
        self.assertEqual([call(mocked_root, {'namespec': 'dummy_proc'})], mocked_stats.call_args_list)
    @patch('supvisors.viewhandler.ViewHandler.sort_processes_by_config',
           return_value=['process_2', 'process_1'])
    def test_get_process_data(self, mocked_sort):
        """ Test the get_process_data method. """
        # patch context
        process_status = Mock(rules=Mock(expected_loading=17))
        self.view.view_ctx = Mock(local_address='10.0.0.1',
                                  **{'get_process_status.side_effect': [None, process_status],
                                     'get_process_stats.side_effect': [(2, 'stats #1'), (8, 'stats #2')]})
        # test RPC Error: an empty list is returned
        with patch.object(self.view.info_source.supervisor_rpc_interface, 'getAllProcessInfo',
                          side_effect=RPCError('failed RPC')):
            self.assertEqual([], self.view.get_process_data())
        # test using base process info
        def process_info_by_name(name):
            return next((info.copy() for info in ProcessInfoDatabase
                         if info['name'] == name), {})
        with patch.object(self.view.info_source.supervisor_rpc_interface, 'getAllProcessInfo',
                          return_value=[process_info_by_name('xfontsel'),
                                        process_info_by_name('segv')]):
            self.assertEqual(['process_2', 'process_1'], self.view.get_process_data())
        # test intermediate list
        # expected payloads forwarded to sort_processes_by_config
        data1 = {'application_name': 'sample_test_1',
                 'process_name': 'xfontsel',
                 'namespec': 'sample_test_1:xfontsel',
                 'address': '10.0.0.1',
                 'statename': 'RUNNING',
                 'statecode': 20,
                 'description': 'pid 80879, uptime 0:01:19',
                 'loading': '?',
                 'nb_cores': 2,
                 'proc_stats': 'stats #1'}
        data2 = {'application_name': 'crash',
                 'process_name': 'segv',
                 'namespec': 'crash:segv',
                 'address': '10.0.0.1',
                 'statename': 'BACKOFF',
                 'statecode': 30,
                 'description': 'Exited too quickly (process log may have details)',
                 'loading': 17,
                 'nb_cores': 8,
                 'proc_stats': 'stats #2'}
        self.assertEqual(1, mocked_sort.call_count)
        self.assertEqual(2, len(mocked_sort.call_args_list[0]))
        # access to internal call data
        call_data = mocked_sort.call_args_list[0][0][0]
        self.assertDictEqual(data1, call_data[0])
        self.assertDictEqual(data2, call_data[1])
    @patch('supvisors.viewprocaddress.ProcAddressView.write_process')
    @patch('supvisors.viewhandler.ViewHandler.write_common_process_status',
           side_effect=[True, False, False])
    def test_write_process_table(self, mocked_common, mocked_process):
        """ Test the write_process_table method. """
        # patch the meld elements
        table_mid = Mock()
        tr_elt_1 = Mock(attrib={'class': ''})
        tr_elt_2 = Mock(attrib={'class': ''})
        tr_elt_3 = Mock(attrib={'class': ''})
        tr_mid = Mock(**{'repeat.return_value': [(tr_elt_1, 'info_1'),
                                                 (tr_elt_2, 'info_2'),
                                                 (tr_elt_3, 'info_3')]})
        mocked_root = Mock(**{'findmeld.side_effect': [table_mid, tr_mid]})
        # test call with no data: the table is replaced by a message
        self.view.write_process_table(mocked_root, {})
        self.assertEqual([call('No programs to manage')], table_mid.replace.call_args_list)
        self.assertEqual([], mocked_common.replace.call_args_list)
        self.assertEqual([], mocked_process.replace.call_args_list)
        self.assertEqual('', tr_elt_1.attrib['class'])
        self.assertEqual('', tr_elt_2.attrib['class'])
        self.assertEqual('', tr_elt_3.attrib['class'])
        table_mid.replace.reset_mock()
        # test call with data and line selected
        # (rows are written and alternately shaded/brightened)
        self.view.write_process_table(mocked_root, True)
        self.assertEqual([], table_mid.replace.call_args_list)
        self.assertEqual([call(tr_elt_1, 'info_1'), call(tr_elt_2, 'info_2'), call(tr_elt_3, 'info_3')],
                         mocked_common.call_args_list)
        self.assertEqual([call(tr_elt_1, 'info_1'), call(tr_elt_2, 'info_2'), call(tr_elt_3, 'info_3')],
                         mocked_process.call_args_list)
        self.assertEqual('brightened', tr_elt_1.attrib['class'])
        self.assertEqual('shaded', tr_elt_2.attrib['class'])
        self.assertEqual('brightened', tr_elt_3.attrib['class'])
    def test_write_process(self):
        """ Test the write_process method. """
        from supvisors.webutils import PROC_ADDRESS_PAGE, TAIL_PAGE
        # create a process-like dict
        info = {'namespec': 'dummy_appli:dummy_proc'}
        # patch the view context
        self.view.view_ctx = Mock(**{'format_url.return_value': 'an url'})
        # patch the meld elements
        name_mid = Mock()
        tr_elt = Mock(**{'findmeld.return_value': name_mid})
        # test call with stopped process: the name cell links to the tail page
        self.view.write_process(tr_elt, info)
        self.assertEqual([call('name_a_mid')], tr_elt.findmeld.call_args_list)
        self.assertEqual([call('dummy_appli:dummy_proc')], name_mid.content.call_args_list)
        self.assertEqual([call(href='an url')], name_mid.attributes.call_args_list)
        self.assertEqual([call('127.0.0.1', TAIL_PAGE, processname=info['namespec'])],
                         self.view.view_ctx.format_url.call_args_list)
def test_suite():
    """Return a suite holding every TestCase defined in this module.

    Uses the default loader directly: unittest.findTestCases() is deprecated
    since Python 3.11 and removed in 3.13; loadTestsFromModule() is the
    documented equivalent.
    """
    return unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main(defaultTest='test_suite')
| {
"content_hash": "7aa67c7e37c69ef8d87bd1614fd7e20d",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 106,
"avg_line_length": 52.29441624365482,
"alnum_prop": 0.6008542030673656,
"repo_name": "julien6387/supervisors",
"id": "7d95710cc46d2235ee079184ba41c89eeee41c42",
"size": "11072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supvisors/tests/test_viewprocaddress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18854"
},
{
"name": "HTML",
"bytes": "194932"
},
{
"name": "Java",
"bytes": "120916"
},
{
"name": "Python",
"bytes": "392826"
},
{
"name": "Shell",
"bytes": "2480"
}
],
"symlink_target": ""
} |
import os
from utils import DATA_PROCESSED_DIR
import numpy as np
from vocab import get_vocab, VOCAB_SIZE
from quatrains import get_quatrains
from gensim import models
from numpy.random import uniform
# Cached artifact locations under DATA_PROCESSED_DIR.
_w2v_path = os.path.join(DATA_PROCESSED_DIR, 'word2vec.npy')
_w2v_model_path = os.path.join(DATA_PROCESSED_DIR, 'word2vec.model')
# Variants trained with extra column-wise "alignment" sentences.
_w2v_with_alignment_path = os.path.join(DATA_PROCESSED_DIR, 'word2vec_with_alignment.npy')
_w2v_with_alignment_model_path = os.path.join(DATA_PROCESSED_DIR, 'word2vec_with_alignment.model')
def _gen_embedding(ndim, alignment=False):
    """Train a Word2Vec model over quatrain sentences and save an
    ndim-dimensional embedding matrix with one row per vocabulary character.

    Rows for characters the model did not learn (filtered by min_count=5)
    keep random uniform values in [-1, 1].  With alignment=True, the corpus
    additionally contains the i-th characters across each poem's sentences
    (to boost Dui Zhang), and outputs go to the *_with_alignment paths.

    NOTE: this module is Python 2 (print statements, list-returning filter).
    """
    print "Generating %d-dim word embedding ..." %ndim
    int2ch, ch2int = get_vocab()
    ch_lists = []
    quatrains = get_quatrains()
    for idx, poem in enumerate(quatrains):
        # Each sentence becomes one "sentence" for Word2Vec, restricted to
        # in-vocabulary characters (filter returns a list in Python 2).
        for sentence in poem['sentences']:
            ch_lists.append(filter(lambda ch: ch in ch2int, sentence))
        if alignment:
            # the i-th characters in the poem, used to boost Dui Zhang
            i_characters = [[sentence[j] for sentence in poem['sentences']] for j in range(len(poem['sentences'][0]))]
            for characters in i_characters:
                ch_lists.append(filter(lambda ch: ch in ch2int, characters))
        if 0 == (idx+1)%10000:
            print "[Word2Vec] %d/%d poems have been processed." %(idx+1, len(quatrains))
    print "Hold on. This may take some time ..."
    model = models.Word2Vec(ch_lists, size = ndim, min_count = 5)
    # Start from random uniform values so unlearned characters still get a row.
    embedding = uniform(-1.0, 1.0, [VOCAB_SIZE, ndim])
    for idx, ch in enumerate(int2ch):
        if ch in model.wv:
            embedding[idx,:] = model.wv[ch]
    # Persist both the gensim model and the dense embedding matrix.
    if alignment:
        model.save(_w2v_with_alignment_model_path)
        print "Word2Vec model is saved."
        np.save(_w2v_with_alignment_path, embedding)
        print "Word embedding is saved."
    else:
        model.save(_w2v_model_path)
        print "Word2Vec model is saved."
        np.save(_w2v_path, embedding)
        print "Word embedding is saved."
def get_word_embedding(ndim, alignment=False):
    """Load the ndim-dimensional embedding matrix, training and caching it
    first (via _gen_embedding) if either cached file is missing.

    With alignment=True the alignment-boosted variant is loaded instead.
    """
    if alignment:
        embedding_path = _w2v_with_alignment_path
        model_path = _w2v_with_alignment_model_path
    else:
        embedding_path = _w2v_path
        model_path = _w2v_model_path
    cached = os.path.exists(embedding_path) and os.path.exists(model_path)
    if not cached:
        _gen_embedding(ndim, alignment=alignment)
    return np.load(embedding_path)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Generate or load a Wrod2Vec embedding')
parser.add_argument('--alignment', help='Use Wrod2Vec with alignment', action='store_true', required=False)
args = parser.parse_args()
if args.alignment:
print "Using Word2vec with alignment, use -h for usage"
embedding = get_word_embedding(128, alignment=True)
print "Finished loading Word2vec with alignment. Size of embedding: (%d, %d)" %embedding.shape
else:
print "Using Word2vec without alignment, use -h for usage"
embedding = get_word_embedding(128)
print "Finished loading Word2vec without alignment. Size of embedding: (%d, %d)" %embedding.shape
| {
"content_hash": "9e4c88291af756bb808c10510715d96b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 118,
"avg_line_length": 44.46478873239437,
"alnum_prop": 0.6607538802660754,
"repo_name": "Disiok/poetry-seq2seq",
"id": "d1700481e700c6f59ca247b0ed38b6f51bd7eb99",
"size": "3203",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "word2vec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1445"
},
{
"name": "HTML",
"bytes": "16485"
},
{
"name": "JavaScript",
"bytes": "11835"
},
{
"name": "Jupyter Notebook",
"bytes": "700661"
},
{
"name": "Makefile",
"bytes": "240"
},
{
"name": "Python",
"bytes": "89572"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
"""
Library functions for Bugbase. Contains all of Bugbase core and logic
"""
__author__ = 'Benjamin Schubert, benjamin.schubert@epfl.ch'
import inspect
def get_subclasses(base_class: callable) -> list:
    """
    Gets all non abstract subclasses for a given base class.

    Recursion now continues through abstract intermediate classes as well:
    previously the subtree below an abstract subclass was skipped, so a
    concrete grandchild hidden behind an abstract child was never found.

    :param base_class: the base class of which to find children
    :return: list of all non-abstract descendant classes
    """
    all_subclasses = []
    for subclass in base_class.__subclasses__():
        if not inspect.isabstract(subclass):
            all_subclasses.append(subclass)
        # Descend even into abstract subclasses: their children may be concrete.
        all_subclasses.extend(get_subclasses(subclass))
    return all_subclasses
| {
"content_hash": "668d1e49c63600bf9d1425fbb403a055",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 27.26086956521739,
"alnum_prop": 0.6842105263157895,
"repo_name": "dslab-epfl/bugbase",
"id": "2cd5f348e3f9ab52500d91c2302764b637591732",
"size": "666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "185"
},
{
"name": "PHP",
"bytes": "241"
},
{
"name": "Python",
"bytes": "211897"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
} |
"""Functional utilities for Python 2.4 compatibility."""
# License for code in this file that was taken from Python 2.5.
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python
# alone or in any derivative version, provided, however, that PSF's
# License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
# 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation;
# All Rights Reserved" are retained in Python alone or in any derivative
# version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
### Begin from Python 2.5 functools.py ########################################
# Summary of changes made to the Python 2.5 code below:
# * Wrapped the ``setattr`` call in ``update_wrapper`` with a try-except
# block to make it compatible with Python 2.3, which doesn't allow
# assigning to ``__name__``.
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software
# Foundation. All Rights Reserved.
###############################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
def _compat_partial(fun, *args, **kwargs):
"""New function with partial application of the given arguments
and keywords."""
def _curried(*addargs, **addkwargs):
return fun(*(args + addargs), **dict(kwargs, **addkwargs))
return _curried
# Prefer the stdlib implementation; fall back to the pure-Python shim on
# Python 2.4, which has no functools.partial.
try:
    from functools import partial
except ImportError:
    partial = _compat_partial # noqa
# Attribute names copied from / merged into a wrapper (mirrors Python 2.5
# functools.WRAPPER_ASSIGNMENTS / WRAPPER_UPDATES).
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def _compat_update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS,
updated=WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes off the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
try:
setattr(wrapper, attr, getattr(wrapped, attr))
except TypeError: # Python 2.3 doesn't allow assigning to __name__.
pass
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
# Prefer the stdlib implementation; fall back to the shim on Python 2.4.
try:
    from functools import update_wrapper
except ImportError:
    update_wrapper = _compat_update_wrapper # noqa
def _compat_wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS,
updated=WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
# Prefer the stdlib implementation; fall back to the shim on Python 2.4.
try:
    from functools import wraps
except ImportError:
    wraps = _compat_wraps # noqa
### End from Python 2.5 functools.py ##########################################
| {
"content_hash": "4384e7f33070b9a3544498cc5d9a850a",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 40.75912408759124,
"alnum_prop": 0.7036174785100286,
"repo_name": "pantheon-systems/kombu",
"id": "ad5da24e9a37c22ae7a42596cbe61131a1d075b8",
"size": "5584",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kombu/utils/functional.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "340782"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from .meta import SnorkelBase, snorkel_postgres
from sqlalchemy import Column, String, Integer, Text, ForeignKey, UniqueConstraint
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import relationship, backref
from sqlalchemy.types import PickleType
from sqlalchemy.sql import select, text
class Context(SnorkelBase):
    """
    A piece of content from which Candidates are composed.

    Root of a polymorphic inheritance hierarchy: ``type`` is the
    discriminator column, and ``stable_id`` is a unique deterministic
    identifier (see construct_stable_id / split_stable_id below).
    """
    __tablename__ = 'context'
    id = Column(Integer, primary_key=True)
    # Polymorphic discriminator ('document', 'sentence', 'span', ...).
    type = Column(String, nullable=False)
    # Unique, deterministic identifier used for deduplication on insert.
    stable_id = Column(String, unique=True, nullable=False)
    __mapper_args__ = {
        'polymorphic_identity': 'context',
        'polymorphic_on': type
    }
    def get_parent(self):
        """Return the enclosing Context; implemented by subclasses."""
        raise NotImplementedError()
    def get_children(self):
        """Return the child Contexts; implemented by subclasses."""
        raise NotImplementedError()
    def get_sentence_generator(self):
        """Yield the Sentences covered by this Context; implemented by subclasses."""
        raise NotImplementedError()
class Document(Context):
    """
    A root Context: has no parent, and its children are its Sentences.
    """
    __tablename__ = 'document'
    id = Column(Integer, ForeignKey('context.id', ondelete='CASCADE'), primary_key=True)
    name = Column(String, unique=True, nullable=False)
    # Arbitrary pickled metadata attached at parse time.
    meta = Column(PickleType)
    __mapper_args__ = {
        'polymorphic_identity': 'document',
    }
    def get_parent(self):
        """A Document is a root Context, so it has no parent."""
        return None
    def get_children(self):
        """Children of a Document are its Sentences (via backref)."""
        return self.sentences
    def get_sentence_generator(self):
        """Yield this Document's Sentences in position order."""
        for sentence in self.sentences:
            yield sentence
    def __repr__(self):
        return "Document " + str(self.name)
class Sentence(Context):
"""A sentence Context in a Document."""
__tablename__ = 'sentence'
id = Column(Integer, ForeignKey('context.id', ondelete='CASCADE'), primary_key=True)
document_id = Column(Integer, ForeignKey('document.id', ondelete='CASCADE'))
position = Column(Integer, nullable=False)
document = relationship('Document', backref=backref('sentences', order_by=position, cascade='all, delete-orphan'), foreign_keys=document_id)
text = Column(Text, nullable=False)
if snorkel_postgres:
words = Column(postgresql.ARRAY(String), nullable=False)
char_offsets = Column(postgresql.ARRAY(Integer), nullable=False)
abs_char_offsets = Column(postgresql.ARRAY(Integer), nullable=False)
lemmas = Column(postgresql.ARRAY(String))
pos_tags = Column(postgresql.ARRAY(String))
ner_tags = Column(postgresql.ARRAY(String))
dep_parents = Column(postgresql.ARRAY(Integer))
dep_labels = Column(postgresql.ARRAY(String))
entity_cids = Column(postgresql.ARRAY(String))
entity_types = Column(postgresql.ARRAY(String))
else:
words = Column(PickleType, nullable=False)
char_offsets = Column(PickleType, nullable=False)
abs_char_offsets = Column(PickleType, nullable=False)
lemmas = Column(PickleType)
pos_tags = Column(PickleType)
ner_tags = Column(PickleType)
dep_parents = Column(PickleType)
dep_labels = Column(PickleType)
entity_cids = Column(PickleType)
entity_types = Column(PickleType)
__mapper_args__ = {
'polymorphic_identity': 'sentence',
}
__table_args__ = (
UniqueConstraint(document_id, position),
)
def get_parent(self):
return self.document
def get_children(self):
return self.spans
def _asdict(self):
return {
'id': self.id,
'document': self.document,
'position': self.position,
'text': self.text,
'words': self.words,
'char_offsets': self.char_offsets,
'lemmas': self.lemmas,
'pos_tags': self.pos_tags,
'ner_tags': self.ner_tags,
'dep_parents': self.dep_parents,
'dep_labels': self.dep_labels,
'entity_cids': self.entity_cids,
'entity_types': self.entity_types
}
def get_sentence_generator(self):
yield self
def __repr__(self):
return "Sentence(%s,%s,%s)" % (self.document, self.position, self.text.encode('utf-8'))
class TemporaryContext(object):
    """
    A context which does not incur the overhead of a proper ORM-based Context object.
    The TemporaryContext class is specifically for the candidate extraction process, during which a CandidateSpace
    object will generate many TemporaryContexts, which will then be filtered by Matchers prior to materialization
    of Candidates and constituent Context objects.
    Every Context object has a corresponding TemporaryContext object from which it inherits.
    A TemporaryContext must have specified equality / set membership semantics, a stable_id for checking
    uniqueness against the database, and a promote() method which returns a corresponding Context object.
    """
    def __init__(self):
        # Database id; populated lazily by load_id_or_insert().
        self.id = None
    def load_id_or_insert(self, session):
        """Look this context up by stable_id; insert both the base context
        row and the subclass row if absent.  Caches the id on self."""
        if self.id is None:
            stable_id = self.get_stable_id()
            id = session.execute(select([Context.id]).where(Context.stable_id == stable_id)).first()
            if id is None:
                # Insert the polymorphic base row first to obtain the id ...
                self.id = session.execute(
                        Context.__table__.insert(),
                        {'type': self._get_table_name(), 'stable_id': stable_id}).inserted_primary_key[0]
                # ... then the subclass row, via raw SQL supplied by the subclass.
                insert_args = self._get_insert_args()
                insert_args['id'] = self.id
                session.execute(text(self._get_insert_query()), insert_args)
            else:
                self.id = id[0]
    def __eq__(self, other):
        raise NotImplementedError()
    def __ne__(self, other):
        raise NotImplementedError()
    def __hash__(self):
        raise NotImplementedError()
    def _get_polymorphic_identity(self):
        raise NotImplementedError()
    def get_stable_id(self):
        raise NotImplementedError()
    def _get_table_name(self):
        raise NotImplementedError()
    def _get_insert_query(self):
        raise NotImplementedError()
    def _get_insert_args(self):
        raise NotImplementedError()
class TemporarySpan(TemporaryContext):
    """The TemporaryContext version of Span.

    char_start/char_end are sentence-relative character offsets, and
    char_end is INCLUSIVE (see __len__ and the +1 in slicing below).
    """
    def __init__(self, sentence, char_start, char_end, meta=None):
        super(TemporarySpan, self).__init__()
        self.sentence = sentence  # The sentence Context of the Span
        self.char_end = char_end
        self.char_start = char_start
        self.meta = meta
    def __len__(self):
        # char_end is inclusive, hence the +1.
        return self.char_end - self.char_start + 1
    def __eq__(self, other):
        try:
            return self.sentence == other.sentence and self.char_start == other.char_start \
                and self.char_end == other.char_end
        except AttributeError:
            return False
    def __ne__(self, other):
        try:
            return self.sentence != other.sentence or self.char_start != other.char_start \
                or self.char_end != other.char_end
        except AttributeError:
            return True
    def __hash__(self):
        return hash(self.sentence) + hash(self.char_start) + hash(self.char_end)
    def get_stable_id(self):
        return construct_stable_id(self.sentence, self._get_polymorphic_identity(), self.char_start, self.char_end)
    def _get_table_name(self):
        return 'span'
    def _get_polymorphic_identity(self):
        return 'span'
    def _get_insert_query(self):
        return """INSERT INTO span VALUES(:id, :sentence_id, :char_start, :char_end, :meta)"""
    def _get_insert_args(self):
        return {'sentence_id' : self.sentence.id,
                'char_start': self.char_start,
                'char_end' : self.char_end,
                'meta' : self.meta}
    def get_word_start(self):
        return self.char_to_word_index(self.char_start)
    def get_word_end(self):
        return self.char_to_word_index(self.char_end)
    def get_n(self):
        # Number of whole words covered by the span (inclusive bounds).
        return self.get_word_end() - self.get_word_start() + 1
    def char_to_word_index(self, ci):
        """Given a character-level index (offset), return the index of the **word this char is in**"""
        # Returns -1 if ci precedes the first offset, and the last word index
        # if ci lies beyond the final word's start offset.
        i = None
        for i, co in enumerate(self.sentence.char_offsets):
            if ci == co:
                return i
            elif ci < co:
                return i-1
        return i
    def word_to_char_index(self, wi):
        """Given a word-level index, return the character-level index (offset) of the word's start"""
        return self.sentence.char_offsets[wi]
    def get_attrib_tokens(self, a='words'):
        """Get the tokens of sentence attribute _a_ over the range defined by word_offset, n"""
        return self.sentence.__getattribute__(a)[self.get_word_start():self.get_word_end() + 1]
    def get_attrib_span(self, a, sep=" "):
        """Get the span of sentence attribute _a_ over the range defined by word_offset, n"""
        # NOTE: Special behavior for words currently (due to correspondence with char_offsets)
        if a == 'words':
            return self.sentence.text[self.char_start:self.char_end + 1]
        else:
            return sep.join(self.get_attrib_tokens(a))
    def get_span(self, sep=" "):
        return self.get_attrib_span('words', sep)
    def __contains__(self, other_span):
        # Containment is by character interval within the same sentence.
        return other_span.char_start >= self.char_start and other_span.char_end <= self.char_end
    def __getitem__(self, key):
        """
        Slice operation returns a new candidate sliced according to **char index**
        Note that the slicing is w.r.t. the candidate range (not the abs. sentence char indexing)
        """
        if isinstance(key, slice):
            char_start = self.char_start if key.start is None else self.char_start + key.start
            if key.stop is None:
                char_end = self.char_end
            elif key.stop >= 0:
                # -1 converts the exclusive slice stop to an inclusive char_end.
                char_end = self.char_start + key.stop - 1
            else:
                char_end = self.char_end + key.stop
            return self._get_instance(char_start=char_start, char_end=char_end, sentence=self.sentence)
        else:
            raise NotImplementedError()
    def __repr__(self):
        return '%s("%s", sentence=%s, chars=[%s,%s], words=[%s,%s])' \
            % (self.__class__.__name__, self.get_span().encode('utf-8'), self.sentence.id, self.char_start, self.char_end,
               self.get_word_start(), self.get_word_end())
    def _get_instance(self, **kwargs):
        # Overridden by Span so slicing a Span yields a Span.
        return TemporarySpan(**kwargs)
class Span(Context, TemporarySpan):
    """
    A span of characters, identified by Context id and character-index start, end (inclusive).
    char_offsets are **relative to the Context start**
    """
    __tablename__ = 'span'
    id = Column(Integer, ForeignKey('context.id', ondelete='CASCADE'), primary_key=True)
    sentence_id = Column(Integer, ForeignKey('sentence.id', ondelete='CASCADE'))
    char_start = Column(Integer, nullable=False)
    char_end = Column(Integer, nullable=False)
    meta = Column(PickleType)
    # A span is unique by (sentence, character interval).
    __table_args__ = (
        UniqueConstraint(sentence_id, char_start, char_end),
    )
    __mapper_args__ = {
        'polymorphic_identity': 'span',
        # Needed because both 'id' FKs (context, sentence) are ambiguous here.
        'inherit_condition': (id == Context.id)
    }
    sentence = relationship('Sentence', backref=backref('spans', cascade='all, delete-orphan'), order_by=char_start, foreign_keys=sentence_id)
    def get_parent(self):
        """Parent of a Span is its Sentence."""
        return self.sentence
    def get_children(self):
        """Spans are leaf Contexts."""
        return None
    def _get_instance(self, **kwargs):
        return Span(**kwargs)
    # We redefine these to use default semantics, overriding the operators inherited from TemporarySpan
    def __eq__(self, other):
        return self is other
    def __ne__(self, other):
        return self is not other
    def __hash__(self):
        return id(self)
def split_stable_id(stable_id):
    """
    Split a stable id of the form ``<doc>::<type>:<start>:<end>``, returning:
        * Document (root) stable ID
        * Context polymorphic type
        * Character offset start, end *relative to document start*
    Returns a tuple of four values; raises ValueError on malformed input.
    """
    parts = stable_id.split('::')
    if len(parts) == 2:
        doc_id, remainder = parts
        fields = remainder.split(':')
        if len(fields) == 3:
            polymorphic_type, char_start, char_end = fields
            return doc_id, polymorphic_type, int(char_start), int(char_end)
    raise ValueError("Malformed stable_id:", stable_id)
def construct_stable_id(parent_context, polymorphic_type, relative_char_offset_start, relative_char_offset_end):
    """Construct a stable ID for a Context given its parent and its character
    offsets relative to the parent's document-level start offset."""
    doc_id, _, parent_doc_char_start, _ = split_stable_id(parent_context.stable_id)
    absolute_start = parent_doc_char_start + relative_char_offset_start
    absolute_end = parent_doc_char_start + relative_char_offset_end
    return "%s::%s:%s:%s" % (doc_id, polymorphic_type, absolute_start, absolute_end)
| {
"content_hash": "85023046773f24f350145808346bee3d",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 144,
"avg_line_length": 35.77989130434783,
"alnum_prop": 0.6128958760537708,
"repo_name": "jasontlam/snorkel",
"id": "57a0df449d44823c37e46e806f5545b3ba561ccc",
"size": "13167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snorkel/models/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "15678"
},
{
"name": "Jupyter Notebook",
"bytes": "567022"
},
{
"name": "Python",
"bytes": "517209"
},
{
"name": "Shell",
"bytes": "6040"
}
],
"symlink_target": ""
} |
from zirkon.program_config import ModuleConfig
from . import mod0
| {
"content_hash": "a53c754bdfa25d9768481ad46f723e9c",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 46,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.8208955223880597,
"repo_name": "simone-campagna/zirkon",
"id": "bea533e56512ca39a9b374158678bce0c17fe30c",
"size": "67",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_zirkon/pack0/comp2/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "833579"
}
],
"symlink_target": ""
} |
import logging
import time,datetime
from futclient import FutClient
import futfunctions
def autocontract():
    """Buy cheap gold contract cards and relist them at a markup, for each
    configured account.  Python 2 code; FutClient is the project API client.

    WARNING(security/review): credentials are hard-coded in plaintext below —
    move them to environment variables or a config file outside the repo.
    """
    email = ["fut2.bryrmeg@neverbox.com"]
    password = "Bryrmeg11"
    the_hash = "Trondheim"
    for i in email:
        client = FutClient(i,password,the_hash)
        logging.info("%s - %s - %s/30" % (i, client.coins(), client.tradepile()[1]))
        client.relist_cards()
        checked_tradeids = list()
        # Move any previously-bought, unassigned items into the trade pile
        # (capped at 30 slots) before buying more.
        if client.unassigned_ids()[1] > 0:
            logging.info("There are %i unassigned items in the bought pile. Processing these." % client.unassigned_ids()[1])
            while client.tradepile()[1] < 30 and client.unassigned_ids()[1] > 0:
                for x in client.unassigned_ids()[0]:
                    client.move_card(x)
                logging.info("Number of unassigned items left %s" % client.unassigned_ids()[1])
            client.relist_cards()
        if i == "fut3.bryrmeg@neverbox.com": break #using this account for other purposes atm
        # Transfer coins
        # not active atm
        if False and client.coins() > 50000: #datetime.datetime.now().hour == 12 and
            bin = futfunctions.fut_round(client.coins()-10000)
            logging.info("Time to transfer money to main account. Coins before transfer %s" %client.coins())
            t_tradeid,t_itemid = transfer_tradeid(bin)
            if t_tradeid:
                if client.buy_item(t_tradeid, bin):
                    time.sleep(4)
                    client.discard(t_itemid)
                    logging.info("Transfer done. Coins after transfer: %s" %client.coins())
        # Main loop: buy contracts at <=200 coins and relist at a margin
        # until the trade pile is full or coins run out.
        while True and client.unassigned_ids()[1] == 0 and client.tradepile()[1] < 30 and client.coins() > 200:
            contracts = client.search_auctions(card_type="development", cat="contract", lev="gold", max_bin=str(200))
            for contract in contracts:
                if contract.tradeID not in checked_tradeids and client.tradepile()[1] + client.unassigned_ids()[1] < 30: # To fix a bug related to delay when searching
                    checked_tradeids.append(contract.tradeID)
                    if client.buy_item(contract.tradeID, contract.BIN):
                        logging.info("Bought Contract for %s" % contract.BIN)
                        if client.move_card(str(contract.itemID)):
                            # Rare-contract resource id gets a higher price point.
                            if str(contract.resourceID) == "1347178283": # non-rare gold player contract
                                bidPrice = "350"
                                binPrice = "400"
                            else:
                                bidPrice = "200"
                                binPrice = "250"
                            if client.post_trade(str(contract.itemID),
                                                 bidPrice, binPrice, str(3600)):
                                logging.info("Listed contract %s for %s / %s. Tradepile %s / %s" % (contract.itemID, bidPrice, binPrice, client.tradepile()[1], 30))
                            else:
                                logging.error("Couldn't list card: %s" % contract.itemID)
                        else:
                            logging.error("Couldn't move card: %s" % contract.itemID)
def transfer_tradeid(bin):
    # Buy a cheap player on the receiving account and list it at `bin`, so
    # the sending account can "transfer" coins by buying the listing.
    # Returns (tradeid, itemid).
    # WARNING(review): placeholder credentials below must be configured; as
    # written this constructs a client with literal "username"/"passord".
    client = FutClient("username","passord","hash")
    players = client.search_auctions(card_type="player", max_bin=str(200))
    if client.buy_item(players[-1].tradeID,players[-1].BIN):
        if client.move_card(str(players[-1].itemID)):
            tradeid = client.post_trade(str(players[-1].itemID), 200, bin, str(3600))
            return tradeid,players[-1].itemID
def run():
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
while True:
try:
autocontract()
except:
print "error in autocontrac"
minutes = 30
print "sleeping for "+str(minutes)+" mins"
time.sleep(60*minutes)
'''
try:
autocontract()
minutes = 60
print "sleeping for "+str(minutes)+" mins"
time.sleep(60*minutes)
except Exception, e:
print e
print "sleeping for 10 mins before retrying"
time.sleep(60*10)
'''
if __name__ == "__main__":
run()
| {
"content_hash": "3634ee0486dd2a15584daac67aec10b1",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 169,
"avg_line_length": 46.02127659574468,
"alnum_prop": 0.5446139620896903,
"repo_name": "bryrmeg/14",
"id": "399c551618f18b6098a0a6b45ff87ac484877dc1",
"size": "4326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autocontracts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "261195"
},
{
"name": "Python",
"bytes": "86579"
}
],
"symlink_target": ""
} |
"""
Buckets are a collection of events, properties, and funnels belonging to
a user.
"""
import ujson
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredList
from telephus.cassandra.c08.ttypes import NotFoundException
from pylru import lrucache
from ..lib.cassandra import get_relation, delete_relation, get_user, \
insert_relation_by_id, delete_relations, delete_counters
from ..exceptions import BucketException, UserException
from ..lib.profiler import profile
from ..lib.hash import password_hash
from base64 import b64encode
LRU_CACHE = lrucache(1000)
def bucket_check(method):
    """
    Decorator for request handlers taking (self, request, *, user_name,
    bucket_name, ...): verifies the bucket exists (LRU cache first, then
    Cassandra) before invoking the wrapped method.  Responds 404 and raises
    BucketException when the bucket is missing.

    NOTE(review): the wrapper does not apply functools.wraps, so the
    decorated method loses its name/docstring — consider adding it.
    """
    @inlineCallbacks
    def wrapper(*args, **kwargs):
        """
        Verifies bucket exists.
        """
        request = args[1]
        # Dispatcher makes some args into kwargs.
        bucket = BucketModel(kwargs["user_name"], kwargs["bucket_name"])
        # Only hit the database when the bucket is not in the local cache.
        if not bucket.cached():
            _exists = yield bucket.exists()
            if not _exists:
                request.setResponseCode(404)
                raise BucketException("Bucket %s does not "
                    "exist." % bucket.bucket_name)
        data = yield method(*args, **kwargs)
        returnValue(data)
    return wrapper
def bucket_create(method):
    """
    Decorator for request handlers taking (self, request, *, user_name,
    bucket_name, ...): creates the bucket on the fly if it does not exist.
    Responds 404 and raises UserException when the owning user is missing.

    NOTE(review): like bucket_check, the wrapper does not apply
    functools.wraps, so the decorated method loses its metadata.
    """
    @inlineCallbacks
    def wrapper(*args, **kwargs):
        """
        Creates new bucket if bucket does not exist.
        """
        request = args[1]
        # Dispatcher makes some args into kwargs.
        user_name = kwargs["user_name"]
        bucket_name = kwargs["bucket_name"]
        bucket = BucketModel(user_name, bucket_name)
        # Only hit the database when the bucket is not in the local cache.
        if not bucket.cached():
            _exists = yield bucket.exists()
            if not _exists:
                _user_exists = yield bucket.user_exists()
                if not _user_exists:
                    request.setResponseCode(404)
                    raise UserException("User %s does not exist." % user_name)
                # New buckets are created with the bucket name as description.
                yield bucket.create(bucket_name)
        data = yield method(*args, **kwargs)
        returnValue(data)
    return wrapper
class BucketModel(object):
    """
    Buckets are a collection of events, properties, and funnels belonging to
    a user.

    All database methods are Twisted inlineCallbacks coroutines returning
    Deferreds; existence checks are cached in the module-level LRU_CACHE
    keyed by "user|bucket".
    """
    def __init__(self, user_name, bucket_name):
        self.user_name = user_name
        self.bucket_name = bucket_name
        # Key into the module-level LRU existence cache.
        self.cache_key = "|".join((user_name, bucket_name))
    @profile
    @inlineCallbacks
    def user_exists(self):
        """
        Returns boolean indicating whether user exists.
        """
        try:
            yield get_user(self.user_name, "hash")
            returnValue(True)
        except NotFoundException:
            returnValue(False)
    def cached(self):
        """
        Check local cache for bucket's existence
        """
        return self.cache_key in LRU_CACHE
    @profile
    @inlineCallbacks
    def validate_password(self, password):
        # Compare the supplied password against the base64 of the
        # per-bucket hash derived from the user's stored hash.
        try:
            _hash = yield get_user(self.user_name, "hash")
        except NotFoundException:
            returnValue(False)
        _bucket_hash = b64encode(password_hash(self.bucket_name, _hash))
        returnValue(password == _bucket_hash)
    @profile
    @inlineCallbacks
    def exists(self):
        """
        Verify bucket exists.
        """
        key = (self.user_name, "bucket")
        column_id = self.bucket_name
        try:
            yield get_relation(key, column_id=column_id)
            # Positive results are cached; misses are not.
            LRU_CACHE[self.cache_key] = None
        except NotFoundException:
            returnValue(False)
        returnValue(True)
    @profile
    def create(self, description):
        """
        Create bucket for username.
        """
        LRU_CACHE[self.cache_key] = None
        key = (self.user_name, "bucket")
        column_id = self.bucket_name
        value = ujson.dumps({"description":description})
        return insert_relation_by_id(key, column_id, value, commit=True)
    @profile
    @inlineCallbacks
    def get_properties(self):
        """
        Return nested dictionary of
        property_name -> property_value -> property_id in bucket.
        """
        key = (self.user_name, self.bucket_name, "property_name")
        data = yield get_relation(key)
        returnValue(dict([(x, ujson.loads(data[x])) for x in data]))
    @profile
    @inlineCallbacks
    def get_events(self):
        """
        Return event_name/event_id pairs for the bucket.
        """
        key = (self.user_name, self.bucket_name, "event")
        data = yield get_relation(key)
        returnValue(dict([(data[i], {"id":i}) for i in data]))
    @profile
    @inlineCallbacks
    def get_description(self):
        """
        Return bucket description.
        """
        key = (self.user_name, "bucket")
        column_id = self.bucket_name
        data = yield get_relation(key, column_id=column_id)
        returnValue(ujson.loads(data)["description"])
    @profile
    @inlineCallbacks
    def delete(self):
        """
        Delete the bucket.
        """
        del LRU_CACHE[self.cache_key]
        key = (self.user_name, "bucket")
        column_id = self.bucket_name
        deferreds = []
        deferreds.append(delete_relation(key, column_id=column_id))
        # Relation rows: events, funnels, properties, plus the 256 shards
        # of per-visitor properties.
        keys = [
            (self.user_name, self.bucket_name, "event"),
            (self.user_name, self.bucket_name, "funnel"),
            (self.user_name, self.bucket_name, "property"),
            (self.user_name, self.bucket_name, "property_name")]
        for i in range(0, 256):
            shard = chr(i)
            keys.extend([(self.user_name, self.bucket_name, "visitor_property", shard)])
        deferreds.append(delete_relations(keys))
        # Counter rows: every metric family across all 256 shards.
        keys = []
        hash_keys = ["property", "event", "hourly_event", "daily_event",
            "unique_event", "hourly_unique_event",
            "daily_unique_event", "path", "hourly_path",
            "daily_path", "unique_path", "hourly_unique_path",
            "daily_unique_path", "visitor_event", "visitor_path"]
        for i in range(0, 256):
            keys.extend([(self.user_name, self.bucket_name, x, chr(i))
                for x in hash_keys])
        deferreds.append(delete_counters(keys))
        # Fire all deletions concurrently and wait for completion.
        yield DeferredList(deferreds)
"content_hash": "4fb3bf875761489c64cf26b220b2d974",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 88,
"avg_line_length": 31.984848484848484,
"alnum_prop": 0.5780830570030001,
"repo_name": "hiidef/hiitrack-api",
"id": "0075dd8d2ac60da9cee379a77975ca0478b58abe",
"size": "6380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hiitrack/models/bucket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172974"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup

# Long description for PyPI comes from the README next to this setup.py.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='netdrive-connector',
    version='1.3.2',
    license='BSD License',
    description='GUI tool to setup mountable SFTP and WebDAV connections on Linux/UNIX systems.',
    long_description=README,
    url='http://github.com/ethoms/netdrive-connector/',
    author='Euan Thoms',
    author_email='euan@potensol.com',
    # Trove classifiers: Qt/X11 desktop tool targeting Python 2.6/2.7.
    classifiers=[
        'Environment :: X11 Applications :: Qt',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Utilities',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
    ],
    keywords='connect remote network filesystem mount davfs webdav fuse sshfs sftp',
    packages=['netdriveconnector'],
    # Ship the Qt Designer .ui files alongside the package code.
    package_data={'netdriveconnector': ['*.ui']},
    include_package_data=True,
    data_files=[
        ('share/pixmaps',['data/netdrive-connector.png']),
        ('share/applications',['data/netdrive-connector.desktop']),
    ],
    scripts=['bin/netdrive-connector_run-as-root', 'bin/netdrive-connector_automountd', 'bin/add-sftp-connector', 'bin/add-webdav-connector', 'bin/remove-sftp-connector', 'bin/remove-webdav-connector'],
    entry_points={
        'console_scripts': [ 'netdrive-connector = netdriveconnector.main:main',],
    },
    # Pin the shebang rewritten into installed scripts to python2.7.
    options = {
        'build_scripts': {
            'executable': '/usr/bin/python2.7',
        },
    },
)
| {
"content_hash": "aac93af63866b32b54cb4f96f88881a8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 202,
"avg_line_length": 38.87755102040816,
"alnum_prop": 0.6341207349081365,
"repo_name": "ethoms/netdrive-connector",
"id": "af650bc50a4ceb7ca595905c6dca60d206daac8d",
"size": "1905",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "399"
},
{
"name": "Python",
"bytes": "190390"
},
{
"name": "Shell",
"bytes": "96881"
}
],
"symlink_target": ""
} |
from runner.koan import *
class Proxy(object):
    """Wraps a target object and records every attribute access and
    assignment made through the proxy, while forwarding them."""

    def __init__(self, target_object):
        self._messages = []
        # '_obj' must be set last: until it exists, forwarding in
        # __getattr__/__setattr__ would recurse on a missing attribute.
        self._obj = target_object

    def __getattr__(self, attr_name):
        # Record the lookup, then delegate to the wrapped object.
        self._messages.append(attr_name)
        return self._obj.__getattribute__(attr_name)

    def __setattr__(self, attr_name, value):
        internal = ("_obj", "_messages", "messages", "was_called",
                    "number_of_times_called")
        if attr_name in internal:
            # Proxy-internal attributes are stored directly, unrecorded.
            return object.__setattr__(self, attr_name, value)
        # Assignments are logged with a trailing '=' and forwarded.
        self._messages.append(attr_name + "=")
        self._obj.__setattr__(attr_name, value)

    def messages(self):
        """Return the recorded message names, in order of occurrence."""
        return self._messages

    def was_called(self, m):
        """Return True if message *m* was ever sent through the proxy."""
        return m in self._messages

    def number_of_times_called(self, m):
        """Return how many times message *m* was recorded."""
        return self._messages.count(m)
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
    """Koan test suite specifying the required behavior of Proxy."""
    def test_proxy_method_returns_wrapped_object(self):
        # NOTE: The Television class is defined below
        tv = Proxy(Television())
        self.assertTrue(isinstance(tv, Proxy))
    def test_tv_methods_still_perform_their_function(self):
        # Attribute reads/writes and method calls must pass through.
        tv = Proxy(Television())
        tv.channel = 10
        tv.power()
        self.assertEqual(10, tv.channel)
        self.assertTrue(tv.is_on())
    def test_proxy_records_messages_sent_to_tv(self):
        # Assignments are recorded with a trailing '='.
        tv = Proxy(Television())
        tv.power()
        tv.channel = 10
        self.assertEqual(['power', 'channel='], tv.messages())
    def test_proxy_handles_invalid_messages(self):
        # NOTE(review): relies on Python 2 scoping where `ex` stays bound
        # after the except block; under Python 3 `ex` would be unbound here.
        tv = Proxy(Television())
        ex = None
        try:
            tv.no_such_method()
        except AttributeError as ex:
            pass
        self.assertEqual(AttributeError, type(ex))
    def test_proxy_reports_methods_have_been_called(self):
        tv = Proxy(Television())
        tv.power()
        tv.power()
        self.assertTrue(tv.was_called('power'))
        self.assertFalse(tv.was_called('channel'))
    def test_proxy_counts_method_calls(self):
        tv = Proxy(Television())
        tv.power()
        tv.channel = 48
        tv.power()
        self.assertEqual(2, tv.number_of_times_called('power'))
        self.assertEqual(1, tv.number_of_times_called('channel='))
        self.assertEqual(0, tv.number_of_times_called('is_on'))
    def test_proxy_can_record_more_than_just_tv_objects(self):
        # The proxy is generic: any object (here a str) can be wrapped.
        proxy = Proxy("Py Ohio 2010")
        result = proxy.upper()
        self.assertEqual("PY OHIO 2010", result)
        result = proxy.split()
        self.assertEqual(["Py", "Ohio", "2010"], result)
        self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class using in the proxy testing above.
class Television(object):
    """Minimal television model used to exercise the Proxy class."""

    def __init__(self):
        self._channel = None
        self._power = None

    @property
    def channel(self):
        """Currently tuned channel (None until first set)."""
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    def power(self):
        # Toggle power; any state other than 'on' (incl. None) becomes 'on'.
        self._power = 'off' if self._power == 'on' else 'on'

    def is_on(self):
        """Return True when the set is powered on."""
        return self._power == 'on'
# Tests for the Television class. All of theses tests should pass.
class TelevisionTest(Koan):
    """Sanity tests for Television itself; all should pass unchanged."""
    def test_it_turns_on(self):
        tv = Television()
        tv.power()
        self.assertTrue(tv.is_on())
    def test_it_also_turns_off(self):
        tv = Television()
        tv.power()
        tv.power()
        self.assertFalse(tv.is_on())
    def test_edge_case_on_off(self):
        # Odd number of toggles -> on, even number -> off.
        tv = Television()
        tv.power()
        tv.power()
        tv.power()
        self.assertTrue(tv.is_on())
        tv.power()
        self.assertFalse(tv.is_on())
    def test_can_set_the_channel(self):
        tv = Television()
        tv.channel = 11
        self.assertEqual(11, tv.channel)
"content_hash": "9d2b76b5f4e7c4db8f10fe22b2bdd5d9",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 87,
"avg_line_length": 26.879518072289155,
"alnum_prop": 0.5434782608695652,
"repo_name": "rameshugar/koans",
"id": "43d4c28a06e3d66754cb27ed369c7f62a91c7edd",
"size": "5165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_proxy_object_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "332477"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
# PEP 440 version string used by Python packaging.
__version__ = '1.4a0'
# Required for npm package for frontend
__semver__ = '1.4.0-alpha'
# Django looks this up to pick the AppConfig for the wagtailcore app.
default_app_config = 'wagtail.wagtailcore.apps.WagtailCoreAppConfig'
def setup():
    """Enable 'default' display for wagtail's own deprecation warnings."""
    import warnings
    from wagtail.utils.deprecation import removed_in_next_version_warning
    warnings.simplefilter("default", removed_in_next_version_warning)
setup()
| {
"content_hash": "fa78e8e553c309e62ab93c31df25b7a6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.7377521613832853,
"repo_name": "gogobook/wagtail",
"id": "8f387d3272e78b14699c6fb0b5a722a0fa879988",
"size": "347",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/wagtailcore/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "155100"
},
{
"name": "HTML",
"bytes": "267043"
},
{
"name": "JavaScript",
"bytes": "109586"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "2059166"
},
{
"name": "Shell",
"bytes": "7388"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
from uuid import UUID
class HDeleteUncollapsedElementMatchDiffRulesRHS(HimesisPostConditionPattern):
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HDeleteUncollapsedElementMatchDiffRulesRHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HDeleteUncollapsedElementMatchDiffRulesRHS, self).__init__(name='HDeleteUncollapsedElementMatchDiffRulesRHS', num_nodes=0, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes
        # Metamodel tag: unpickles to the list ['MoTifRule'].
        self["mm__"] = pickle.loads("""(lp1
S'MoTifRule'
p2
a.""")
        # Post-action source code stored verbatim on the graph; compiled
        # elsewhere by the transformation engine (mirrors action() below).
        self["MT_action__"] = """#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
"""
        self["name"] = """"""
        self["GUID__"] = UUID('5dc384a2-b642-4dbd-be53-03a8c6251dd3')
        # Set the node attributes
        # Attach the matching LHS pattern as the rule's precondition.
        from HDeleteUncollapsedElementMatchDiffRulesLHS import HDeleteUncollapsedElementMatchDiffRulesLHS
        self.pre = HDeleteUncollapsedElementMatchDiffRulesLHS()
    def action(self, PostNode, graph):
        """
        Executable constraint code.
        @param PostNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the rule has been applied.
        # You can access a node labelled n matched by this rule by: PostNode('n').
        # To access attribute x of node n, use: PostNode('n')['x'].
        #===============================================================================
        pass
    def execute(self, packet, match):
        """
        Transforms the current match of the packet according to the rule %s.
        Pivots are also assigned, if any.
        @param packet: The input packet.
        @param match: The match to rewrite.
        """
        graph = packet.graph
        # Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite.
        # Because of the uniqueness property of labels in a rule, we can store all LHS labels
        # and subsequently add the labels corresponding to the nodes to be created.
        labels = match.copy()
        #===============================================================================
        # Update attribute values
        #===============================================================================
        #===============================================================================
        # Create new nodes
        #===============================================================================
        #===============================================================================
        # Create new edges
        #===============================================================================
        #===============================================================================
        # Set the output pivots
        #===============================================================================
        #===============================================================================
        # Perform the post-action
        #===============================================================================
        try:
            self.action(lambda i: graph.vs[labels[i]], graph)
        except Exception, e:
            raise Exception('An error has occurred while applying the post-action', e)
        #===============================================================================
        # Finally, delete nodes (this will automatically delete the adjacent edges)
        #===============================================================================
        # MT_pre__MetaModelElement_S2
        graph.delete_nodes([labels["2"]])
| {
"content_hash": "64279476f9cc18b20a3ab001375f3e1d",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 146,
"avg_line_length": 46.776595744680854,
"alnum_prop": 0.4232431203093018,
"repo_name": "levilucio/SyVOLT",
"id": "4ae6c716b91dbdaa3a0668888083efe2192a03ce",
"size": "4399",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/merge_inter_layer_rules/Himesis/HDeleteUncollapsedElementMatchDiffRulesRHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from functools import wraps
def mproperty(fn):
    """Memoizing read-only property.

    The first access computes fn(self) and stores the result on the
    instance under ``_memo_<name>``; later accesses return the cached
    value without calling fn again.
    """
    cache_name = "_memo_%s" % fn.__name__

    @property
    @wraps(fn)
    def memoized(self):
        try:
            return getattr(self, cache_name)
        except AttributeError:
            value = fn(self)
            setattr(self, cache_name, value)
            return value

    return memoized
| {
"content_hash": "f3f196c782aafdacf225d9da90c89d80",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 46,
"avg_line_length": 21.071428571428573,
"alnum_prop": 0.6067796610169491,
"repo_name": "sionide21/memoize",
"id": "4a6ecdbc4c06986cebb73a958e1827debb81773f",
"size": "295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memoize.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Created by René Meusel
This file is part of the CernVM File System auxiliary tools.
"""
import shutil
import os
import tempfile
class FileSandbox:
    """ Wraps the creation and automatic removal of temporary files """

    def __init__(self, file_prefix):
        self.file_prefix = file_prefix
        # Everything the sandbox creates lives under this directory.
        self.temporary_dir = tempfile.mkdtemp(prefix=file_prefix)

    def __del__(self):
        # Remove the whole sandbox tree when the object is collected.
        shutil.rmtree(self.temporary_dir)

    def write_to_temporary(self, string_buffer):
        """ stores the provided content into a /tmp path that is returned """
        with tempfile.NamedTemporaryFile(mode='w+b',
                                         prefix=self.file_prefix,
                                         dir=self.temporary_dir,
                                         delete=False) as tmp_file:
            tmp_file.write(string_buffer)
            return tmp_file.name

    def create_directory(self, directory_name):
        """ creates a directory under self.temporary_dir with the given name """
        os.mkdir(os.path.join(self.temporary_dir, directory_name))

    def write_to_file(self, file_path, string_buffer):
        """ creates the file_path and writes the data given in string_buffer """
        target_path = os.path.join(self.temporary_dir, file_path)
        if os.path.isfile(target_path):
            os.unlink(target_path)
        with open(target_path, "w+") as out_file:
            out_file.write(string_buffer)
| {
"content_hash": "1c42449fde9318c65c970e4c40ff0b50",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 33.86363636363637,
"alnum_prop": 0.5973154362416108,
"repo_name": "reneme/python-cvmfsutils",
"id": "98ba4f419a50ad7415c129998fdf830af689989e",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "cvmfs/test/file_sandbox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "144468"
},
{
"name": "Shell",
"bytes": "100"
}
],
"symlink_target": ""
} |
import os
import re
# Monkey patch for pygments reporting an error when generator expressions are
# used.
# https://bitbucket.org/birkenfeld/pygments-main/issue/942/cmake-generator-expressions-not-handled
from pygments.lexers import CMakeLexer
from pygments.token import Name, Operator
from pygments.lexer import bygroups
# Teach the pygments CMake lexer to tokenize generator expressions `$<...>`
# instead of raising an error (see bug link above).
CMakeLexer.tokens["args"].append(('(\\$<)(.+?)(>)',
                                  bygroups(Operator, Name.Variable, Operator)))
# Monkey patch for sphinx generating invalid content for qcollectiongenerator
# https://bitbucket.org/birkenfeld/sphinx/issue/1435/qthelp-builder-should-htmlescape-keywords
from sphinx.util.pycompat import htmlescape
from sphinx.builders.qthelp import QtHelpBuilder
old_build_keywords = QtHelpBuilder.build_keywords
def new_build_keywords(self, title, refs, subitems):
    """Wrap QtHelpBuilder.build_keywords, HTML-escaping any ref target
    containing angle brackets so the generated qthelp XML stays valid."""
    old_items = old_build_keywords(self, title, refs, subitems)
    new_items = []
    for item in old_items:
        # Each item is an XML fragment: split out the ref="..." value.
        before, rest = item.split("ref=\"", 1)
        ref, after = rest.split("\"")
        if ("<" in ref and ">" in ref):
            # Escape angle brackets inside the ref attribute only.
            new_items.append(before + "ref=\"" + htmlescape(ref) + "\"" + after)
        else:
            new_items.append(item)
    return new_items
QtHelpBuilder.build_keywords = new_build_keywords
from docutils.parsers.rst import Directive, directives
from docutils.transforms import Transform
try:
from docutils.utils.error_reporting import SafeString, ErrorString
except ImportError:
# error_reporting was not in utils before version 0.11:
from docutils.error_reporting import SafeString, ErrorString
from docutils import io, nodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
from sphinx import addnodes
class CMakeModule(Directive):
    """reST directive ``cmake-module``: extracts embedded documentation
    from a CMake module file and inserts it into the current document.

    Documentation is marked in the .cmake file either by bracket-comment
    blocks ``#[=[.rst: ... ]=]`` or by line-comment blocks starting with
    ``#.rst:`` and continuing on ``#``-prefixed lines.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'encoding': directives.encoding}
    def __init__(self, *args, **keys):
        # Matches the opening of a bracket comment, capturing the '='
        # padding so the matching closer `]=*]` can be constructed.
        self.re_start = re.compile(r'^#\[(?P<eq>=*)\[\.rst:$')
        Directive.__init__(self, *args, **keys)
    def run(self):
        settings = self.state.document.settings
        if not settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        env = self.state.document.settings.env
        rel_path, path = env.relfn2path(self.arguments[0])
        path = os.path.normpath(path)
        encoding = self.options.get('encoding', settings.input_encoding)
        e_handler = settings.input_encoding_error_handler
        try:
            # Register the .cmake file so Sphinx rebuilds when it changes.
            settings.record_dependencies.add(path)
            f = io.FileInput(source_path=path, encoding=encoding,
                             error_handler=e_handler)
        except UnicodeEncodeError as error:
            raise self.severe('Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))
        raw_lines = f.read().splitlines()
        f.close()
        # rst is the current extraction state: None (outside a doc block),
        # '#' (line-comment mode), or the ']=*]' closer we are looking for
        # (bracket mode). Lines outside doc blocks are blanked, not
        # dropped, so reported line numbers stay correct.
        rst = None
        lines = []
        for line in raw_lines:
            if rst is not None and rst != '#':
                # Bracket mode: check for end bracket
                pos = line.find(rst)
                if pos >= 0:
                    if line[0] == '#':
                        line = ''
                    else:
                        line = line[0:pos]
                    rst = None
            else:
                # Line mode: check for .rst start (bracket or line)
                m = self.re_start.match(line)
                if m:
                    rst = ']%s]' % m.group('eq')
                    line = ''
                elif line == '#.rst:':
                    rst = '#'
                    line = ''
                elif rst == '#':
                    if line == '#' or line[:2] == '# ':
                        # Strip the comment prefix, keep the rST text.
                        line = line[2:]
                    else:
                        rst = None
                        line = ''
                elif rst is None:
                    line = ''
            lines.append(line)
        if rst is not None and rst != '#':
            raise self.warning('"%s" found unclosed bracket "#[%s[.rst:" in %s' %
                               (self.name, rst[1:-1], path))
        # Feed the extracted lines back into the reST state machine.
        self.state_machine.insert_input(lines, path)
        return []
class _cmake_index_entry:
def __init__(self, desc):
self.desc = desc
def __call__(self, title, targetid, main = 'main'):
return ('pair', u'%s ; %s' % (self.desc, title), targetid, main, None)
# Index-entry factories keyed by CMake object type (the doc subdirectory
# name); the value renders the human-readable index category.
_cmake_index_objs = {
    'command':    _cmake_index_entry('command'),
    'generator':  _cmake_index_entry('generator'),
    'manual':     _cmake_index_entry('manual'),
    'module':     _cmake_index_entry('module'),
    'policy':     _cmake_index_entry('policy'),
    'prop_cache': _cmake_index_entry('cache property'),
    'prop_dir':   _cmake_index_entry('directory property'),
    'prop_gbl':   _cmake_index_entry('global property'),
    'prop_inst':  _cmake_index_entry('installed file property'),
    'prop_sf':    _cmake_index_entry('source file property'),
    'prop_test':  _cmake_index_entry('test property'),
    'prop_tgt':   _cmake_index_entry('target property'),
    'variable':   _cmake_index_entry('variable'),
    }
def _cmake_object_inventory(env, document, line, objtype, targetid):
inv = env.domaindata['cmake']['objects']
if targetid in inv:
document.reporter.warning(
'CMake object "%s" also described in "%s".' %
(targetid, env.doc2path(inv[targetid][0])), line=line)
inv[targetid] = (env.docname, objtype)
class CMakeTransform(Transform):
    """Docutils transform that turns documents living under the per-type
    directories (command/, module/, variable/, ...) into CMake domain
    objects: inserts a link target and an index entry at the top of the
    document and records the object in the domain inventory."""
    # Run this transform early since we insert nodes we want
    # treated as if they were written in the documents.
    default_priority = 210
    def __init__(self, document, startnode):
        Transform.__init__(self, document, startnode)
        # Cache of docname -> parsed title (False when file missing).
        self.titles = {}
    def parse_title(self, docname):
        """Parse a document title as the first line starting in [A-Za-z0-9<]
        or fall back to the document basename if no such line exists.
        The cmake --help-*-list commands also depend on this convention.
        Return the title or False if the document file does not exist.
        """
        env = self.document.settings.env
        title = self.titles.get(docname)
        if title is None:
            fname = os.path.join(env.srcdir, docname+'.rst')
            try:
                f = open(fname, 'r')
            except IOError:
                title = False
            else:
                for line in f:
                    if len(line) > 0 and (line[0].isalnum() or line[0] == '<'):
                        title = line.rstrip()
                        break
                f.close()
                if title is None:
                    title = os.path.basename(docname)
            self.titles[docname] = title
        return title
    def apply(self):
        env = self.document.settings.env
        # Treat some documents as cmake domain objects.
        # The object type is the document's parent directory name.
        objtype, sep, tail = env.docname.rpartition('/')
        make_index_entry = _cmake_index_objs.get(objtype)
        if make_index_entry:
            title = self.parse_title(env.docname)
            # Insert the object link target.
            targetname = title
            targetid = '%s:%s' % (objtype, targetname)
            targetnode = nodes.target('', '', ids=[targetid])
            self.document.note_explicit_target(targetnode)
            self.document.insert(0, targetnode)
            # Insert the object index entry.
            indexnode = addnodes.index()
            indexnode['entries'] = [make_index_entry(title, targetid)]
            self.document.insert(0, indexnode)
            # Add to cmake domain object inventory
            _cmake_object_inventory(env, self.document, 1, objtype, targetid)
class CMakeObject(ObjectDescription):
    """Description directive for explicitly documented CMake objects
    (used for the 'command' and 'variable' directives below)."""
    def handle_signature(self, sig, signode):
        # called from sphinx.directives.ObjectDescription.run()
        # The signature is rendered verbatim as the object name.
        signode += addnodes.desc_name(sig, sig)
        return sig
    def add_target_and_index(self, name, sig, signode):
        """Attach a link target id to the signature node, record the
        object in the domain inventory, and emit an index entry."""
        targetname = name
        targetid = '%s:%s' % (self.objtype, targetname)
        if targetid not in self.state.document.ids:
            signode['names'].append(targetid)
            signode['ids'].append(targetid)
            # Mark as the first signature of this description.
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)
            _cmake_object_inventory(self.env, self.state.document,
                                    self.lineno, self.objtype, targetid)
        make_index_entry = _cmake_index_objs.get(self.objtype)
        if make_index_entry:
            self.indexnode['entries'].append(make_index_entry(name, targetid))
class CMakeXRefRole(XRefRole):
    """Cross-reference role for CMake objects; rewrites the role text so
    command sub-references get explicit targets and literal '<' in
    targets survives Sphinx's explicit-title parsing."""
    # See sphinx.util.nodes.explicit_title_re; \x00 escapes '<'.
    _re = re.compile(r'^(.+?)(\s*)(?<!\x00)<(.*?)>$', re.DOTALL)
    _re_sub = re.compile(r'^([^()\s]+)\s*\(([^()]*)\)$', re.DOTALL)
    def __call__(self, typ, rawtext, text, *args, **keys):
        # Translate CMake command cross-references of the form:
        #  `command_name(SUB_COMMAND)`
        # to have an explicit target:
        #  `command_name(SUB_COMMAND) <command_name>`
        if typ == 'cmake:command':
            m = CMakeXRefRole._re_sub.match(text)
            if m:
                text = '%s <%s>' % (text, m.group(1))
        # CMake cross-reference targets frequently contain '<' so escape
        # any explicit `<target>` with '<' not preceded by whitespace.
        while True:
            m = CMakeXRefRole._re.match(text)
            if m and len(m.group(2)) == 0:
                text = '%s\x00<%s>' % (m.group(1), m.group(3))
            else:
                break
        return XRefRole.__call__(self, typ, rawtext, text, *args, **keys)
    # We cannot insert index nodes using the result_nodes method
    # because CMakeXRefRole is processed before substitution_reference
    # nodes are evaluated so target nodes (with 'ids' fields) would be
    # duplicated in each evaluted substitution replacement.  The
    # docutils substitution transform does not allow this.  Instead we
    # use our own CMakeXRefTransform below to add index entries after
    # substitutions are completed.
    #
    # def result_nodes(self, document, env, node, is_ref):
    #     pass
class CMakeXRefTransform(Transform):
    """Adds index and target nodes for every CMake cross-reference in
    the document (deferred from CMakeXRefRole; see comment above)."""
    # Run this transform early since we insert nodes we want
    # treated as if they were written in the documents, but
    # after the sphinx (210) and docutils (220) substitutions.
    default_priority = 221
    def apply(self):
        env = self.document.settings.env
        # Find CMake cross-reference nodes and add index and target
        # nodes for them.
        for ref in self.document.traverse(addnodes.pending_xref):
            if not ref['refdomain'] == 'cmake':
                continue
            objtype = ref['reftype']
            make_index_entry = _cmake_index_objs.get(objtype)
            if not make_index_entry:
                continue
            objname = ref['reftarget']
            # Serial number keeps ids unique when the same object is
            # referenced several times in one document.
            targetnum = env.new_serialno('index-%s:%s' % (objtype, objname))
            targetid = 'index-%s-%s:%s' % (targetnum, objtype, objname)
            targetnode = nodes.target('', '', ids=[targetid])
            self.document.note_explicit_target(targetnode)
            indexnode = addnodes.index()
            indexnode['entries'] = [make_index_entry(objname, targetid, '')]
            # Place index + target immediately before the reference.
            ref.replace_self([indexnode, targetnode, ref])
class CMakeDomain(Domain):
    """CMake domain."""
    name = 'cmake'
    label = 'CMake'
    object_types = {
        'command':    ObjType('command',    'command'),
        'generator':  ObjType('generator',  'generator'),
        'variable':   ObjType('variable',   'variable'),
        'module':     ObjType('module',     'module'),
        'policy':     ObjType('policy',     'policy'),
        'prop_cache': ObjType('prop_cache', 'prop_cache'),
        'prop_dir':   ObjType('prop_dir',   'prop_dir'),
        'prop_gbl':   ObjType('prop_gbl',   'prop_gbl'),
        'prop_inst':  ObjType('prop_inst',  'prop_inst'),
        'prop_sf':    ObjType('prop_sf',    'prop_sf'),
        'prop_test':  ObjType('prop_test',  'prop_test'),
        'prop_tgt':   ObjType('prop_tgt',   'prop_tgt'),
        'manual':     ObjType('manual',     'manual'),
    }
    directives = {
        'command':    CMakeObject,
        'variable':   CMakeObject,
        # Other object types cannot be created except by the CMakeTransform
        # 'generator':  CMakeObject,
        # 'module':     CMakeObject,
        # 'policy':     CMakeObject,
        # 'prop_cache': CMakeObject,
        # 'prop_dir':   CMakeObject,
        # 'prop_gbl':   CMakeObject,
        # 'prop_inst':  CMakeObject,
        # 'prop_sf':    CMakeObject,
        # 'prop_test':  CMakeObject,
        # 'prop_tgt':   CMakeObject,
        # 'manual':     CMakeObject,
    }
    roles = {
        'command':    CMakeXRefRole(fix_parens = True, lowercase = False),
        'generator':  CMakeXRefRole(),
        'variable':   CMakeXRefRole(),
        'module':     CMakeXRefRole(),
        'policy':     CMakeXRefRole(),
        'prop_cache': CMakeXRefRole(),
        'prop_dir':   CMakeXRefRole(),
        'prop_gbl':   CMakeXRefRole(),
        'prop_inst':  CMakeXRefRole(),
        'prop_sf':    CMakeXRefRole(),
        'prop_test':  CMakeXRefRole(),
        'prop_tgt':   CMakeXRefRole(),
        'manual':     CMakeXRefRole(),
    }
    initial_data = {
        'objects': {},  # fullname -> docname, objtype
    }
    def clear_doc(self, docname):
        """Drop all inventory entries contributed by *docname* (called
        by Sphinx before re-reading a changed document)."""
        to_clear = set()
        for fullname, (fn, _) in self.data['objects'].items():
            if fn == docname:
                to_clear.add(fullname)
        for fullname in to_clear:
            del self.data['objects'][fullname]
    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        """Resolve a cross-reference against the object inventory;
        returns None (unresolved) when the target is unknown."""
        targetid = '%s:%s' % (typ, target)
        obj = self.data['objects'].get(targetid)
        if obj is None:
            # TODO: warn somehow?
            return None
        return make_refnode(builder, fromdocname, obj[0], targetid,
                            contnode, target)
    def get_objects(self):
        """Yield (name, dispname, type, docname, anchor, priority) for
        every recorded object, as required by the Domain API."""
        for refname, (docname, type) in self.data['objects'].items():
            yield (refname, refname, type, docname, refname, 1)
def setup(app):
    """Sphinx extension entry point: register the cmake-module directive,
    both transforms, and the CMake domain."""
    app.add_directive('cmake-module', CMakeModule)
    app.add_transform(CMakeTransform)
    app.add_transform(CMakeXRefTransform)
    app.add_domain(CMakeDomain)
| {
"content_hash": "422ded8bae8ca7373da7bd22ca192900",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 98,
"avg_line_length": 39.848,
"alnum_prop": 0.5710366057685873,
"repo_name": "saschazelzer/CppMicroServices",
"id": "398cdbae332d63b87c953667275d297d444326cb",
"size": "15086",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "doc/cmake.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "91694"
},
{
"name": "C++",
"bytes": "1004307"
},
{
"name": "CSS",
"bytes": "4649"
}
],
"symlink_target": ""
} |
"""
plot.py
This file contains shared functions for plotting graphs
and networks.
"""
from numpy import *
import numpy.random
import matplotlib.pyplot as plt
import networkx as nx
def save_plot(name=""):
    """Decorator factory: save the current matplotlib figure after the
    wrapped plotting function returns.

    The wrapped function gains three optional keyword arguments, which
    are consumed before the call is forwarded:
        fname -- base file name; if empty (default), nothing is saved
        fext  -- file extension, default ".png"
        dpi   -- resolution passed to plt.savefig, default 200
    The figure is written to "<fname>_<name><fext>".
    Returns whatever the wrapped function returns.
    """
    def wrap(f):
        def wrapped(*args, **kwargs):
            # pop() replaces the original `in kwargs.keys()` + `del`
            # dance: one lookup, and f never sees these options.
            fname = kwargs.pop('fname', "")
            fext = kwargs.pop('fext', ".png")
            dpi = kwargs.pop('dpi', 200)
            retval = f(*args, **kwargs)
            if fname != "":
                plt.savefig(fname + "_" + name + fext, dpi=dpi)
            return retval
        return wrapped
    return wrap
@save_plot(name="hierarchical_tree")
def draw_tree(tree, pos=None, arrows=False, return_edges=False):
    """
    Draws the hierarchical tree generated by hierarchical_decomposition.
    If pos is None, positions are calculated by using graphviz's dot.
    Returns the calculated positions for the tree nodes; when
    return_edges is True, returns (pos, edge_collection) instead.
    """
    plt.title("Hierarchical decomposition tree")
    # No idea *why* this line has to be here, but it fixes a bug (?) in
    # pygraphviz
    ag = nx.to_agraph(tree)
    # PEP 8: compare against None with `is`, not `==`.
    if pos is None:
        pos = nx.graphviz_layout(tree, prog='dot')
    edges = nx.draw_networkx_edges(tree,
        pos, with_labels=False, node_size=0, arrows=arrows)
    if return_edges:
        return pos, edges
    else:
        return pos
def draw_leaf_raw(leaf, title="Leaf", edge_list=None, mark_edges=None,
        color=None, fixed_width=False, node_pos=None):
    """
    Draws the leaf network using the data in the edges.
    If mark_edges is not None, it contains a list of tuples of
    the form (index, color).
    Returns the matplotlib collection drawn by networkx for the edges.
    """
    plt.title(title)
    # Node positions come from the node attributes unless given explicitly.
    # PEP 8: `is None` instead of `== None` (also below for edge_list).
    if node_pos is None:
        pos = {}
        for k in leaf.node.keys():
            pos[k] = (leaf.node[k]['x'], leaf.node[k]['y'])
    else:
        pos = node_pos
    if edge_list is None:
        es = leaf.edges_iter()
    else:
        es = edge_list
    if fixed_width:
        widths = 4.
    else:
        # Edge widths proportional to conductivity, rescaled so the
        # widest edge is 10.
        widths = 2*array([leaf[e[0]][e[1]]['conductivity'] \
                for e in es])
        widths = 10./amax(widths)*widths
    if mark_edges and not edge_list:
        # Highlight selected edges (by index) in their given colors.
        col_list = leaf.number_of_edges()*['k']
        for i, c in mark_edges:
            col_list[i] = c
        edges = nx.draw_networkx_edges(leaf, pos=pos, width=widths, \
                edge_color=col_list)
    elif color and edge_list:
        edges = nx.draw_networkx_edges(leaf, pos=pos, width=widths, \
                edge_color=color, edgelist=edge_list)
    else:
        edges = nx.draw_networkx_edges(leaf, edgelist=edge_list, pos=pos, \
                width=widths)
    #nx.draw(leaf, pos=pos)
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)
    plt.gca().axis('equal')
    return edges
@save_plot(name="leaf_network")
def draw_leaf(*args, **kwargs):
    """Saving wrapper around draw_leaf_raw; also accepts the decorator's
    fname/fext/dpi keyword arguments.

    Fix: propagate draw_leaf_raw's return value (the drawn edge
    collection), which the original silently discarded.
    """
    return draw_leaf_raw(*args, **kwargs)
@save_plot(name="dual")
def draw_dual(dual):
    """
    Draws the dual network to the pruned cycle
    form.
    """
    # Node positions are taken from the 'x'/'y' node attributes.
    pos = {}
    for k in dual.node.keys():
        pos[k] = (dual.node[k]['x'], dual.node[k]['y'])
    # NOTE(review): conductivity-based edge widths are disabled below;
    # left in place in case they are wanted again.
    #widths = array([d['conductivity']
    #    for u, v, d in dual.edges_iter(data=True)])
    #widths = 5./mean(widths)*widths
    #widths[isinf(widths)] = 0.
    #nx.draw_networkx_nodes(dual, pos=pos)
    nx.draw(dual, pos=pos, with_labels=False, node_size=10)
@save_plot(name="filtration")
def draw_filtration(filtration, steps=9, biased=True):
    """ Draws several steps of the filtration.
    If biased is true, the steps chosen are biased towards the end
    of the decomposition (in a power way)
    """
    plt.title("Filtration steps")
    n = len(filtration)
    # Python 2 file: xrange is intentional.
    for i in xrange(steps):
        if biased:
            # Power < 1 compresses early steps, spreading out late ones.
            s = (i/float(steps - 1.))**0.15
        else:
            s = i/float(steps - 1.)
        # Map the normalized position s onto a filtration index.
        j = int(floor(s*(n-1)))
        fil = filtration[j]
        # 3-column subplot grid, one panel per chosen step.
        plt.subplot(ceil(float(steps)/3), 3, i+1)
        #draw_leaf(fil, title="Step {}".format(filtration.step_nums[j]))
        draw_leaf_raw(fil, title="")
| {
"content_hash": "f0660caca38c8566469f0eb4ac423cb2",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 76,
"avg_line_length": 27.689873417721518,
"alnum_prop": 0.5586285714285715,
"repo_name": "hronellenfitsch/nesting",
"id": "1e1e7988b9d20c91e68a0046532904af486d18b2",
"size": "4398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171171"
}
],
"symlink_target": ""
} |
from locust import HttpLocust, TaskSet, task
from urllib import quote_plus
from random import choice
# Candidate target URLs for the load test; each simulated user picks one
# at random in on_start and requests it (via the proxy) repeatedly.
urls = [ 'http://code.jquery.com/jquery-2.1.3.min.js',
         'http://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js',
         'http://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.3/jquery.min.js',
         'http://google.com',
         'http://facebook.com',
         'http://twitter.com',
         'http://github.com',
         'http://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js' ]
class User(TaskSet):
    """Locust task set: each simulated user repeatedly fetches one URL."""
    def on_start(self):
        # Chosen once per user; every subsequent request reuses it.
        self.target_url = choice(urls)
        pass
    @task
    def request(self):
        # The system under test takes the target URL as a quoted query string.
        self.client.get('/?' + quote_plus(self.target_url))
class WebsiteUser(HttpLocust):
    """Locust user class: runs the User task set with a wait time of
    0-1000 ms between tasks."""
    task_set = User
    min_wait = 0
    max_wait = 1000
| {
"content_hash": "2692a16f2f890e177506cb994250bda8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 31.44,
"alnum_prop": 0.6259541984732825,
"repo_name": "52North/ckan-harvester",
"id": "4cb9b8d63b8a0464c0ef8357a25d3926187b67f6",
"size": "786",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "locust/locustfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "104489"
},
{
"name": "Python",
"bytes": "786"
},
{
"name": "Shell",
"bytes": "969"
}
],
"symlink_target": ""
} |
from .SGD import SGD
from .Momentum import Momentum
from .Nesterov import Nesterov
from .AdaGrad import AdaGrad
from .RMSProp import RMSProp
from .DQNProp import DQNProp
from .AdaDelta import AdaDelta
from .Adam import Adam
| {
"content_hash": "edfb7a7b8d12305d9fbcdf77ce4f6891",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 30,
"avg_line_length": 28,
"alnum_prop": 0.8214285714285714,
"repo_name": "yobibyte/DeepFried2",
"id": "b01a9020a622596d77f4887726fcd5156c7522e6",
"size": "224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "DeepFried2/optimizers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113852"
}
],
"symlink_target": ""
} |
import json
import MySQLdb
class DBManager:
    """Thin wrapper around a MySQL connection for reading sensor history
    tables and building insert statements.

    All query helpers return JSON strings so they can be served directly
    by the web layer.
    """

    def __init__(self, host, user, passwd, db):
        # Column labels for user_history rows; the trailing timestamp column
        # is intentionally dropped when building result dicts.
        self.columns = ('temperature', 'humidity')
        self.db = MySQLdb.connect(host=host, user=user, passwd=passwd, db=db)
        self.cursor = self.db.cursor()

    def _row_to_dict(self, row):
        # Map column labels onto the row, excluding the last (timestamp) field.
        return {self.columns[i]: row[i] for i in range(len(row) - 1)}

    def latest(self):
        """Return the most recent user_history row as a JSON object."""
        self.cursor.execute("SELECT * FROM user_history ORDER BY stamp DESC LIMIT 1")
        # commit() ends the transaction so repeated polls see freshly inserted rows.
        self.db.commit()
        row = self.cursor.fetchone()
        return json.dumps(self._row_to_dict(row))

    def _latest_flag(self, query):
        # Shared body of sound_alert/temp_alert: run *query*, report the first
        # column of the newest row as the 'problem' flag (0 if the table is empty).
        self.cursor.execute(query)
        self.db.commit()
        row = self.cursor.fetchone()
        if row:
            return json.dumps({'problem': row[0]})
        return json.dumps({'problem': 0})

    def sound_alert(self):
        """JSON 'problem' flag from the newest sound_history row."""
        return self._latest_flag('SELECT * FROM sound_history ORDER BY stamp desc limit 1')

    def temp_alert(self):
        """JSON 'problem' flag from the newest temp_history row."""
        return self._latest_flag('SELECT * FROM temp_history ORDER BY stamp desc limit 1')

    def report(self, period):
        """Return user_history rows for *period* as a JSON {'scores': [...]} object.

        period: 'weekly' (last 7 days), 'daily' (today), or '' (everything).
        NOTE(review): any other value skips the execute() and re-reads whatever
        result set the cursor last held — confirm callers only pass these three.
        """
        if period == 'weekly':
            self.cursor.execute("SELECT * FROM user_history WHERE stamp"
                                " between date_sub(now(),INTERVAL 1 WEEK) and now();")
        elif period == 'daily':
            self.cursor.execute("SELECT * FROM user_history WHERE DATE(stamp)=CURDATE()")
        elif period == '':
            self.cursor.execute("SELECT * FROM user_history ORDER BY stamp")
        self.db.commit()
        count = int(self.cursor.rowcount)
        scores = []
        for _ in range(count):
            row = self.cursor.fetchone()
            scores.append(self._row_to_dict(row))
        return json.dumps({'scores': scores})

    def get_insert_query(self, table, col_dict):
        """Build an INSERT statement for *table* from a {column: value} dict.

        None becomes NULL, strings are double-quoted, everything else is
        str()-ified.

        NOTE(security): values are interpolated directly into the SQL text;
        switch to parameterized cursor.execute() if col_dict can ever carry
        untrusted input.
        """
        try:
            string_types = basestring  # noqa: F821 - Python 2 (str + unicode)
        except NameError:
            string_types = str  # Python 3
        fields = []
        values = []
        for field, value in col_dict.items():  # items() works on Python 2 and 3
            fields.append(field)
            if value is None:
                values.append('NULL')
            elif isinstance(value, string_types):
                values.append('"' + value + '"')
            else:
                values.append(str(value))
        return 'Insert into {} ({}) Values({})'.format(
            table, ', '.join(fields), ', '.join(values))
| {
"content_hash": "ca178af3b3031b6a230285130c4106eb",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 89,
"avg_line_length": 37.25,
"alnum_prop": 0.5420450059218318,
"repo_name": "TeamProxima/predictive-fault-tracker",
"id": "6732e2436f77678d414be4fdd874eed7c71d796f",
"size": "2533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/db_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "875"
},
{
"name": "Python",
"bytes": "25065"
},
{
"name": "Ruby",
"bytes": "286"
},
{
"name": "Shell",
"bytes": "8776"
},
{
"name": "Swift",
"bytes": "848540"
}
],
"symlink_target": ""
} |
import bpy
from bpy.types import Operator
from mmd_tools import bpyutils
import mmd_tools.core.model as mmd_model
class CleanRiggingObjects(Operator):
    """Remove the temporary rigging objects of the active MMD model."""
    bl_idname = 'mmd_tools.clean_rig'
    bl_label = 'Clean'
    bl_description = 'Clean temporary objects of rigging'
    bl_options = {'PRESET'}

    def execute(self, context):
        model_root = mmd_model.Model.findRoot(context.active_object)
        mmd_model.Model(model_root).clean()
        return {'FINISHED'}
class BuildRig(Operator):
    """Build the rig of the MMD model owning the active object, then restore
    the previously active object."""
    bl_idname = 'mmd_tools.build_rig'
    bl_label = 'Build'
    bl_description = ''
    bl_options = {'PRESET'}

    def execute(self, context):
        active = context.active_object
        model_root = mmd_model.Model.findRoot(active)
        mmd_model.Model(model_root).build()
        # Building may change the scene's active object; put the original back.
        context.scene.objects.active = active
        return {'FINISHED'}
class ApplyAdditionalTransformConstraints(Operator):
    """Operator for applying MMD "additional transform" constraints."""
    # NOTE(review): 'additioinal' typo kept — this id is the operator's public
    # identifier; renaming it would break existing UI/keymap references.
    bl_idname = 'mmd_tools.apply_additioinal_transform'
    bl_label = 'Apply Additional Transform'
    bl_description = ''
    bl_options = {'PRESET'}

    @classmethod
    def poll(cls, context):
        # Enabled only when the active object belongs to an MMD model.
        return mmd_model.Model.findRoot(context.active_object)

    def execute(self, context):
        root = mmd_model.Model.findRoot(context.active_object)
        # NOTE(review): the Model instance is constructed but never used —
        # presumably a method call (applying the transforms) is missing here;
        # confirm intended behavior against mmd_model.Model's API.
        mmd_model.Model(root)
        #context.scene.objects.active = obj
        return {'FINISHED'}
class CreateMMDModelRoot(Operator):
    """Create an empty MMD model: root object, an armature with the parent
    bone, and the two special display frames."""
    bl_idname = 'mmd_tools.create_mmd_model_root_object'
    bl_label = 'Create a MMD Model Root Object'
    bl_description = ''
    bl_options = {'PRESET'}

    scale = bpy.props.FloatProperty(name='Scale', default=0.2)

    def execute(self, context):
        model = mmd_model.Model.create('New MMD Model', 'New MMD Model', self.scale)
        armature = model.armature()
        parent_name = u'全ての親'
        with bpyutils.edit_object(armature) as data:
            parent_bone = data.edit_bones.new(name=parent_name)
            parent_bone.head = [0.0, 0.0, 0.0]
            parent_bone.tail = [0.0, 0.0, 1.0*self.scale]
        pose_bone = armature.pose.bones[parent_name]
        pose_bone.mmd_bone.name_j = parent_name
        pose_bone.mmd_bone.name_e = 'Root'
        mmd_root = model.rootObject().mmd_root
        # Both 'Root' and the facial frame are marked special.
        for frame_name in ('Root', u'表情'):
            frame = mmd_root.display_item_frames.add()
            frame.name = frame_name
            frame.is_special = True
        return {'FINISHED'}

    def invoke(self, context, event):
        wm = context.window_manager
        return wm.invoke_props_dialog(self)
| {
"content_hash": "2b1a74bf15c48aa4c50276ab4167b617",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 82,
"avg_line_length": 32.31645569620253,
"alnum_prop": 0.6321974148061105,
"repo_name": "sugiany/blender_mmd_tools",
"id": "253b5574df39b65461636a00c4d6138296147217",
"size": "2614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmd_tools/operators/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335110"
}
],
"symlink_target": ""
} |
import os
from flask.cli import pass_script_info, get_debug_flag
import click
@click.command()
@click.option('--host', '-h', default='127.0.0.1',
              help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
              help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
              help='Enable or disable the reloader. By default the reloader '
              'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
              help='Enable or disable the debugger. By default the debugger '
              'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
              help='Enable or disable eager loading. By default eager '
              'loading is enabled if the reloader is disabled.')
@pass_script_info
def run(info, host, port, reload, debugger, eager_loading):
    """Runs a local development server for the Flask-SocketIO application.
    The reloader and debugger are by default enabled if the debug flag of
    Flask is enabled and disabled otherwise.
    """
    debug = get_debug_flag()
    # Options the user left unspecified fall back to the Flask debug flag;
    # eager loading defaults to on only when the reloader is off.
    if reload is None:
        reload = bool(debug)
    if debugger is None:
        debugger = bool(debug)
    if eager_loading is None:
        eager_loading = not reload
    # Extra startup messages. This depends a bit on Werkzeug internals to
    # not double execute when the reloader kicks in.
    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
        # If we have an import path we can print it out now which can help
        # people understand what's being served. If we do not have an
        # import path because the app was loaded through a callback then
        # we won't print anything.
        if info.app_import_path is not None:
            print(' * Serving Flask-SocketIO app "%s"' % info.app_import_path)
        if debug is not None:
            print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
    def run_server():
        # The SocketIO server (not Werkzeug) serves the app, so Werkzeug's
        # reloader wraps this function rather than running the app directly.
        app = info.load_app()
        socketio = app.extensions['socketio']
        socketio.run(app, host=host, port=port, debug=debugger,
                     use_reloader=False, log_output=debugger)
    if reload:
        from werkzeug.serving import run_with_reloader
        run_with_reloader(run_server)
    else:
        run_server()
| {
"content_hash": "acae6c328da0a60d1c0f692c66ba87bd",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 40.6551724137931,
"alnum_prop": 0.631891433418151,
"repo_name": "sumedh123/debatify",
"id": "3952ecd94d624d8b5c70937fa574b006d657db15",
"size": "2358",
"binary": false,
"copies": "1",
"ref": "refs/heads/UI",
"path": "venv/lib/python2.7/site-packages/flask_socketio/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "347155"
},
{
"name": "HTML",
"bytes": "102503"
},
{
"name": "JavaScript",
"bytes": "608373"
},
{
"name": "Python",
"bytes": "8393673"
},
{
"name": "Shell",
"bytes": "3298"
}
],
"symlink_target": ""
} |
import numpy as np
from bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid
from bokeh.models.glyphs import AnnularWedge
from bokeh.io import curdoc, show
# Sample points along a parabola; wedge radii grow from left to right.
N = 9
xs = np.linspace(-2, 2, N)
ys = xs ** 2
radii = xs / 12.0 + 0.4

source = ColumnDataSource(dict(x=xs, y=ys, r=radii))

plot = Plot(title=None, plot_width=300, plot_height=300,
            min_border=0, toolbar_location=None)

wedge = AnnularWedge(x="x", y="y", inner_radius=.2, outer_radius="r",
                     start_angle=0.6, end_angle=4.1, fill_color="#8888ee")
plot.add_glyph(source, wedge)

xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')

# One grid per dimension, driven by the matching axis ticker.
for dim, axis in enumerate((xaxis, yaxis)):
    plot.add_layout(Grid(dimension=dim, ticker=axis.ticker))

curdoc().add_root(plot)
show(plot)
| {
"content_hash": "5151b9c07366da613d2667944a3f3da3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 123,
"avg_line_length": 24.5625,
"alnum_prop": 0.7099236641221374,
"repo_name": "stonebig/bokeh",
"id": "dae92253a30a8aaf4f01b67efc2b4ce1f6437d38",
"size": "786",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/reference/models/AnnularWedge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
} |
import datetime
from functools import partial
from typing import Any, Optional, Union
import pandas as pd
from pandas.api.types import is_hashable
from pyspark._globals import _NoValue
from pyspark import pandas as ps
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.missing.indexes import MissingPandasLikeDatetimeIndex
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.utils import verify_temp_column_name
class DatetimeIndex(Index):
    """
    Immutable ndarray-like of datetime64 data.
    Parameters
    ----------
    data : array-like (1-dimensional), optional
        Optional datetime-like data to construct index with.
    freq : str or pandas offset object, optional
        One of pandas date offset strings or corresponding objects. The string
        'infer' can be passed in order to set the frequency of the index as the
        inferred frequency upon creation.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    closed : {'left', 'right'}, optional
        Set whether to include `start` and `end` that are on the
        boundary. The default includes boundary points on either end.
    ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
        When clocks moved backward due to DST, ambiguous times may arise.
        For example in Central European Time (UTC+01), when going from 03:00
        DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
        and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
        dictates how ambiguous times should be handled.
        - 'infer' will attempt to infer fall dst-transition hours based on
          order
        - bool-ndarray where True signifies a DST time, False signifies a
          non-DST time (note that this flag is only applicable for ambiguous
          times)
        - 'NaT' will return NaT where there are ambiguous times
        - 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
    dayfirst : bool, default False
        If True, parse dates in `data` with the day first order.
    yearfirst : bool, default False
        If True parse dates in `data` with the year first order.
    dtype : numpy.dtype or str, default None
        Note that the only NumPy dtype allowed is ‘datetime64[ns]’.
    copy : bool, default False
        Make a copy of input ndarray.
    name : label, default None
        Name to be stored in the index.
    See Also
    --------
    Index : The base pandas Index type.
    to_datetime : Convert argument to datetime.
    Examples
    --------
    >>> ps.DatetimeIndex(['1970-01-01', '1970-01-01', '1970-01-01'])
    DatetimeIndex(['1970-01-01', '1970-01-01', '1970-01-01'], dtype='datetime64[ns]', freq=None)
    From a Series:
    >>> from datetime import datetime
    >>> s = ps.Series([datetime(2021, 3, 1), datetime(2021, 3, 2)], index=[10, 20])
    >>> ps.DatetimeIndex(s)
    DatetimeIndex(['2021-03-01', '2021-03-02'], dtype='datetime64[ns]', freq=None)
    From an Index:
    >>> idx = ps.DatetimeIndex(['1970-01-01', '1970-01-01', '1970-01-01'])
    >>> ps.DatetimeIndex(idx)
    DatetimeIndex(['1970-01-01', '1970-01-01', '1970-01-01'], dtype='datetime64[ns]', freq=None)
    """
    def __new__(
        cls,
        data=None,
        freq=_NoValue,
        normalize=False,
        closed=None,
        ambiguous="raise",
        dayfirst=False,
        yearfirst=False,
        dtype=None,
        copy=False,
        name=None,
    ):
        if not is_hashable(name):
            raise TypeError("Index.name must be a hashable type")
        # An existing pandas-on-Spark Series/Index is re-wrapped as a
        # datetime64 Index without collecting the data.
        if isinstance(data, (Series, Index)):
            if dtype is None:
                dtype = "datetime64[ns]"
            return Index(data, dtype=dtype, copy=copy, name=name)
        # Otherwise build the index with pandas on the driver, then distribute.
        # `freq` is only forwarded when explicitly supplied (the _NoValue
        # sentinel distinguishes "not given" from freq=None).
        kwargs = dict(
            data=data,
            normalize=normalize,
            closed=closed,
            ambiguous=ambiguous,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            dtype=dtype,
            copy=copy,
            name=name,
        )
        if freq is not _NoValue:
            kwargs["freq"] = freq
        return ps.from_pandas(pd.DatetimeIndex(**kwargs))
    def __getattr__(self, item: str) -> Any:
        # Attributes pandas has but pandas-on-Spark does not implement are
        # surfaced via MissingPandasLikeDatetimeIndex (which raises an
        # informative error when accessed/called).
        if hasattr(MissingPandasLikeDatetimeIndex, item):
            property_or_func = getattr(MissingPandasLikeDatetimeIndex, item)
            if isinstance(property_or_func, property):
                return property_or_func.fget(self)  # type: ignore
            else:
                return partial(property_or_func, self)
        raise AttributeError("'DatetimeIndex' object has no attribute '{}'".format(item))
    # Properties
    @property
    def year(self) -> Index:
        """
        The year of the datetime.
        """
        return Index(self.to_series().dt.year)
    @property
    def month(self) -> Index:
        """
        The month of the timestamp as January = 1 December = 12.
        """
        return Index(self.to_series().dt.month)
    @property
    def day(self) -> Index:
        """
        The days of the datetime.
        """
        return Index(self.to_series().dt.day)
    @property
    def hour(self) -> Index:
        """
        The hours of the datetime.
        """
        return Index(self.to_series().dt.hour)
    @property
    def minute(self) -> Index:
        """
        The minutes of the datetime.
        """
        return Index(self.to_series().dt.minute)
    @property
    def second(self) -> Index:
        """
        The seconds of the datetime.
        """
        return Index(self.to_series().dt.second)
    @property
    def microsecond(self) -> Index:
        """
        The microseconds of the datetime.
        """
        return Index(self.to_series().dt.microsecond)
    @property
    def week(self) -> Index:
        """
        The week ordinal of the year.
        """
        return Index(self.to_series().dt.week)
    @property
    def weekofyear(self) -> Index:
        return Index(self.to_series().dt.weekofyear)
    weekofyear.__doc__ = week.__doc__
    @property
    def dayofweek(self) -> Index:
        """
        The day of the week with Monday=0, Sunday=6.
        Return the day of the week. It is assumed the week starts on
        Monday, which is denoted by 0 and ends on Sunday which is denoted
        by 6. This method is available on both Series with datetime
        values (using the `dt` accessor) or DatetimeIndex.
        Returns
        -------
        Series or Index
            Containing integers indicating the day number.
        See Also
        --------
        Series.dt.dayofweek : Alias.
        Series.dt.weekday : Alias.
        Series.dt.day_name : Returns the name of the day of the week.
        Examples
        --------
        >>> idx = ps.date_range('2016-12-31', '2017-01-08', freq='D')
        >>> idx.dayofweek
        Int64Index([5, 6, 0, 1, 2, 3, 4, 5, 6], dtype='int64')
        """
        return Index(self.to_series().dt.dayofweek)
    @property
    def day_of_week(self) -> Index:
        return self.dayofweek
    day_of_week.__doc__ = dayofweek.__doc__
    @property
    def weekday(self) -> Index:
        return Index(self.to_series().dt.weekday)
    weekday.__doc__ = dayofweek.__doc__
    @property
    def dayofyear(self) -> Index:
        """
        The ordinal day of the year.
        """
        return Index(self.to_series().dt.dayofyear)
    @property
    def day_of_year(self) -> Index:
        return self.dayofyear
    day_of_year.__doc__ = dayofyear.__doc__
    @property
    def quarter(self) -> Index:
        """
        The quarter of the date.
        """
        return Index(self.to_series().dt.quarter)
    @property
    def is_month_start(self) -> Index:
        """
        Indicates whether the date is the first day of the month.
        Returns
        -------
        Index
            Returns a Index with boolean values
        See Also
        --------
        is_month_end : Return a boolean indicating whether the date
            is the last day of the month.
        Examples
        --------
        >>> idx = ps.date_range("2018-02-27", periods=3)
        >>> idx.is_month_start
        Index([False, False, True], dtype='object')
        """
        return Index(self.to_series().dt.is_month_start)
    @property
    def is_month_end(self) -> Index:
        """
        Indicates whether the date is the last day of the month.
        Returns
        -------
        Index
            Returns a Index with boolean values.
        See Also
        --------
        is_month_start : Return a boolean indicating whether the date
            is the first day of the month.
        Examples
        --------
        >>> idx = ps.date_range("2018-02-27", periods=3)
        >>> idx.is_month_end
        Index([False, True, False], dtype='object')
        """
        return Index(self.to_series().dt.is_month_end)
    @property
    def is_quarter_start(self) -> Index:
        """
        Indicator for whether the date is the first day of a quarter.
        Returns
        -------
        is_quarter_start : Index
            Returns an Index with boolean values.
        See Also
        --------
        quarter : Return the quarter of the date.
        is_quarter_end : Similar property for indicating the quarter start.
        Examples
        --------
        >>> idx = ps.date_range('2017-03-30', periods=4)
        >>> idx.is_quarter_start
        Index([False, False, True, False], dtype='object')
        """
        return Index(self.to_series().dt.is_quarter_start)
    @property
    def is_quarter_end(self) -> Index:
        """
        Indicator for whether the date is the last day of a quarter.
        Returns
        -------
        is_quarter_end : Index
            Returns an Index with boolean values.
        See Also
        --------
        quarter : Return the quarter of the date.
        is_quarter_start : Similar property indicating the quarter start.
        Examples
        --------
        >>> idx = ps.date_range('2017-03-30', periods=4)
        >>> idx.is_quarter_end
        Index([False, True, False, False], dtype='object')
        """
        return Index(self.to_series().dt.is_quarter_end)
    @property
    def is_year_start(self) -> Index:
        """
        Indicate whether the date is the first day of a year.
        Returns
        -------
        Index
            Returns an Index with boolean values.
        See Also
        --------
        is_year_end : Similar property indicating the last day of the year.
        Examples
        --------
        >>> idx = ps.date_range("2017-12-30", periods=3)
        >>> idx.is_year_start
        Index([False, False, True], dtype='object')
        """
        return Index(self.to_series().dt.is_year_start)
    @property
    def is_year_end(self) -> Index:
        """
        Indicate whether the date is the last day of the year.
        Returns
        -------
        Index
            Returns an Index with boolean values.
        See Also
        --------
        is_year_start : Similar property indicating the start of the year.
        Examples
        --------
        >>> idx = ps.date_range("2017-12-30", periods=3)
        >>> idx.is_year_end
        Index([False, True, False], dtype='object')
        """
        return Index(self.to_series().dt.is_year_end)
    @property
    def is_leap_year(self) -> Index:
        """
        Boolean indicator if the date belongs to a leap year.
        A leap year is a year, which has 366 days (instead of 365) including
        29th of February as an intercalary day.
        Leap years are years which are multiples of four with the exception
        of years divisible by 100 but not by 400.
        Returns
        -------
        Index
             Booleans indicating if dates belong to a leap year.
        Examples
        --------
        >>> idx = ps.date_range("2012-01-01", "2015-01-01", freq="Y")
        >>> idx.is_leap_year
        Index([True, False, False], dtype='object')
        """
        return Index(self.to_series().dt.is_leap_year)
    @property
    def daysinmonth(self) -> Index:
        """
        The number of days in the month.
        """
        return Index(self.to_series().dt.daysinmonth)
    @property
    def days_in_month(self) -> Index:
        return Index(self.to_series().dt.days_in_month)
    days_in_month.__doc__ = daysinmonth.__doc__
    # Methods
    def ceil(self, freq, *args, **kwargs) -> "DatetimeIndex":
        """
        Perform ceil operation on the data to the specified freq.
        Parameters
        ----------
        freq : str or Offset
            The frequency level to ceil the index to. Must be a fixed
            frequency like 'S' (second) not 'ME' (month end).
        Returns
        -------
        DatetimeIndex
        Raises
        ------
        ValueError if the `freq` cannot be converted.
        Examples
        --------
        >>> rng = ps.date_range('1/1/2018 11:59:00', periods=3, freq='min')
        >>> rng.ceil('H')  # doctest: +NORMALIZE_WHITESPACE
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 13:00:00'],
                      dtype='datetime64[ns]', freq=None)
        """
        disallow_nanoseconds(freq)
        return DatetimeIndex(self.to_series().dt.ceil(freq, *args, **kwargs))
    def floor(self, freq, *args, **kwargs) -> "DatetimeIndex":
        """
        Perform floor operation on the data to the specified freq.
        Parameters
        ----------
        freq : str or Offset
            The frequency level to floor the index to. Must be a fixed
            frequency like 'S' (second) not 'ME' (month end).
        Returns
        -------
        DatetimeIndex
        Raises
        ------
        ValueError if the `freq` cannot be converted.
        Examples
        --------
        >>> rng = ps.date_range('1/1/2018 11:59:00', periods=3, freq='min')
        >>> rng.floor("H")  # doctest: +NORMALIZE_WHITESPACE
        DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)
        """
        disallow_nanoseconds(freq)
        return DatetimeIndex(self.to_series().dt.floor(freq, *args, **kwargs))
    def round(self, freq, *args, **kwargs) -> "DatetimeIndex":
        """
        Perform round operation on the data to the specified freq.
        Parameters
        ----------
        freq : str or Offset
            The frequency level to round the index to. Must be a fixed
            frequency like 'S' (second) not 'ME' (month end).
        Returns
        -------
        DatetimeIndex
        Raises
        ------
        ValueError if the `freq` cannot be converted.
        Examples
        --------
        >>> rng = ps.date_range('1/1/2018 11:59:00', periods=3, freq='min')
        >>> rng.round("H")  # doctest: +NORMALIZE_WHITESPACE
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)
        """
        disallow_nanoseconds(freq)
        return DatetimeIndex(self.to_series().dt.round(freq, *args, **kwargs))
    def month_name(self, locale: Optional[str] = None) -> Index:
        """
        Return the month names of the DatetimeIndex with specified locale.
        Parameters
        ----------
        locale : str, optional
            Locale determining the language in which to return the month name.
            Default is English locale.
        Returns
        -------
        Index
            Index of month names.
        Examples
        --------
        >>> idx = ps.date_range(start='2018-01', freq='M', periods=3)
        >>> idx.month_name()
        Index(['January', 'February', 'March'], dtype='object')
        """
        return Index(self.to_series().dt.month_name(locale))
    def day_name(self, locale: Optional[str] = None) -> Index:
        """
        Return the day names of the series with specified locale.
        Parameters
        ----------
        locale : str, optional
            Locale determining the language in which to return the day name.
            Default is English locale.
        Returns
        -------
        Index
            Index of day names.
        Examples
        --------
        >>> idx = ps.date_range(start='2018-01-01', freq='D', periods=3)
        >>> idx.day_name()
        Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
        """
        return Index(self.to_series().dt.day_name(locale))
    def normalize(self) -> "DatetimeIndex":
        """
        Convert times to midnight.
        The time component of the date-time is converted to midnight i.e.
        00:00:00. This is useful in cases, when the time does not matter.
        Length is unaltered. The timezones are unaffected.
        This method is available on Series with datetime values under
        the ``.dt`` accessor.
        Returns
        -------
        DatetimeIndex
            The same type as the original data.
        See Also
        --------
        floor : Floor the series to the specified freq.
        ceil : Ceil the series to the specified freq.
        round : Round the series to the specified freq.
        Examples
        --------
        >>> idx = ps.date_range(start='2014-08-01 10:00', freq='H', periods=3)
        >>> idx.normalize()
        DatetimeIndex(['2014-08-01', '2014-08-01', '2014-08-01'], dtype='datetime64[ns]', freq=None)
        """
        return DatetimeIndex(self.to_series().dt.normalize())
    def strftime(self, date_format: str) -> Index:
        """
        Convert to a string Index using specified date_format.
        Return an Index of formatted strings specified by date_format, which
        supports the same string format as the python standard library. Details
        of the string format can be found in python string format
        doc.
        Parameters
        ----------
        date_format : str
            Date format string (e.g. "%%Y-%%m-%%d").
        Returns
        -------
        Index
            Index of formatted strings.
        See Also
        --------
        normalize : Return series with times to midnight.
        round : Round the series to the specified freq.
        floor : Floor the series to the specified freq.
        Examples
        --------
        >>> idx = ps.date_range(pd.Timestamp("2018-03-10 09:00"), periods=3, freq='s')
        >>> idx.strftime('%B %d, %Y, %r')  # doctest: +NORMALIZE_WHITESPACE
        Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
               'March 10, 2018, 09:00:02 AM'],
              dtype='object')
        """
        return Index(self.to_series().dt.strftime(date_format))
    def indexer_between_time(
        self,
        start_time: Union[datetime.time, str],
        end_time: Union[datetime.time, str],
        include_start: bool = True,
        include_end: bool = True,
    ) -> Index:
        """
        Return index locations of values between particular times of day
        (e.g., 9:00-9:30AM).
        Parameters
        ----------
        start_time, end_time : datetime.time, str
            Time passed either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
        include_start : bool, default True
        include_end : bool, default True
        Returns
        -------
        values_between_time : Index of integers
        Examples
        --------
        >>> kidx = ps.date_range("2000-01-01", periods=3, freq="T")
        >>> kidx  # doctest: +NORMALIZE_WHITESPACE
        DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:01:00',
                       '2000-01-01 00:02:00'],
                      dtype='datetime64[ns]', freq=None)
        >>> kidx.indexer_between_time("00:01", "00:02").sort_values()
        Int64Index([1, 2], dtype='int64')
        >>> kidx.indexer_between_time("00:01", "00:02", include_end=False)
        Int64Index([1], dtype='int64')
        >>> kidx.indexer_between_time("00:01", "00:02", include_start=False)
        Int64Index([2], dtype='int64')
        """
        # Filtering runs on driver-side pandas chunks via apply_batch.
        def pandas_between_time(pdf) -> ps.DataFrame[int]:
            return pdf.between_time(start_time, end_time, include_start, include_end)
        # Project away all columns; only the datetime index is needed.
        kdf = self.to_frame()[[]]
        id_column_name = verify_temp_column_name(kdf, "__id_column__")
        kdf = kdf.koalas.attach_id_column("distributed-sequence", id_column_name)
        with ps.option_context("compute.default_index_type", "distributed"):
            # The attached index in the statement below will be dropped soon,
            # so we enforce “distributed” default index type
            kdf = kdf.koalas.apply_batch(pandas_between_time)
        return ps.Index(first_series(kdf).rename(self.name))
    def indexer_at_time(self, time: Union[datetime.time, str], asof: bool = False) -> Index:
        """
        Return index locations of values at particular time of day
        (e.g. 9:30AM).
        Parameters
        ----------
        time : datetime.time or str
            Time passed in either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").
        Returns
        -------
        values_at_time : Index of integers
        Examples
        --------
        >>> kidx = ps.date_range("2000-01-01", periods=3, freq="T")
        >>> kidx  # doctest: +NORMALIZE_WHITESPACE
        DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:01:00',
                       '2000-01-01 00:02:00'],
                      dtype='datetime64[ns]', freq=None)
        >>> kidx.indexer_at_time("00:00")
        Int64Index([0], dtype='int64')
        >>> kidx.indexer_at_time("00:01")
        Int64Index([1], dtype='int64')
        """
        if asof:
            raise NotImplementedError("'asof' argument is not supported")
        # Filtering runs on driver-side pandas chunks via apply_batch.
        def pandas_at_time(pdf) -> ps.DataFrame[int]:
            return pdf.at_time(time, asof)
        # Project away all columns; only the datetime index is needed.
        kdf = self.to_frame()[[]]
        id_column_name = verify_temp_column_name(kdf, "__id_column__")
        kdf = kdf.koalas.attach_id_column("distributed-sequence", id_column_name)
        with ps.option_context("compute.default_index_type", "distributed"):
            # The attached index in the statement below will be dropped soon,
            # so we enforce “distributed” default index type
            kdf = kdf.koalas.apply_batch(pandas_at_time)
        return ps.Index(first_series(kdf).rename(self.name))
def disallow_nanoseconds(freq):
    """Raise ValueError for nanosecond frequency aliases ('N' / 'ns')."""
    if freq == "N" or freq == "ns":
        raise ValueError("nanoseconds is not supported")
def _test():
    """Run this module's doctests against a local SparkSession and exit
    non-zero if any fail."""
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.indexes.datetimes
    # Doctests assume paths relative to the Spark source tree.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.pandas.indexes.datetimes.__dict__.copy()
    # The doctest examples refer to pandas-on-Spark as `ps`.
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]")
        .appName("pyspark.pandas.indexes.datetimes tests")
        .getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.indexes.datetimes,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    # Executed directly: run the module's doctest suite.
    _test()
| {
"content_hash": "0412f5a30686f498634f9f8b7dfff8c5",
"timestamp": "",
"source": "github",
"line_count": 756,
"max_line_length": 100,
"avg_line_length": 31.462962962962962,
"alnum_prop": 0.5587740687799546,
"repo_name": "BryanCutler/spark",
"id": "25dd5a478974ea0c1e6b79a2dcff1f4579462404",
"size": "24582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/pandas/indexes/datetimes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "31536"
},
{
"name": "Batchfile",
"bytes": "24063"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23868"
},
{
"name": "HTML",
"bytes": "8567"
},
{
"name": "Java",
"bytes": "2740577"
},
{
"name": "JavaScript",
"bytes": "132645"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "3797"
},
{
"name": "PowerShell",
"bytes": "3735"
},
{
"name": "Python",
"bytes": "2073103"
},
{
"name": "R",
"bytes": "876803"
},
{
"name": "Roff",
"bytes": "27922"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "20944985"
},
{
"name": "Shell",
"bytes": "151467"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
} |
import stat
import pysftp
import logging
import datetime
from airflow.hooks.base_hook import BaseHook
class SFTPHook(BaseHook):
"""
Interact with SFTP. Aims to be interchangeable with FTPHook.
Pitfalls: - In contrast with FTPHook describe_directory only returns size, type and
modify. It doesn't return unix.owner, unix.mode, perm, unix.group and
unique.
- retrieve_file and store_file only take a local full path and not a
buffer.
- If no mode is passed to create_directory it will be created with 777
permissions.
Errors that may occur throughout but should be handled downstream.
"""
    def __init__(self, ftp_conn_id='sftp_default'):
        # The connection is opened lazily by get_conn(), not here.
        self.ftp_conn_id = ftp_conn_id
        self.conn = None
    def get_conn(self):
        """
        Returns an SFTP connection object
        """
        if self.conn is None:
            # Resolve settings from Airflow's connection store.
            params = self.get_connection(self.ftp_conn_id)
            cnopts = pysftp.CnOpts()
            # Extra-JSON flag to skip host-key verification entirely.
            if ('ignore_hostkey_verification' in params.extra_dejson
                    and params.extra_dejson['ignore_hostkey_verification']):
                cnopts.hostkeys = None
            conn_params = {
                'host': params.host,
                'port': params.port,
                'username': params.login,
                'cnopts': cnopts
            }
            # Password and/or private-key auth, depending on what's configured.
            if params.password is not None:
                conn_params['password'] = params.password
            if 'private_key' in params.extra_dejson:
                conn_params['private_key'] = params.extra_dejson['private_key']
            if 'private_key_pass' in params.extra_dejson:
                conn_params['private_key_pass'] = params.extra_dejson['private_key_pass']
            self.conn = pysftp.Connection(**conn_params)
        return self.conn
def close_conn(self):
"""
Closes the connection. An error will occur if the
connection wasnt ever opened.
"""
conn = self.conn
conn.close()
self.conn = None
def describe_directory(self, path):
"""
Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
:type path: str
"""
conn = self.get_conn()
flist = conn.listdir_attr(path)
files = {}
for f in flist:
modify = datetime.datetime.fromtimestamp(f.st_mtime).strftime('%Y%m%d%H%M%S')
files[f.filename] = {
'size': f.st_size,
'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',
'modify': modify}
return files
def list_directory(self, path):
"""
Returns a list of files on the remote system.
:param path: full path to the remote directory to list
:type path: str
"""
conn = self.get_conn()
files = conn.listdir(path)
return files
def create_directory(self, path, mode=777):
"""
Creates a directory on the remote system.
:param path: full path to the remote directory to create
:type path: str
:param mode: int representation of octal mode for directory
"""
conn = self.get_conn()
conn.mkdir(path, mode)
def delete_directory(self, path):
"""
Deletes a directory on the remote system.
:param path: full path to the remote directory to delete
:type path: str
"""
conn = self.get_conn()
conn.rmdir(path)
def retrieve_file(self, remote_full_path, local_full_path):
"""
Transfers the remote file to a local location.
If local_full_path is a string path, the file will be put
at that location
:param remote_full_path: full path to the remote file
:type remote_full_path: str
:param local_full_path: full path to the local file
:type local_full_path: str
"""
conn = self.get_conn()
logging.info('Retrieving file from FTP: {}'.format(remote_full_path))
conn.get(remote_full_path, local_full_path)
logging.info('Finished retrieving file from FTP: {}'.format(
remote_full_path))
def store_file(self, remote_full_path, local_full_path):
"""
Transfers a local file to the remote location.
If local_full_path_or_buffer is a string path, the file will be read
from that location
:param remote_full_path: full path to the remote file
:type remote_full_path: str
:param local_full_path: full path to the local file
:type local_full_path: str
"""
conn = self.get_conn()
conn.put(local_full_path, remote_full_path)
def delete_file(self, path):
"""
Removes a file on the FTP Server
:param path: full path to the remote file
:type path: str
"""
conn = self.get_conn()
conn.remove(path)
def get_mod_time(self, path):
conn = self.get_conn()
ftp_mdtm = conn.stat(path).st_mtime
return datetime.datetime.fromtimestamp(ftp_mdtm).strftime('%Y%m%d%H%M%S')
| {
"content_hash": "f36fcea2bf684b686dfb64d851814daa",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 89,
"avg_line_length": 35.557046979865774,
"alnum_prop": 0.5790864477161193,
"repo_name": "sdiazb/airflow",
"id": "7340146625aa934cc291dce6b299f9c6c1dfbdb9",
"size": "5865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/sftp_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57001"
},
{
"name": "HTML",
"bytes": "149244"
},
{
"name": "JavaScript",
"bytes": "1364376"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2287776"
},
{
"name": "Shell",
"bytes": "21140"
}
],
"symlink_target": ""
} |
'Update vendors'
# Python 2 maintenance script (print statements, raw_input): fetch the
# vendors already registered in QuickBooks and add any names from the
# hard-coded list below that are missing.
from quickbooks import QBRequestProcessor
from parameters import APPLICATION_NAME
# Get vendors from QuickBooks
# (VendorQueryRq with an empty request body queries vendor records)
qbRequestProcessor = QBRequestProcessor(applicationName=APPLICATION_NAME)
results = qbRequestProcessor.call('VendorQueryRq', {})
# Determine new vendors
# allVendorNames is the desired full vendor list; edit in place before running.
allVendorNames = [
    'xxx',
    'yyy',
]
# NOTE(review): assumes each element of results is a dict with a 'Name'
# key -- confirm against QBRequestProcessor.call's return shape.
oldVendorNames = [x['Name'] for x in results]
newVendorNames = set(allVendorNames).difference(oldVendorNames)
# Confirm
if oldVendorNames:
    print 'Here are the existing vendors:\n' + '\n'.join(oldVendorNames)
if newVendorNames:
    print 'Here are the vendors we will add:\n' + '\n'.join(newVendorNames)
    # Only mutate QuickBooks after an explicit interactive confirmation.
    if raw_input('Proceed (y/[n])? ').lower() == 'y':
        # Add new vendors
        # (set iteration order is arbitrary, so vendors are added in no
        # particular order)
        for vendorName in newVendorNames:
            qbRequestProcessor.call('VendorAddRq', {
                'VendorAdd': {
                    'Name': vendorName,
                },
            })
| {
"content_hash": "a518fd4e4656cd35cb20a0eece549e5e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 32.642857142857146,
"alnum_prop": 0.6641137855579868,
"repo_name": "invisibleroads/inteum-quickbooks-sync",
"id": "ed90184c09c109fe9ad611b12f00a5d1d06320f0",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97334"
}
],
"symlink_target": ""
} |
import logging
import datetime
from google.appengine.ext import ndb
class Mail(ndb.Model):
    """A blog post entity stored in the App Engine datastore.

    All GQL queries use bound parameters (:1, :2) rather than string
    interpolation, so caller-supplied values such as ``title_path``
    cannot alter the query text.
    """

    # Post title plus optional URL slug and hover text.
    title = ndb.StringProperty(required=True)
    title_path = ndb.StringProperty(required=False)
    title_hover_text = ndb.StringProperty(required=False)
    # Post body.
    body = ndb.TextProperty(required=True)
    # Creation time, set once on first put().
    birthday = ndb.DateTimeProperty(auto_now_add=True)
    # Offset used for date-indexed permalinks (see get_posts_by_date).
    bday_offset = ndb.IntegerProperty(required=True)
    # Last-modified time, refreshed on every put().
    editday = ndb.DateTimeProperty(auto_now=True)

    @staticmethod
    def get_top_posts(limit=None, offset=None):
        """Return posts newest-first, honouring optional limit/offset."""
        return Mail.gql('ORDER BY birthday DESC').fetch(limit=limit,
                                                        offset=offset)

    @staticmethod
    def get_posts_by_date(post_date, limit=None, offset=None):
        """Return the posts created during one calendar day.

        :param post_date: a ``date`` or ``datetime``; any time component
            is truncated to midnight so the query spans the whole day
        :raises TypeError: if post_date is neither date nor datetime
        """
        if isinstance(post_date, datetime.date):
            # datetime is a date subclass, so this branch also truncates
            # datetimes to midnight (combine ignores a datetime's time).
            post_date = datetime.datetime.combine(post_date, datetime.time())
        else:
            raise TypeError('post_date must be a date or datetime')
        tomorrow = post_date + datetime.timedelta(days=1)
        # NOTE(review): `<=` makes the upper bound inclusive, so a post
        # stamped exactly at the next midnight appears in both days --
        # preserved from the original; confirm before tightening to `<`.
        clauses = ['WHERE birthday >= :1 AND birthday <= :2']
        # Date-indexed permalinks count from the *oldest* post of the day
        # (index 0), so an explicit offset flips the sort direction; a
        # plain day view shows newest first.
        if offset is None:
            clauses.append('ORDER BY birthday DESC')
        else:
            clauses.append('ORDER BY birthday ASC')
        query = Mail.gql(' '.join(clauses), post_date, tomorrow)
        return query.fetch(limit=limit, offset=offset)

    @staticmethod
    def get_posts_by_title_path(title_path, limit=None, offset=None):
        """Return posts whose title_path equals the given slug, newest first.

        The slug is passed as a bound GQL parameter, never interpolated
        into the query string.
        """
        query = Mail.gql('WHERE title_path = :1 ORDER BY birthday DESC',
                         title_path)
        return query.fetch(limit=limit, offset=offset)

    @staticmethod
    def get_posts_by_key_list(key_list, limit=None, offset=None):
        """Resolve urlsafe keys to entities, applying offset/limit to the
        key list before fetching."""
        if offset:
            key_list = key_list[offset:]
        if limit:
            key_list = key_list[:limit]
        posts = [ndb.Key(urlsafe=key).get() for key in key_list]
        # Drop keys whose entity no longer exists.
        # TODO: shouldn't have to do this
        return [post for post in posts if post is not None]
| {
"content_hash": "977ae354ed025e1c1bf0bad8df6f36ab",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 38,
"alnum_prop": 0.5615220483641536,
"repo_name": "stupidnetizen/miniblog",
"id": "adfc1de39dd0d5a54b243b7c52ffd86e2ff906eb",
"size": "2909",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
# Python 2 plotting script (csv files opened with 'rb'/'wb'): aggregate
# fitness traces from several GA runs (files "<run>-champion.csv" and
# "<run>-average.csv"), write a merged CSV per series, and plot the mean
# fitness per generation with error bars every `errorbarStep` generations.
import csv
import numpy
import matplotlib.pyplot as plt
# Experiment layout: numFiles independent runs of numGenerations each.
numFiles = 5
numGenerations = 250
errorbarStep = 25
filenames = ['champion','average']
legend = ['Champion Fitness', 'Average Fitness']
colors = ['r', 'b']
styles = ['-','--']
linewidth = 2
# Use core/TeX fonts so the saved figure embeds cleanly in papers.
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
plt.figure()
for file_number in range(len(filenames)):
    filename = filenames[file_number]
    color = colors[file_number]
    style = styles[file_number]
    legend_val = legend[file_number]
    # Full per-generation series for the mean curve.
    generations = []
    means = []
    stddev = []
    # Sparse copies kept only every errorbarStep-th generation, used for
    # the error-bar overlay.
    generations_error = []
    means_error = []
    stddev_error = []
    #values = range(1,numGenerations+1)
    # values[row] collects one fitness sample per run for generation row.
    values = []
    for row in range(numGenerations):
        #values[row] = [values[row]]
        values.append([])
    for i in range(numFiles):
        new_file = csv.reader(open(str(i) + '-' + filename + '.csv', 'rb'))
        new_file = list(new_file)
        for row in range(numGenerations):
            # NOTE(review): the constant 10 subtracted here presumably
            # removes a fitness baseline added by the GA -- confirm with
            # the experiment setup.
            values[row].append(float(new_file[row][0]) - 10)
    file_writer = csv.writer(open(filename + '.csv', 'wb'))
    for row in range(numGenerations):
        #calculate data for plotting
        generations.append(row+1)
        means.append(numpy.mean(values[row]))
        stddev.append(numpy.std(values[row]))
        if ((row+1)%errorbarStep == 0):
            generations_error.append(row+1)
            means_error.append(numpy.mean(values[row]))
            stddev_error.append(numpy.std(values[row]))
        mean_val = numpy.mean(values[row])
        #print csv to file with mean and generation ready for chart creation
        # Each output row is: per-run samples..., generation, mean.
        values[row].append(row+1)
        values[row].append(mean_val)
        file_writer.writerow(values[row])
        #print [row+1, numpy.mean(values[row]), numpy.std(values[row])]
    # Error bars only (fmt=None suppresses the connecting line); the mean
    # curve itself is drawn by the plot() call that follows.
    plt.errorbar(generations_error,means_error,yerr=stddev_error,fmt=None,ecolor=color,label='_nolegend_',linewidth=linewidth,capsize=2*linewidth,mew=linewidth)
    plt.plot(generations, means, color+style, label=legend_val, linewidth=linewidth)
plt.legend(loc=4)
plt.xlim(0,255)
plt.ylim(0,35)
plt.grid()
plt.xlabel('Number of generations')
plt.ylabel('Fitness')
#plt.savefig('figure.png', bbox_inches='tight', dpi=200)
plt.show()
| {
"content_hash": "05ecc8a92f8c46be64358f479debe88c",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 158,
"avg_line_length": 29.493150684931507,
"alnum_prop": 0.681374825824431,
"repo_name": "mhauskn/HyperNEAT",
"id": "514521b64dd9f945733c9f0fac758a5a96dcc2a5",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gecco/results/freeway/merge.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "91920"
},
{
"name": "Assembly",
"bytes": "145417"
},
{
"name": "Batchfile",
"bytes": "647"
},
{
"name": "C",
"bytes": "1420400"
},
{
"name": "C#",
"bytes": "55726"
},
{
"name": "C++",
"bytes": "4589959"
},
{
"name": "CLIPS",
"bytes": "7056"
},
{
"name": "CMake",
"bytes": "66687"
},
{
"name": "DIGITAL Command Language",
"bytes": "13695"
},
{
"name": "Groff",
"bytes": "7571"
},
{
"name": "HTML",
"bytes": "119747"
},
{
"name": "Makefile",
"bytes": "27585"
},
{
"name": "Module Management System",
"bytes": "1593"
},
{
"name": "Pascal",
"bytes": "62898"
},
{
"name": "PostScript",
"bytes": "81121"
},
{
"name": "Prolog",
"bytes": "467819"
},
{
"name": "Python",
"bytes": "63410"
},
{
"name": "Ruby",
"bytes": "10453"
},
{
"name": "SAS",
"bytes": "1776"
},
{
"name": "Shell",
"bytes": "9043"
},
{
"name": "TeX",
"bytes": "1549261"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.