id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
20,700
waf_unit_test.py
projecthamster_hamster/waflib/Tools/waf_unit_test.py
#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 # Thomas Nagy, 2010-2018 (ita) """ Unit testing system for C/C++/D and interpreted languages providing test execution: * in parallel, by using ``waf -j`` * partial (only the tests that have changed) or full (by using ``waf --alltests``) The tests are declared by adding the **test** feature to programs:: def options(opt): opt.load('compiler_cxx waf_unit_test') def configure(conf): conf.load('compiler_cxx waf_unit_test') def build(bld): bld(features='cxx cxxprogram test', source='main.cpp', target='app') # or bld.program(features='test', source='main2.cpp', target='app2') When the build is executed, the program 'test' will be built and executed without arguments. The success/failure is detected by looking at the return code. The status and the standard output/error are stored on the build context. The results can be displayed by registering a callback function. Here is how to call the predefined callback:: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.summary) By passing --dump-test-scripts the build outputs corresponding python files (with extension _run.py) that are useful for debugging purposes. """ import os, shlex, sys from waflib.TaskGen import feature, after_method, taskgen_method from waflib import Utils, Task, Logs, Options from waflib.Tools import ccroot testlock = Utils.threading.Lock() SCRIPT_TEMPLATE = """#! %(python)s import subprocess, sys cmd = %(cmd)r # if you want to debug with gdb: #cmd = ['gdb', '-args'] + cmd env = %(env)r status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str)) sys.exit(status) """ @taskgen_method def handle_ut_cwd(self, key): """ Task generator method, used internally to limit code duplication. This method may disappear anytime. 
""" cwd = getattr(self, key, None) if cwd: if isinstance(cwd, str): # we want a Node instance if os.path.isabs(cwd): self.ut_cwd = self.bld.root.make_node(cwd) else: self.ut_cwd = self.path.make_node(cwd) @feature('test_scripts') def make_interpreted_test(self): """Create interpreted unit tests.""" for x in ['test_scripts_source', 'test_scripts_template']: if not hasattr(self, x): Logs.warn('a test_scripts taskgen i missing %s' % x) return self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False)) script_nodes = self.to_nodes(self.test_scripts_source) for script_node in script_nodes: tsk = self.create_task('utest', [script_node]) tsk.vars = lst + tsk.vars tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd()) self.handle_ut_cwd('test_scripts_cwd') env = getattr(self, 'test_scripts_env', None) if env: self.ut_env = env else: self.ut_env = dict(os.environ) paths = getattr(self, 'test_scripts_paths', {}) for (k,v) in paths.items(): p = self.ut_env.get(k, '').split(os.pathsep) if isinstance(v, str): v = v.split(os.pathsep) self.ut_env[k] = os.pathsep.join(p + v) self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env]) @feature('test') @after_method('apply_link', 'process_use') def make_test(self): """Create the unit test task. 
There can be only one unit test task by task generator.""" if not getattr(self, 'link_task', None): return tsk = self.create_task('utest', self.link_task.outputs) if getattr(self, 'ut_str', None): self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) tsk.vars = tsk.vars + lst self.env.append_value('UT_DEPS', self.ut_str) self.handle_ut_cwd('ut_cwd') if not hasattr(self, 'ut_paths'): paths = [] for x in self.tmp_use_sorted: try: y = self.bld.get_tgen_by_name(x).link_task except AttributeError: pass else: if not isinstance(y, ccroot.stlink_task): paths.append(y.outputs[0].parent.abspath()) self.ut_paths = os.pathsep.join(paths) + os.pathsep if not hasattr(self, 'ut_env'): self.ut_env = dct = dict(os.environ) def add_path(var): dct[var] = self.ut_paths + dct.get(var,'') if Utils.is_win32: add_path('PATH') elif Utils.unversioned_sys_platform() == 'darwin': add_path('DYLD_LIBRARY_PATH') add_path('LD_LIBRARY_PATH') else: add_path('LD_LIBRARY_PATH') if not hasattr(self, 'ut_cmd'): self.ut_cmd = getattr(Options.options, 'testcmd', False) self.env.append_value('UT_DEPS', str(self.ut_cmd)) self.env.append_value('UT_DEPS', self.ut_paths) self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env]) @taskgen_method def add_test_results(self, tup): """Override and return tup[1] to interrupt the build immediately if a test does not run""" Logs.debug("ut: %r", tup) try: self.utest_results.append(tup) except AttributeError: self.utest_results = [tup] try: self.bld.utest_results.append(tup) except AttributeError: self.bld.utest_results = [tup] @Task.deep_inputs class utest(Task.Task): """ Execute a unit test """ color = 'PINK' after = ['vnum', 'inst'] vars = ['UT_DEPS'] def runnable_status(self): """ Always execute the task if `waf --alltests` was used or no tests if ``waf --notests`` was used """ if getattr(Options.options, 'no_tests', False): return Task.SKIP_ME ret = super(utest, self).runnable_status() if ret == 
Task.SKIP_ME: if getattr(Options.options, 'all_tests', False): return Task.RUN_ME return ret def get_test_env(self): """ In general, tests may require any library built anywhere in the project. Override this method if fewer paths are needed """ return self.generator.ut_env def post_run(self): super(utest, self).post_run() if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]: self.generator.bld.task_sigs[self.uid()] = None def run(self): """ Execute the test. The execution is always successful, and the results are stored on ``self.generator.bld.utest_results`` for postprocessing. Override ``add_test_results`` to interrupt the build """ if hasattr(self.generator, 'ut_run'): return self.generator.ut_run(self) self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()]) ut_cmd = getattr(self.generator, 'ut_cmd', False) if ut_cmd: self.ut_exec = shlex.split(ut_cmd % Utils.shell_escape(self.ut_exec)) return self.exec_command(self.ut_exec) def exec_command(self, cmd, **kw): self.generator.bld.log_command(cmd, kw) if getattr(Options.options, 'dump_test_scripts', False): script_code = SCRIPT_TEMPLATE % { 'python': sys.executable, 'env': self.get_test_env(), 'cwd': self.get_cwd().abspath(), 'cmd': cmd } script_file = self.inputs[0].abspath() + '_run.py' Utils.writef(script_file, script_code, encoding='utf-8') os.chmod(script_file, Utils.O755) if Logs.verbose > 1: Logs.info('Test debug file written as %r' % script_file) proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(), stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd,str)) (stdout, stderr) = proc.communicate() self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr) testlock.acquire() try: return self.generator.add_test_results(tup) finally: testlock.release() def get_cwd(self): return getattr(self.generator, 'ut_cwd', self.inputs[0].parent) def summary(bld): """ 
Display an execution summary:: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.summary) """ lst = getattr(bld, 'utest_results', []) if lst: Logs.pprint('CYAN', 'execution summary') total = len(lst) tfail = len([x for x in lst if x[1]]) Logs.pprint('GREEN', ' tests that pass %d/%d' % (total-tfail, total)) for (f, code, out, err) in lst: if not code: Logs.pprint('GREEN', ' %s' % f) Logs.pprint('GREEN' if tfail == 0 else 'RED', ' tests that fail %d/%d' % (tfail, total)) for (f, code, out, err) in lst: if code: Logs.pprint('RED', ' %s' % f) def set_exit_code(bld): """ If any of the tests fail waf will exit with that exit code. This is useful if you have an automated build system which need to report on errors from the tests. You may use it like this: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.set_exit_code) """ lst = getattr(bld, 'utest_results', []) for (f, code, out, err) in lst: if code: msg = [] if out: msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8'))) if err: msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8'))) bld.fatal(os.linesep.join(msg)) def options(opt): """ Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options. 
""" opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests') opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests') opt.add_option('--clear-failed', action='store_true', default=False, help='Force failed unit tests to run again next time', dest='clear_failed_tests') opt.add_option('--testcmd', action='store', default=False, dest='testcmd', help='Run the unit tests using the test-cmd string example "--testcmd="valgrind --error-exitcode=1 %s" to run under valgrind') opt.add_option('--dump-test-scripts', action='store_true', default=False, help='Create python scripts to help debug tests', dest='dump_test_scripts')
9,827
Python
.py
255
35.701961
128
0.702257
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,701
compiler_c.py
projecthamster_hamster/waflib/Tools/compiler_c.py
#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de, 2007 (pmarat) """ Try to detect a C compiler from the list of supported compilers (gcc, msvc, etc):: def options(opt): opt.load('compiler_c') def configure(cnf): cnf.load('compiler_c') def build(bld): bld.program(source='main.c', target='app') The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_c.c_compiler`. To register a new C compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: from waflib.Tools.compiler_c import c_compiler c_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] def options(opt): opt.load('compiler_c') def configure(cnf): cnf.load('compiler_c') def build(bld): bld.program(source='main.c', target='app') Not all compilers need to have a specific tool. For example, the clang compilers can be detected by the gcc tools when using:: $ CC=clang waf configure """ import re from waflib.Tools import ccroot from waflib import Utils from waflib.Logs import debug c_compiler = { 'win32': ['msvc', 'gcc', 'clang'], 'cygwin': ['gcc', 'clang'], 'darwin': ['clang', 'gcc'], 'aix': ['xlc', 'gcc', 'clang'], 'linux': ['gcc', 'clang', 'icc'], 'sunos': ['suncc', 'gcc'], 'irix': ['gcc', 'irixcc'], 'hpux': ['gcc'], 'osf1V': ['gcc'], 'gnu': ['gcc', 'clang'], 'java': ['gcc', 'msvc', 'clang', 'icc'], 'gnukfreebsd': ['gcc', 'clang'], 'default': ['clang', 'gcc'], } """ Dict mapping platform names to Waf tools finding specific C compilers:: from waflib.Tools.compiler_c import c_compiler c_compiler['linux'] = ['gcc', 'icc', 'suncc'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = c_compiler.get(build_platform, c_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable C compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_c_compiler or 
default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_c')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (C compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) debug('compiler_c: %r', e) else: if conf.env.CC: conf.end_msg(conf.env.get_flat('CC')) conf.env.COMPILER_CC = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a C compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-c-compiler=gcc """ test_for_compiler = default_compilers() opt.load_special_tools('c_*.py', ban=['c_dumbpreproc.py']) cc_compiler_opts = opt.add_option_group('Configuration options') cc_compiler_opts.add_option('--check-c-compiler', default=None, help='list of C compilers to try [%s]' % test_for_compiler, dest="check_c_compiler") for x in test_for_compiler.split(): opt.load('%s' % x)
3,251
Python
.py
93
32.623656
126
0.683774
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,702
gas.py
projecthamster_hamster/waflib/Tools/gas.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) "Detect as/gas/gcc for compiling assembly files" import waflib.Tools.asm # - leave this from waflib.Tools import ar def configure(conf): """ Find the programs gas/as/gcc and set the variable *AS* """ conf.find_program(['gas', 'gcc'], var='AS') conf.env.AS_TGT_F = ['-c', '-o'] conf.env.ASLNK_TGT_F = ['-o'] conf.find_ar() conf.load('asm') conf.env.ASM_NAME = 'gas'
448
Python
.py
16
26.25
55
0.682984
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,703
ruby.py
projecthamster_hamster/waflib/Tools/ruby.py
#!/usr/bin/env python # encoding: utf-8 # daniel.svensson at purplescout.se 2008 # Thomas Nagy 2016-2018 (ita) """ Support for Ruby extensions. A C/C++ compiler is required:: def options(opt): opt.load('compiler_c ruby') def configure(conf): conf.load('compiler_c ruby') conf.check_ruby_version((1,8,0)) conf.check_ruby_ext_devel() conf.check_ruby_module('libxml') def build(bld): bld( features = 'c cshlib rubyext', source = 'rb_mytest.c', target = 'mytest_ext', install_path = '${ARCHDIR_RUBY}') bld.install_files('${LIBDIR_RUBY}', 'Mytest.rb') """ import os from waflib import Errors, Options, Task, Utils from waflib.TaskGen import before_method, feature, extension from waflib.Configure import conf @feature('rubyext') @before_method('apply_incpaths', 'process_source', 'apply_bundle', 'apply_link') def init_rubyext(self): """ Add required variables for ruby extensions """ self.install_path = '${ARCHDIR_RUBY}' self.uselib = self.to_list(getattr(self, 'uselib', '')) if not 'RUBY' in self.uselib: self.uselib.append('RUBY') if not 'RUBYEXT' in self.uselib: self.uselib.append('RUBYEXT') @feature('rubyext') @before_method('apply_link', 'propagate_uselib_vars') def apply_ruby_so_name(self): """ Strip the *lib* prefix from ruby extensions """ self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.rubyext_PATTERN @conf def check_ruby_version(self, minver=()): """ Checks if ruby is installed. If installed the variable RUBY will be set in environment. The ruby binary can be overridden by ``--with-ruby-binary`` command-line option. """ ruby = self.find_program('ruby', var='RUBY', value=Options.options.rubybinary) try: version = self.cmd_and_log(ruby + ['-e', 'puts defined?(VERSION) ? 
VERSION : RUBY_VERSION']).strip() except Errors.WafError: self.fatal('could not determine ruby version') self.env.RUBY_VERSION = version try: ver = tuple(map(int, version.split('.'))) except Errors.WafError: self.fatal('unsupported ruby version %r' % version) cver = '' if minver: cver = '> ' + '.'.join(str(x) for x in minver) if ver < minver: self.fatal('ruby is too old %r' % ver) self.msg('Checking for ruby version %s' % cver, version) @conf def check_ruby_ext_devel(self): """ Check if a ruby extension can be created """ if not self.env.RUBY: self.fatal('ruby detection is required first') if not self.env.CC_NAME and not self.env.CXX_NAME: self.fatal('load a c/c++ compiler first') version = tuple(map(int, self.env.RUBY_VERSION.split("."))) def read_out(cmd): return Utils.to_list(self.cmd_and_log(self.env.RUBY + ['-rrbconfig', '-e', cmd])) def read_config(key): return read_out('puts RbConfig::CONFIG[%r]' % key) cpppath = archdir = read_config('archdir') if version >= (1, 9, 0): ruby_hdrdir = read_config('rubyhdrdir') cpppath += ruby_hdrdir if version >= (2, 0, 0): cpppath += read_config('rubyarchhdrdir') cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])] self.check(header_name='ruby.h', includes=cpppath, errmsg='could not find ruby header file', link_header_test=False) self.env.LIBPATH_RUBYEXT = read_config('libdir') self.env.LIBPATH_RUBYEXT += archdir self.env.INCLUDES_RUBYEXT = cpppath self.env.CFLAGS_RUBYEXT = read_config('CCDLFLAGS') self.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0] # ok this is really stupid, but the command and flags are combined. # so we try to find the first argument... 
flags = read_config('LDSHARED') while flags and flags[0][0] != '-': flags = flags[1:] # we also want to strip out the deprecated ppc flags if len(flags) > 1 and flags[1] == "ppc": flags = flags[2:] self.env.LINKFLAGS_RUBYEXT = flags self.env.LINKFLAGS_RUBYEXT += read_config('LIBS') self.env.LINKFLAGS_RUBYEXT += read_config('LIBRUBYARG_SHARED') if Options.options.rubyarchdir: self.env.ARCHDIR_RUBY = Options.options.rubyarchdir else: self.env.ARCHDIR_RUBY = read_config('sitearchdir')[0] if Options.options.rubylibdir: self.env.LIBDIR_RUBY = Options.options.rubylibdir else: self.env.LIBDIR_RUBY = read_config('sitelibdir')[0] @conf def check_ruby_module(self, module_name): """ Check if the selected ruby interpreter can require the given ruby module:: def configure(conf): conf.check_ruby_module('libxml') :param module_name: module :type module_name: string """ self.start_msg('Ruby module %s' % module_name) try: self.cmd_and_log(self.env.RUBY + ['-e', 'require \'%s\';puts 1' % module_name]) except Errors.WafError: self.end_msg(False) self.fatal('Could not find the ruby module %r' % module_name) self.end_msg(True) @extension('.rb') def process(self, node): return self.create_task('run_ruby', node) class run_ruby(Task.Task): """ Task to run ruby files detected by file extension .rb:: def options(opt): opt.load('ruby') def configure(ctx): ctx.check_ruby_version() def build(bld): bld.env.RBFLAGS = '-e puts "hello world"' bld(source='a_ruby_file.rb') """ run_str = '${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}' def options(opt): """ Add the ``--with-ruby-archdir``, ``--with-ruby-libdir`` and ``--with-ruby-binary`` options """ opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files') opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path') opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate 
ruby binary')
5,579
Python
.py
151
34.509934
136
0.711849
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,704
errcheck.py
projecthamster_hamster/waflib/Tools/errcheck.py
#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Common mistakes highlighting. There is a performance impact, so this tool is only loaded when running ``waf -v`` """ typos = { 'feature':'features', 'sources':'source', 'targets':'target', 'include':'includes', 'export_include':'export_includes', 'define':'defines', 'importpath':'includes', 'installpath':'install_path', 'iscopy':'is_copy', 'uses':'use', } meths_typos = ['__call__', 'program', 'shlib', 'stlib', 'objects'] import sys from waflib import Logs, Build, Node, Task, TaskGen, ConfigSet, Errors, Utils from waflib.Tools import ccroot def check_same_targets(self): mp = Utils.defaultdict(list) uids = {} def check_task(tsk): if not isinstance(tsk, Task.Task): return if hasattr(tsk, 'no_errcheck_out'): return for node in tsk.outputs: mp[node].append(tsk) try: uids[tsk.uid()].append(tsk) except KeyError: uids[tsk.uid()] = [tsk] for g in self.groups: for tg in g: try: for tsk in tg.tasks: check_task(tsk) except AttributeError: # raised if not a task generator, which should be uncommon check_task(tg) dupe = False for (k, v) in mp.items(): if len(v) > 1: dupe = True msg = '* Node %r is created more than once%s. The task generators are:' % (k, Logs.verbose == 1 and " (full message on 'waf -v -v')" or "") Logs.error(msg) for x in v: if Logs.verbose > 1: Logs.error(' %d. %r', 1 + v.index(x), x.generator) else: Logs.error(' %d. %r in %r', 1 + v.index(x), x.generator.name, getattr(x.generator, 'path', None)) Logs.error('If you think that this is an error, set no_errcheck_out on the task instance') if not dupe: for (k, v) in uids.items(): if len(v) > 1: Logs.error('* Several tasks use the same identifier. 
Please check the information on\n https://waf.io/apidocs/Task.html?highlight=uid#waflib.Task.Task.uid') tg_details = tsk.generator.name if Logs.verbose > 2: tg_details = tsk.generator for tsk in v: Logs.error(' - object %r (%r) defined in %r', tsk.__class__.__name__, tsk, tg_details) def check_invalid_constraints(self): feat = set() for x in list(TaskGen.feats.values()): feat.union(set(x)) for (x, y) in TaskGen.task_gen.prec.items(): feat.add(x) feat.union(set(y)) ext = set() for x in TaskGen.task_gen.mappings.values(): ext.add(x.__name__) invalid = ext & feat if invalid: Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method', list(invalid)) # the build scripts have been read, so we can check for invalid after/before attributes on task classes for cls in list(Task.classes.values()): if sys.hexversion > 0x3000000 and issubclass(cls, Task.Task) and isinstance(cls.hcode, str): raise Errors.WafError('Class %r has hcode value %r of type <str>, expecting <bytes> (use Utils.h_cmd() ?)' % (cls, cls.hcode)) for x in ('before', 'after'): for y in Utils.to_list(getattr(cls, x, [])): if not Task.classes.get(y): Logs.error('Erroneous order constraint %r=%r on task class %r', x, y, cls.__name__) if getattr(cls, 'rule', None): Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")', cls.__name__) def replace(m): """ Replaces existing BuildContext methods to verify parameter names, for example ``bld(source=)`` has no ending *s* """ oldcall = getattr(Build.BuildContext, m) def call(self, *k, **kw): ret = oldcall(self, *k, **kw) for x in typos: if x in kw: if x == 'iscopy' and 'subst' in getattr(self, 'features', ''): continue Logs.error('Fix the typo %r -> %r on %r', x, typos[x], ret) return ret setattr(Build.BuildContext, m, call) def enhance_lib(): """ Modifies existing classes and methods to enable error verification """ for m in meths_typos: replace(m) # catch '..' 
in ant_glob patterns def ant_glob(self, *k, **kw): if k: lst = Utils.to_list(k[0]) for pat in lst: sp = pat.split('/') if '..' in sp: Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'", k[0]) if '.' in sp: Logs.error("In ant_glob pattern %r: '.' means 'one dot', not 'current directory'", k[0]) return self.old_ant_glob(*k, **kw) Node.Node.old_ant_glob = Node.Node.ant_glob Node.Node.ant_glob = ant_glob # catch ant_glob on build folders def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False): if remove: try: if self.is_child_of(self.ctx.bldnode) and not quiet: quiet = True Logs.error('Calling ant_glob on build folders (%r) is dangerous: add quiet=True / remove=False', self) except AttributeError: pass return self.old_ant_iter(accept, maxdepth, pats, dir, src, remove, quiet) Node.Node.old_ant_iter = Node.Node.ant_iter Node.Node.ant_iter = ant_iter # catch conflicting ext_in/ext_out/before/after declarations old = Task.is_before def is_before(t1, t2): ret = old(t1, t2) if ret and old(t2, t1): Logs.error('Contradictory order constraints in classes %r %r', t1, t2) return ret Task.is_before = is_before # check for bld(feature='cshlib') where no 'c' is given - this can be either a mistake or on purpose # so we only issue a warning def check_err_features(self): lst = self.to_list(self.features) if 'shlib' in lst: Logs.error('feature shlib -> cshlib, dshlib or cxxshlib') for x in ('c', 'cxx', 'd', 'fc'): if not x in lst and lst and lst[0] in [x+y for y in ('program', 'shlib', 'stlib')]: Logs.error('%r features is probably missing %r', self, x) TaskGen.feature('*')(check_err_features) # check for erroneous order constraints def check_err_order(self): if not hasattr(self, 'rule') and not 'subst' in Utils.to_list(self.features): for x in ('before', 'after', 'ext_in', 'ext_out'): if hasattr(self, x): Logs.warn('Erroneous order constraint %r on non-rule based task generator %r', x, self) else: for x in 
('before', 'after'): for y in self.to_list(getattr(self, x, [])): if not Task.classes.get(y): Logs.error('Erroneous order constraint %s=%r on %r (no such class)', x, y, self) TaskGen.feature('*')(check_err_order) # check for @extension used with @feature/@before_method/@after_method def check_compile(self): check_invalid_constraints(self) try: ret = self.orig_compile() finally: check_same_targets(self) return ret Build.BuildContext.orig_compile = Build.BuildContext.compile Build.BuildContext.compile = check_compile # check for invalid build groups #914 def use_rec(self, name, **kw): try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: pass else: idx = self.bld.get_group_idx(self) odx = self.bld.get_group_idx(y) if odx > idx: msg = "Invalid 'use' across build groups:" if Logs.verbose > 1: msg += '\n target %r\n uses:\n %r' % (self, y) else: msg += " %r uses %r (try 'waf -v -v' for the full error)" % (self.name, name) raise Errors.WafError(msg) self.orig_use_rec(name, **kw) TaskGen.task_gen.orig_use_rec = TaskGen.task_gen.use_rec TaskGen.task_gen.use_rec = use_rec # check for env.append def _getattr(self, name, default=None): if name == 'append' or name == 'add': raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique') elif name == 'prepend': raise Errors.WafError('env.prepend does not exist: use env.prepend_value') if name in self.__slots__: return super(ConfigSet.ConfigSet, self).__getattr__(name, default) else: return self[name] ConfigSet.ConfigSet.__getattr__ = _getattr def options(opt): """ Error verification can be enabled by default (not just on ``waf -v``) by adding to the user script options """ enhance_lib()
7,826
Python
.py
210
34.057143
162
0.677164
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,705
d.py
projecthamster_hamster/waflib/Tools/d.py
#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2007-2018 (ita) from waflib import Utils, Task, Errors from waflib.TaskGen import taskgen_method, feature, extension from waflib.Tools import d_scan, d_config from waflib.Tools.ccroot import link_task, stlink_task class d(Task.Task): "Compile a d file into an object file" color = 'GREEN' run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_SRC_F:SRC} ${D_TGT_F:TGT}' scan = d_scan.scan class d_with_header(d): "Compile a d file and generate a header" run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}' class d_header(Task.Task): "Compile d headers" color = 'BLUE' run_str = '${D} ${D_HEADER} ${SRC}' class dprogram(link_task): "Link object files into a d program" run_str = '${D_LINKER} ${LINKFLAGS} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F:TGT} ${RPATH_ST:RPATH} ${DSTLIB_MARKER} ${DSTLIBPATH_ST:STLIBPATH} ${DSTLIB_ST:STLIB} ${DSHLIB_MARKER} ${DLIBPATH_ST:LIBPATH} ${DSHLIB_ST:LIB}' inst_to = '${BINDIR}' class dshlib(dprogram): "Link object files into a d shared library" inst_to = '${LIBDIR}' class dstlib(stlink_task): "Link object files into a d static library" pass # do not remove @extension('.d', '.di', '.D') def d_hook(self, node): """ Compile *D* files. 
To get .di files as well as .o files, set the following:: def build(bld): bld.program(source='foo.d', target='app', generate_headers=True) """ ext = Utils.destos_to_binfmt(self.env.DEST_OS) == 'pe' and 'obj' or 'o' out = '%s.%d.%s' % (node.name, self.idx, ext) def create_compiled_task(self, name, node): task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task if getattr(self, 'generate_headers', None): tsk = create_compiled_task(self, 'd_with_header', node) tsk.outputs.append(node.change_ext(self.env.DHEADER_ext)) else: tsk = create_compiled_task(self, 'd', node) return tsk @taskgen_method def generate_header(self, filename): """ See feature request #104:: def build(bld): tg = bld.program(source='foo.d', target='app') tg.generate_header('blah.d') # is equivalent to: #tg = bld.program(source='foo.d', target='app', header_lst='blah.d') :param filename: header to create :type filename: string """ try: self.header_lst.append([filename, self.install_path]) except AttributeError: self.header_lst = [[filename, self.install_path]] @feature('d') def process_header(self): """ Process the attribute 'header_lst' to create the d header compilation tasks:: def build(bld): bld.program(source='foo.d', target='app', header_lst='blah.d') """ for i in getattr(self, 'header_lst', []): node = self.path.find_resource(i[0]) if not node: raise Errors.WafError('file %r not found on d obj' % i[0]) self.create_task('d_header', node, node.change_ext('.di'))
3,014
Python
.py
80
35.3
213
0.693521
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,706
icc.py
projecthamster_hamster/waflib/Tools/icc.py
#!/usr/bin/env python # encoding: utf-8 # Stian Selnes 2008 # Thomas Nagy 2009-2018 (ita) """ Detects the Intel C compiler """ import sys from waflib.Tools import ccroot, ar, gcc from waflib.Configure import conf @conf def find_icc(conf): """ Finds the program icc and execute it to ensure it really is icc """ cc = conf.find_program(['icc', 'ICL'], var='CC') conf.get_cc_version(cc, icc=True) conf.env.CC_NAME = 'icc' def configure(conf): conf.find_icc() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
604
Python
.py
26
21.576923
64
0.728223
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,707
bison.py
projecthamster_hamster/waflib/Tools/bison.py
#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy 2009-2018 (ita) """ The **bison** program is a code generator which creates C or C++ files. The generated files are compiled into object files. """ from waflib import Task from waflib.TaskGen import extension class bison(Task.Task): """Compiles bison files""" color = 'BLUE' run_str = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}' ext_out = ['.h'] # just to make sure @extension('.y', '.yc', '.yy') def big_bison(self, node): """ Creates a bison task, which must be executed from the directory of the output file. """ has_h = '-d' in self.env.BISONFLAGS outs = [] if node.name.endswith('.yc'): outs.append(node.change_ext('.tab.cc')) if has_h: outs.append(node.change_ext('.tab.hh')) else: outs.append(node.change_ext('.tab.c')) if has_h: outs.append(node.change_ext('.tab.h')) tsk = self.create_task('bison', node, outs) tsk.cwd = node.parent.get_bld() # and the c/cxx file must be compiled too self.source.append(outs[0]) def configure(conf): """ Detects the *bison* program """ conf.find_program('bison', var='BISON') conf.env.BISONFLAGS = ['-d']
1,187
Python
.py
40
27.6
84
0.683656
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,708
c_aliases.py
projecthamster_hamster/waflib/Tools/c_aliases.py
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2015 (ita)

"base for all c/c++ programs and libraries"

from waflib import Utils, Errors
from waflib.Configure import conf

def get_extensions(lst):
	"""
	Returns the file extensions for the list of files given as input

	:param lst: files to process
	:list lst: list of string or :py:class:`waflib.Node.Node`
	:return: list of file extensions
	:rtype: list of string
	"""
	def ext_of(item):
		name = item if isinstance(item, str) else item.name
		return name[name.rfind('.') + 1:]
	return [ext_of(x) for x in Utils.to_list(lst)]

def sniff_features(**kw):
	"""
	Computes and returns the features required for a task generator by
	looking at the file extensions. This aimed for C/C++ mainly::

		sniff_features(source=['foo.c', 'foo.cxx'], type='shlib')
		# returns  ['cxx', 'c', 'cxxshlib', 'cshlib']

	:param source: source files to process
	:type source: list of string or :py:class:`waflib.Node.Node`
	:param type: object type in *program*, *shlib* or *stlib*
	:type type: string
	:return: the list of features for a task generator processing the source files
	:rtype: list of string
	"""
	exts = get_extensions(kw.get('source', []))
	typ = kw['typ']
	feats = []

	# watch the order, cxx will have the precedence
	for x in 'cxx cpp c++ cc C'.split():
		if x in exts:
			feats.append('cxx')
			break
	if 'c' in exts or 'vala' in exts or 'gs' in exts:
		feats.append('c')
	if 's' in exts or 'S' in exts:
		feats.append('asm')
	for x in 'f f90 F F90 for FOR'.split():
		if x in exts:
			feats.append('fc')
			break
	if 'd' in exts:
		feats.append('d')
	if 'java' in exts:
		feats.append('java')
		return 'java'

	if typ in ('program', 'shlib', 'stlib'):
		linkable = ('cxx', 'd', 'fc', 'c', 'asm')
		will_link = False
		# iterate over a snapshot: appended entries like 'cprogram' can
		# never match the linkable tuple anyway, so the result is the same
		for feat in list(feats):
			if feat in linkable:
				feats.append(feat + typ)
				will_link = True
		if not will_link and not kw.get('features', []):
			raise Errors.WafError('Unable to determine how to link %r, try adding eg: features="c cshlib"?' % kw)
	return feats

def set_features(kw, typ):
	"""
	Inserts data in the input dict *kw* based on existing data and on the type of target
	required (typ).

	:param kw: task generator parameters
	:type kw: dict
	:param typ: type of target
	:type typ: string
	"""
	kw['typ'] = typ
	kw['features'] = Utils.to_list(kw.get('features', [])) + Utils.to_list(sniff_features(**kw))

@conf
def program(bld, *k, **kw):
	"""
	Alias for creating programs by looking at the file extensions::

		def build(bld):
			bld.program(source='foo.c', target='app')
			# equivalent to:
			# bld(features='c cprogram', source='foo.c', target='app')
	"""
	set_features(kw, 'program')
	return bld(*k, **kw)

@conf
def shlib(bld, *k, **kw):
	"""
	Alias for creating shared libraries by looking at the file extensions::

		def build(bld):
			bld.shlib(source='foo.c', target='app')
			# equivalent to:
			# bld(features='c cshlib', source='foo.c', target='app')
	"""
	set_features(kw, 'shlib')
	return bld(*k, **kw)

@conf
def stlib(bld, *k, **kw):
	"""
	Alias for creating static libraries by looking at the file extensions::

		def build(bld):
			bld.stlib(source='foo.cpp', target='app')
			# equivalent to:
			# bld(features='cxx cxxstlib', source='foo.cpp', target='app')
	"""
	set_features(kw, 'stlib')
	return bld(*k, **kw)

@conf
def objects(bld, *k, **kw):
	"""
	Alias for creating object files by looking at the file extensions::

		def build(bld):
			bld.objects(source='foo.c', target='app')
			# equivalent to:
			# bld(features='c', source='foo.c', target='app')
	"""
	set_features(kw, 'objects')
	return bld(*k, **kw)
3,580
Python
.py
118
27.720339
104
0.675306
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,709
dbus.py
projecthamster_hamster/waflib/Tools/dbus.py
#!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007

"""
Compiles dbus files with **dbus-binding-tool**

Typical usage::

	def options(opt):
		opt.load('compiler_c dbus')
	def configure(conf):
		conf.load('compiler_c dbus')
	def build(bld):
		tg = bld.program(
			includes = '.',
			source = bld.path.ant_glob('*.c'),
			target = 'gnome-hello')
		tg.add_dbus_file('test.xml', 'test_prefix', 'glib-server')
"""

from waflib import Task, Errors
from waflib.TaskGen import taskgen_method, before_method

@taskgen_method
def add_dbus_file(self, filename, prefix, mode):
	"""
	Registers a dbus file for processing; the entries accumulate in the
	task generator attribute *dbus_lst*.

	:param filename: xml file to compile
	:type filename: string
	:param prefix: dbus binding tool prefix (--prefix=prefix)
	:type prefix: string
	:param mode: dbus binding tool mode (--mode=mode)
	:type mode: string
	"""
	if not hasattr(self, 'dbus_lst'):
		self.dbus_lst = []
	# make sure process_dbus runs for this task generator
	if 'process_dbus' not in self.meths:
		self.meths.append('process_dbus')
	self.dbus_lst.append([filename, prefix, mode])

@before_method('process_source')
def process_dbus(self):
	"""
	Creates one :py:class:`waflib.Tools.dbus.dbus_binding_tool` instance
	per entry recorded in the attribute *dbus_lst*.
	"""
	for filename, prefix, mode in getattr(self, 'dbus_lst', []):
		node = self.path.find_resource(filename)
		if not node:
			raise Errors.WafError('file not found ' + filename)
		task = self.create_task('dbus_binding_tool', node, node.change_ext('.h'))
		task.env.DBUS_BINDING_TOOL_PREFIX = prefix
		task.env.DBUS_BINDING_TOOL_MODE = mode

class dbus_binding_tool(Task.Task):
	"""
	Compiles a dbus file
	"""
	color = 'BLUE'
	ext_out = ['.h']
	run_str = '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}'
	shell = True # temporary workaround for #795

def configure(conf):
	"""
	Detects the program dbus-binding-tool and sets ``conf.env.DBUS_BINDING_TOOL``
	"""
	conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL')
2,062
Python
.py
60
32.133333
129
0.712851
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,710
javaw.py
projecthamster_hamster/waflib/Tools/javaw.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Java support Javac is one of the few compilers that behaves very badly: #. it outputs files where it wants to (-d is only for the package root) #. it recompiles files silently behind your back #. it outputs an undefined amount of files (inner classes) Remember that the compilation can be performed using Jython[1] rather than regular Python. Instead of running one of the following commands:: ./waf configure python waf configure You would have to run:: java -jar /path/to/jython.jar waf configure [1] http://www.jython.org/ Usage ===== Load the "java" tool. def configure(conf): conf.load('java') Java tools will be autodetected and eventually, if present, the quite standard JAVA_HOME environment variable will be used. The also standard CLASSPATH variable is used for library searching. In configuration phase checks can be done on the system environment, for example to check if a class is known in the classpath:: conf.check_java_class('java.io.FileOutputStream') or if the system supports JNI applications building:: conf.check_jni_headers() The java tool supports compiling java code, creating jar files and creating javadoc documentation. This can be either done separately or together in a single definition. 
For example to manage them separately:: bld(features = 'javac', srcdir = 'src', compat = '1.7', use = 'animals', name = 'cats-src', ) bld(features = 'jar', basedir = '.', destfile = '../cats.jar', name = 'cats', use = 'cats-src' ) Or together by defining all the needed attributes:: bld(features = 'javac jar javadoc', srcdir = 'src/', # folder containing the sources to compile outdir = 'src', # folder where to output the classes (in the build directory) compat = '1.6', # java compatibility version number classpath = ['.', '..'], # jar basedir = 'src', # folder containing the classes and other files to package (must match outdir) destfile = 'foo.jar', # do not put the destfile in the folder of the java classes! use = 'NNN', jaropts = ['-C', 'default/src/', '.'], # can be used to give files manifest = 'src/Manifest.mf', # Manifest file to include # javadoc javadoc_package = ['com.meow' , 'com.meow.truc.bar', 'com.meow.truc.foo'], javadoc_output = 'javadoc', ) External jar dependencies can be mapped to a standard waf "use" dependency by setting an environment variable with a CLASSPATH prefix in the configuration, for example:: conf.env.CLASSPATH_NNN = ['aaaa.jar', 'bbbb.jar'] and then NNN can be freely used in rules as:: use = 'NNN', In the java tool the dependencies via use are not transitive by default, as this necessity depends on the code. To enable recursive dependency scanning use on a specific rule: recurse_use = True Or build-wise by setting RECURSE_JAVA: bld.env.RECURSE_JAVA = True Unit tests can be integrated in the waf unit test environment using the javatest extra. 
""" import os, shutil from waflib import Task, Utils, Errors, Node from waflib.Configure import conf from waflib.TaskGen import feature, before_method, after_method, taskgen_method from waflib.Tools import ccroot ccroot.USELIB_VARS['javac'] = set(['CLASSPATH', 'JAVACFLAGS']) SOURCE_RE = '**/*.java' JAR_RE = '**/*' class_check_source = ''' public class Test { public static void main(String[] argv) { Class lib; if (argv.length < 1) { System.err.println("Missing argument"); System.exit(77); } try { lib = Class.forName(argv[0]); } catch (ClassNotFoundException e) { System.err.println("ClassNotFoundException"); System.exit(1); } lib = null; System.exit(0); } } ''' @feature('javac') @before_method('process_source') def apply_java(self): """ Create a javac task for compiling *.java files*. There can be only one javac task by task generator. """ Utils.def_attrs(self, jarname='', classpath='', sourcepath='.', srcdir='.', jar_mf_attributes={}, jar_mf_classpath=[]) outdir = getattr(self, 'outdir', None) if outdir: if not isinstance(outdir, Node.Node): outdir = self.path.get_bld().make_node(self.outdir) else: outdir = self.path.get_bld() outdir.mkdir() self.outdir = outdir self.env.OUTDIR = outdir.abspath() self.javac_task = tsk = self.create_task('javac') tmp = [] srcdir = getattr(self, 'srcdir', '') if isinstance(srcdir, Node.Node): srcdir = [srcdir] for x in Utils.to_list(srcdir): if isinstance(x, Node.Node): y = x else: y = self.path.find_dir(x) if not y: self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) tmp.append(y) tsk.srcdir = tmp if getattr(self, 'compat', None): tsk.env.append_value('JAVACFLAGS', ['-source', str(self.compat)]) if hasattr(self, 'sourcepath'): fold = [isinstance(x, Node.Node) and x or self.path.find_dir(x) for x in self.to_list(self.sourcepath)] names = os.pathsep.join([x.srcpath() for x in fold]) else: names = [x.srcpath() for x in tsk.srcdir] if names: tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names]) @taskgen_method 
def java_use_rec(self, name, **kw): """ Processes recursively the *use* attribute for each referred java compilation """ if name in self.tmp_use_seen: return self.tmp_use_seen.append(name) try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) return else: y.post() # Add generated JAR name for CLASSPATH. Task ordering (set_run_after) # is already guaranteed by ordering done between the single tasks if hasattr(y, 'jar_task'): self.use_lst.append(y.jar_task.outputs[0].abspath()) else: if hasattr(y,'outdir'): self.use_lst.append(y.outdir.abspath()) else: self.use_lst.append(y.path.get_bld().abspath()) for x in self.to_list(getattr(y, 'use', [])): self.java_use_rec(x) @feature('javac') @before_method('propagate_uselib_vars') @after_method('apply_java') def use_javac_files(self): """ Processes the *use* attribute referring to other java compilations """ self.use_lst = [] self.tmp_use_seen = [] self.uselib = self.to_list(getattr(self, 'uselib', [])) names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: tg = get(x) except Errors.WafError: self.uselib.append(x) else: tg.post() if hasattr(tg, 'jar_task'): self.use_lst.append(tg.jar_task.outputs[0].abspath()) self.javac_task.set_run_after(tg.jar_task) self.javac_task.dep_nodes.extend(tg.jar_task.outputs) else: if hasattr(tg, 'outdir'): base_node = tg.outdir else: base_node = tg.path.get_bld() self.use_lst.append(base_node.abspath()) self.javac_task.dep_nodes.extend([dx for dx in base_node.ant_glob(JAR_RE, remove=False, quiet=True)]) for tsk in tg.tasks: self.javac_task.set_run_after(tsk) # If recurse use scan is enabled recursively add use attribute for each used one if getattr(self, 'recurse_use', False) or self.bld.env.RECURSE_JAVA: self.java_use_rec(x) self.env.append_value('CLASSPATH', self.use_lst) @feature('javac') @after_method('apply_java', 'propagate_uselib_vars', 'use_javac_files') def set_classpath(self): """ Sets the CLASSPATH value 
on the *javac* task previously created. """ if getattr(self, 'classpath', None): self.env.append_unique('CLASSPATH', getattr(self, 'classpath', [])) for x in self.tasks: x.env.CLASSPATH = os.pathsep.join(self.env.CLASSPATH) + os.pathsep @feature('jar') @after_method('apply_java', 'use_javac_files') @before_method('process_source') def jar_files(self): """ Creates a jar task (one maximum per task generator) """ destfile = getattr(self, 'destfile', 'test.jar') jaropts = getattr(self, 'jaropts', []) manifest = getattr(self, 'manifest', None) basedir = getattr(self, 'basedir', None) if basedir: if not isinstance(self.basedir, Node.Node): basedir = self.path.get_bld().make_node(basedir) else: basedir = self.path.get_bld() if not basedir: self.bld.fatal('Could not find the basedir %r for %r' % (self.basedir, self)) self.jar_task = tsk = self.create_task('jar_create') if manifest: jarcreate = getattr(self, 'jarcreate', 'cfm') if not isinstance(manifest,Node.Node): node = self.path.find_resource(manifest) else: node = manifest if not node: self.bld.fatal('invalid manifest file %r for %r' % (manifest, self)) tsk.dep_nodes.append(node) jaropts.insert(0, node.abspath()) else: jarcreate = getattr(self, 'jarcreate', 'cf') if not isinstance(destfile, Node.Node): destfile = self.path.find_or_declare(destfile) if not destfile: self.bld.fatal('invalid destfile %r for %r' % (destfile, self)) tsk.set_outputs(destfile) tsk.basedir = basedir jaropts.append('-C') jaropts.append(basedir.bldpath()) jaropts.append('.') tsk.env.JAROPTS = jaropts tsk.env.JARCREATE = jarcreate if getattr(self, 'javac_task', None): tsk.set_run_after(self.javac_task) @feature('jar') @after_method('jar_files') def use_jar_files(self): """ Processes the *use* attribute to set the build order on the tasks created by another task generator. 
""" self.uselib = self.to_list(getattr(self, 'uselib', [])) names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: y = get(x) except Errors.WafError: self.uselib.append(x) else: y.post() self.jar_task.run_after.update(y.tasks) class JTask(Task.Task): """ Base class for java and jar tasks; provides functionality to run long commands """ def split_argfile(self, cmd): inline = [cmd[0]] infile = [] for x in cmd[1:]: # jar and javac do not want -J flags in @file if x.startswith('-J'): inline.append(x) else: infile.append(self.quote_flag(x)) return (inline, infile) class jar_create(JTask): """ Creates a jar file """ color = 'GREEN' run_str = '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}' def runnable_status(self): """ Wait for dependent tasks to be executed, then read the files to update the list of inputs. """ for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: try: self.inputs = [x for x in self.basedir.ant_glob(JAR_RE, remove=False, quiet=True) if id(x) != id(self.outputs[0])] except Exception: raise Errors.WafError('Could not find the basedir %r for %r' % (self.basedir, self)) return super(jar_create, self).runnable_status() class javac(JTask): """ Compiles java files """ color = 'BLUE' run_str = '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}' vars = ['CLASSPATH', 'JAVACFLAGS', 'JAVAC', 'OUTDIR'] """ The javac task will be executed again if the variables CLASSPATH, JAVACFLAGS, JAVAC or OUTDIR change. """ def uid(self): """Identify java tasks by input&output folder""" lst = [self.__class__.__name__, self.generator.outdir.abspath()] for x in self.srcdir: lst.append(x.abspath()) return Utils.h_list(lst) def runnable_status(self): """ Waits for dependent tasks to be complete, then read the file system to find the input nodes. 
""" for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: self.inputs = [] for x in self.srcdir: if x.exists(): self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False, quiet=True)) return super(javac, self).runnable_status() def post_run(self): """ List class files created """ for node in self.generator.outdir.ant_glob('**/*.class', quiet=True): self.generator.bld.node_sigs[node] = self.uid() self.generator.bld.task_sigs[self.uid()] = self.cache_sig @feature('javadoc') @after_method('process_rule') def create_javadoc(self): """ Creates a javadoc task (feature 'javadoc') """ tsk = self.create_task('javadoc') tsk.classpath = getattr(self, 'classpath', []) self.javadoc_package = Utils.to_list(self.javadoc_package) if not isinstance(self.javadoc_output, Node.Node): self.javadoc_output = self.bld.path.find_or_declare(self.javadoc_output) class javadoc(Task.Task): """ Builds java documentation """ color = 'BLUE' def __str__(self): return '%s: %s -> %s\n' % (self.__class__.__name__, self.generator.srcdir, self.generator.javadoc_output) def run(self): env = self.env bld = self.generator.bld wd = bld.bldnode #add src node + bld node (for generated java code) srcpath = self.generator.path.abspath() + os.sep + self.generator.srcdir srcpath += os.pathsep srcpath += self.generator.path.get_bld().abspath() + os.sep + self.generator.srcdir classpath = env.CLASSPATH classpath += os.pathsep classpath += os.pathsep.join(self.classpath) classpath = "".join(classpath) self.last_cmd = lst = [] lst.extend(Utils.to_list(env.JAVADOC)) lst.extend(['-d', self.generator.javadoc_output.abspath()]) lst.extend(['-sourcepath', srcpath]) lst.extend(['-classpath', classpath]) lst.extend(['-subpackages']) lst.extend(self.generator.javadoc_package) lst = [x for x in lst if x] self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) def post_run(self): nodes = self.generator.javadoc_output.ant_glob('**', quiet=True) for node in nodes: 
self.generator.bld.node_sigs[node] = self.uid() self.generator.bld.task_sigs[self.uid()] = self.cache_sig def configure(self): """ Detects the javac, java and jar programs """ # If JAVA_PATH is set, we prepend it to the path list java_path = self.environ['PATH'].split(os.pathsep) v = self.env if 'JAVA_HOME' in self.environ: java_path = [os.path.join(self.environ['JAVA_HOME'], 'bin')] + java_path self.env.JAVA_HOME = [self.environ['JAVA_HOME']] for x in 'javac java jar javadoc'.split(): self.find_program(x, var=x.upper(), path_list=java_path, mandatory=(x not in ('javadoc'))) if 'CLASSPATH' in self.environ: v.CLASSPATH = self.environ['CLASSPATH'] if not v.JAR: self.fatal('jar is required for making java packages') if not v.JAVAC: self.fatal('javac is required for compiling java classes') v.JARCREATE = 'cf' # can use cvf v.JAVACFLAGS = [] @conf def check_java_class(self, classname, with_classpath=None): """ Checks if the specified java class exists :param classname: class to check, like java.util.HashMap :type classname: string :param with_classpath: additional classpath to give :type with_classpath: string """ javatestdir = '.waf-javatest' classpath = javatestdir if self.env.CLASSPATH: classpath += os.pathsep + self.env.CLASSPATH if isinstance(with_classpath, str): classpath += os.pathsep + with_classpath shutil.rmtree(javatestdir, True) os.mkdir(javatestdir) Utils.writef(os.path.join(javatestdir, 'Test.java'), class_check_source) # Compile the source self.exec_command(self.env.JAVAC + [os.path.join(javatestdir, 'Test.java')], shell=False) # Try to run the app cmd = self.env.JAVA + ['-cp', classpath, 'Test', classname] self.to_log("%s\n" % str(cmd)) found = self.exec_command(cmd, shell=False) self.msg('Checking for java class %s' % classname, not found) shutil.rmtree(javatestdir, True) return found @conf def check_jni_headers(conf): """ Checks for jni headers and libraries. 
On success the conf.env variables xxx_JAVA are added for use in C/C++ targets:: def options(opt): opt.load('compiler_c') def configure(conf): conf.load('compiler_c java') conf.check_jni_headers() def build(bld): bld.shlib(source='a.c', target='app', use='JAVA') """ if not conf.env.CC_NAME and not conf.env.CXX_NAME: conf.fatal('load a compiler first (gcc, g++, ..)') if not conf.env.JAVA_HOME: conf.fatal('set JAVA_HOME in the system environment') # jni requires the jvm javaHome = conf.env.JAVA_HOME[0] dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/include') if dir is None: dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/../Headers') # think different?! if dir is None: conf.fatal('JAVA_HOME does not seem to be set properly') f = dir.ant_glob('**/(jni|jni_md).h') incDirs = [x.parent.abspath() for x in f] dir = conf.root.find_dir(conf.env.JAVA_HOME[0]) f = dir.ant_glob('**/*jvm.(so|dll|dylib)') libDirs = [x.parent.abspath() for x in f] or [javaHome] # On windows, we need both the .dll and .lib to link. On my JDK, they are # in different directories... f = dir.ant_glob('**/*jvm.(lib)') if f: libDirs = [[x, y.parent.abspath()] for x in libDirs for y in f] if conf.env.DEST_OS == 'freebsd': conf.env.append_unique('LINKFLAGS_JAVA', '-pthread') for d in libDirs: try: conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm', libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA') except Exception: pass else: break else: conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs)
16,882
Python
.py
483
32.231884
118
0.705752
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,711
clangxx.py
projecthamster_hamster/waflib/Tools/clangxx.py
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2009-2018 (ita)

"""
Detect the Clang++ C++ compiler
"""

from waflib.Tools import ccroot, ar, gxx
from waflib.Configure import conf

@conf
def find_clangxx(conf):
	"""
	Looks for clang++ on the path, runs it to confirm that it really is
	clang++, and records the result in ``conf.env.CXX`` / ``conf.env.CXX_NAME``.
	"""
	cxx_path = conf.find_program('clang++', var='CXX')
	# executes the binary; clang=True enables clang-specific version parsing
	conf.get_cc_version(cxx_path, clang=True)
	conf.env.CXX_NAME = 'clang'

def configure(conf):
	"""
	Configuration entry point: detect clang++ and an archiver, then reuse
	the g++-style flag and platform setup (clang++ is gcc-compatible).
	"""
	conf.find_clangxx()
	conf.find_program(['llvm-ar', 'ar'], var='AR')
	conf.find_ar()
	conf.gxx_common_flags()
	conf.gxx_modifier_platform()
	conf.cxx_load_tools()
	conf.cxx_add_flags()
	conf.link_add_flags()
648
Python
.py
25
24.16
74
0.716828
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,712
gnu_dirs.py
projecthamster_hamster/waflib/Tools/gnu_dirs.py
#!/usr/bin/env python
# encoding: utf-8
# Ali Sabil, 2007

"""
Sets various standard variables such as INCLUDEDIR. SBINDIR and others. To use this module just call::

	opt.load('gnu_dirs')

and::

	conf.load('gnu_dirs')

Add options for the standard GNU directories, this tool will add the options
found in autotools, and will update the environment with the following
installation variables:

============== ========================================= =======================
Variable       Description                               Default Value
============== ========================================= =======================
PREFIX         installation prefix                       /usr/local
EXEC_PREFIX    installation prefix for binaries          PREFIX
BINDIR         user commands                             EXEC_PREFIX/bin
SBINDIR        system binaries                           EXEC_PREFIX/sbin
LIBEXECDIR     program-specific binaries                 EXEC_PREFIX/libexec
SYSCONFDIR     host-specific configuration               PREFIX/etc
SHAREDSTATEDIR architecture-independent variable data    PREFIX/com
LOCALSTATEDIR  variable data                             PREFIX/var
LIBDIR         object code libraries                     EXEC_PREFIX/lib
INCLUDEDIR     header files                              PREFIX/include
OLDINCLUDEDIR  header files for non-GCC compilers        /usr/include
DATAROOTDIR    architecture-independent data root        PREFIX/share
DATADIR        architecture-independent data             DATAROOTDIR
INFODIR        GNU "info" documentation                  DATAROOTDIR/info
LOCALEDIR      locale-dependent data                     DATAROOTDIR/locale
MANDIR         manual pages                              DATAROOTDIR/man
DOCDIR         documentation root                        DATAROOTDIR/doc/APPNAME
HTMLDIR        HTML documentation                        DOCDIR
DVIDIR         DVI documentation                         DOCDIR
PDFDIR         PDF documentation                         DOCDIR
PSDIR          PostScript documentation                  DOCDIR
============== ========================================= =======================
"""

import os, re
from waflib import Utils, Options, Context

# one "name, description, default" entry per line; parsed into _options below
gnuopts = '''
bindir, user commands, ${EXEC_PREFIX}/bin
sbindir, system binaries, ${EXEC_PREFIX}/sbin
libexecdir, program-specific binaries, ${EXEC_PREFIX}/libexec
sysconfdir, host-specific configuration, ${PREFIX}/etc
sharedstatedir, architecture-independent variable data, ${PREFIX}/com
localstatedir, variable data, ${PREFIX}/var
libdir, object code libraries, ${EXEC_PREFIX}/lib%s
includedir, header files, ${PREFIX}/include
oldincludedir, header files for non-GCC compilers, /usr/include
datarootdir, architecture-independent data root, ${PREFIX}/share
datadir, architecture-independent data, ${DATAROOTDIR}
infodir, GNU "info" documentation, ${DATAROOTDIR}/info
localedir, locale-dependent data, ${DATAROOTDIR}/locale
mandir, manual pages, ${DATAROOTDIR}/man
docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE}
htmldir, HTML documentation, ${DOCDIR}
dvidir, DVI documentation, ${DOCDIR}
pdfdir, PDF documentation, ${DOCDIR}
psdir, PostScript documentation, ${DOCDIR}
''' % Utils.lib64()

_options = [x.split(', ') for x in gnuopts.splitlines() if x]

def configure(conf):
	"""
	Reads the command-line options to set lots of variables in *conf.env*.
	The variables BINDIR and LIBDIR will be overwritten.
	"""
	def get_param(varname, default):
		return getattr(Options.options, varname, '') or default

	env = conf.env
	env.LIBDIR = env.BINDIR = []
	env.EXEC_PREFIX = get_param('EXEC_PREFIX', env.PREFIX)
	env.PACKAGE = getattr(Context.g_module, 'APPNAME', None) or env.PACKAGE

	# substitutions may reference each other (e.g. ${DATAROOTDIR}), so keep
	# sweeping until nothing is left unresolved or we run out of rounds
	complete = False
	rounds = 0
	while not complete and rounds < len(_options) + 1:
		rounds += 1
		complete = True
		for name, desc, default in _options:
			name = name.upper()
			if not env[name]:
				try:
					env[name] = Utils.subst_vars(get_param(name, default).replace('/', os.sep), env)
				except TypeError:
					complete = False

	if not complete:
		unresolved = [x for x, _, _ in _options if not env[x.upper()]]
		raise conf.errors.WafError('Variable substitution failure %r' % unresolved)

def options(opt):
	"""
	Adds lots of command-line options, for example::

		--exec-prefix: EXEC_PREFIX
	"""
	inst_dir = opt.add_option_group('Installation prefix',
		'By default, "waf install" will put the files in "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"')

	# move the generic --prefix/--destdir options into this group
	for flag in ('--prefix', '--destdir'):
		option = opt.parser.get_option(flag)
		if option:
			opt.parser.remove_option(flag)
			inst_dir.add_option(option)

	inst_dir.add_option('--exec-prefix',
		help = 'installation prefix for binaries [PREFIX]',
		default = '',
		dest = 'EXEC_PREFIX')

	dirs_options = opt.add_option_group('Installation directories')

	for name, desc, default in _options:
		option_name = '--' + name
		str_default = default
		# show the default with the ${...} markers stripped, e.g. [PREFIX/etc]
		str_help = '%s [%s]' % (desc, re.sub(r'\$\{([^}]+)\}', r'\1', str_default))
		dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper())
5,182
Python
.py
111
44.720721
102
0.646209
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,713
compiler_d.py
projecthamster_hamster/waflib/Tools/compiler_d.py
#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2007 (dv)
# Thomas Nagy, 2016-2018 (ita)

"""
Try to detect a D compiler from the list of supported compilers::

	def options(opt):
		opt.load('compiler_d')
	def configure(cnf):
		cnf.load('compiler_d')
	def build(bld):
		bld.program(source='main.d', target='app')

Only three D compilers are really present at the moment:

* gdc
* dmd, the ldc compiler having a very similar command-line interface
* ldc2
"""

import re
from waflib import Utils, Logs

d_compiler = {
'default' : ['gdc', 'dmd', 'ldc2']
}
"""
Dict mapping the platform names to lists of names of D compilers to try, in order of preference::

	from waflib.Tools.compiler_d import d_compiler
	d_compiler['default'] = ['gdc', 'dmd', 'ldc2']
"""

def default_compilers():
	# pick the per-platform preference list, falling back to 'default'
	platform = Utils.unversioned_sys_platform()
	names = d_compiler.get(platform, d_compiler['default'])
	return ' '.join(names)

def configure(conf):
	"""
	Detects a suitable D compiler

	:raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found
	"""
	try:
		candidates = conf.options.check_d_compiler or default_compilers()
	except AttributeError:
		conf.fatal("Add options(opt): opt.load('compiler_d')")

	for candidate in re.split('[ ,]+', candidates):
		# stash the env so a failed probe leaves no traces behind
		conf.env.stash()
		conf.start_msg('Checking for %r (D compiler)' % candidate)
		try:
			conf.load(candidate)
		except conf.errors.ConfigurationError as err:
			conf.env.revert()
			conf.end_msg(False)
			Logs.debug('compiler_d: %r', err)
		else:
			if conf.env.D:
				conf.end_msg(conf.env.get_flat('D'))
				conf.env.COMPILER_D = candidate
				conf.env.commit()
				break
			conf.env.revert()
			conf.end_msg(False)
	else:
		conf.fatal('could not configure a D compiler!')

def options(opt):
	"""
	This is how to provide compiler preferences on the command-line::

		$ waf configure --check-d-compiler=dmd
	"""
	test_for_compiler = default_compilers()
	group = opt.add_option_group('Configuration options')
	group.add_option('--check-d-compiler', default=None,
		help='list of D compilers to try [%s]' % test_for_compiler,
		dest='check_d_compiler')
	for name in test_for_compiler.split():
		opt.load(name)
2,265
Python
.py
70
29.885714
97
0.72156
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,714
fc_config.py
projecthamster_hamster/waflib/Tools/fc_config.py
#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) """ Fortran configuration helpers """ import re, os, sys, shlex from waflib.Configure import conf from waflib.TaskGen import feature, before_method FC_FRAGMENT = ' program main\n end program main\n' FC_FRAGMENT2 = ' PROGRAM MAIN\n END\n' # what's the actual difference between these? @conf def fc_flags(conf): """ Defines common fortran configuration flags and file extensions """ v = conf.env v.FC_SRC_F = [] v.FC_TGT_F = ['-c', '-o'] v.FCINCPATH_ST = '-I%s' v.FCDEFINES_ST = '-D%s' if not v.LINK_FC: v.LINK_FC = v.FC v.FCLNK_SRC_F = [] v.FCLNK_TGT_F = ['-o'] v.FCFLAGS_fcshlib = ['-fpic'] v.LINKFLAGS_fcshlib = ['-shared'] v.fcshlib_PATTERN = 'lib%s.so' v.fcstlib_PATTERN = 'lib%s.a' v.FCLIB_ST = '-l%s' v.FCLIBPATH_ST = '-L%s' v.FCSTLIB_ST = '-l%s' v.FCSTLIBPATH_ST = '-L%s' v.FCSTLIB_MARKER = '-Wl,-Bstatic' v.FCSHLIB_MARKER = '-Wl,-Bdynamic' v.SONAME_ST = '-Wl,-h,%s' @conf def fc_add_flags(conf): """ Adds FCFLAGS / LDFLAGS / LINKFLAGS from os.environ to conf.env """ conf.add_os_flags('FCPPFLAGS', dup=False) conf.add_os_flags('FCFLAGS', dup=False) conf.add_os_flags('LINKFLAGS', dup=False) conf.add_os_flags('LDFLAGS', dup=False) @conf def check_fortran(self, *k, **kw): """ Compiles a Fortran program to ensure that the settings are correct """ self.check_cc( fragment = FC_FRAGMENT, compile_filename = 'test.f', features = 'fc fcprogram', msg = 'Compiling a simple fortran app') @conf def check_fc(self, *k, **kw): """ Same as :py:func:`waflib.Tools.c_config.check` but defaults to the *Fortran* programming language (this overrides the C defaults in :py:func:`waflib.Tools.c_config.validate_c`) """ kw['compiler'] = 'fc' if not 'compile_mode' in kw: kw['compile_mode'] = 'fc' if not 'type' in kw: kw['type'] = 'fcprogram' if not 'compile_filename' in kw: kw['compile_filename'] = 'test.f90' if not 'code' in kw: kw['code'] = FC_FRAGMENT return self.check(*k, **kw) # 
------------------------------------------------------------------------ # --- These are the default platform modifiers, refactored here for # convenience. gfortran and g95 have much overlap. # ------------------------------------------------------------------------ @conf def fortran_modifier_darwin(conf): """ Defines Fortran flags and extensions for OSX systems """ v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.LINKFLAGS_fcshlib = ['-dynamiclib'] v.fcshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.LINKFLAGS_fcstlib = [] v.FCSHLIB_MARKER = '' v.FCSTLIB_MARKER = '' v.SONAME_ST = '' @conf def fortran_modifier_win32(conf): """ Defines Fortran flags for Windows platforms """ v = conf.env v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' v.fcshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.FCFLAGS_fcshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. 
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def fortran_modifier_cygwin(conf): """ Defines Fortran flags for use on cygwin """ fortran_modifier_win32(conf) v = conf.env v.fcshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_fcshlib', ['-Wl,--enable-auto-image-base']) v.FCFLAGS_fcshlib = [] # ------------------------------------------------------------------------ @conf def check_fortran_dummy_main(self, *k, **kw): """ Determines if a main function is needed by compiling a code snippet with the C compiler and linking it with the Fortran compiler (useful on unix-like systems) """ if not self.env.CC: self.fatal('A c compiler is required for check_fortran_dummy_main') lst = ['MAIN__', '__MAIN', '_MAIN', 'MAIN_', 'MAIN'] lst.extend([m.lower() for m in lst]) lst.append('') self.start_msg('Detecting whether we need a dummy main') for main in lst: kw['fortran_main'] = main try: self.check_cc( fragment = 'int %s() { return 0; }\n' % (main or 'test'), features = 'c fcprogram', mandatory = True ) if not main: self.env.FC_MAIN = -1 self.end_msg('no') else: self.env.FC_MAIN = main self.end_msg('yes %s' % main) break except self.errors.ConfigurationError: pass else: self.end_msg('not found') self.fatal('could not detect whether fortran requires a dummy main, see the config.log') # ------------------------------------------------------------------------ GCC_DRIVER_LINE = re.compile('^Driving:') POSIX_STATIC_EXT = re.compile(r'\S+\.a') POSIX_LIB_FLAGS = re.compile(r'-l\S+') @conf def is_link_verbose(self, txt): """Returns True if 'useful' link options can be found in txt""" assert isinstance(txt, str) for line in txt.splitlines(): if not GCC_DRIVER_LINE.search(line): if POSIX_STATIC_EXT.search(line) or POSIX_LIB_FLAGS.search(line): return True return False @conf def check_fortran_verbose_flag(self, *k, **kw): """ Checks what kind of verbose (-v) flag works, then sets it to env.FC_VERBOSE_FLAG """ self.start_msg('fortran link verbose flag') for x in ('-v', 
'--verbose', '-verbose', '-V'): try: self.check_cc( features = 'fc fcprogram_test', fragment = FC_FRAGMENT2, compile_filename = 'test.f', linkflags = [x], mandatory=True) except self.errors.ConfigurationError: pass else: # output is on stderr or stdout (for xlf) if self.is_link_verbose(self.test_bld.err) or self.is_link_verbose(self.test_bld.out): self.end_msg(x) break else: self.end_msg('failure') self.fatal('Could not obtain the fortran link verbose flag (see config.log)') self.env.FC_VERBOSE_FLAG = x return x # ------------------------------------------------------------------------ # linkflags which match those are ignored LINKFLAGS_IGNORED = [r'-lang*', r'-lcrt[a-zA-Z0-9\.]*\.o', r'-lc$', r'-lSystem', r'-libmil', r'-LIST:*', r'-LNO:*'] if os.name == 'nt': LINKFLAGS_IGNORED.extend([r'-lfrt*', r'-luser32', r'-lkernel32', r'-ladvapi32', r'-lmsvcrt', r'-lshell32', r'-lmingw', r'-lmoldname']) else: LINKFLAGS_IGNORED.append(r'-lgcc*') RLINKFLAGS_IGNORED = [re.compile(f) for f in LINKFLAGS_IGNORED] def _match_ignore(line): """Returns True if the line should be ignored (Fortran verbose flag test)""" for i in RLINKFLAGS_IGNORED: if i.match(line): return True return False def parse_fortran_link(lines): """Given the output of verbose link of Fortran compiler, this returns a list of flags necessary for linking using the standard linker.""" final_flags = [] for line in lines: if not GCC_DRIVER_LINE.match(line): _parse_flink_line(line, final_flags) return final_flags SPACE_OPTS = re.compile('^-[LRuYz]$') NOSPACE_OPTS = re.compile('^-[RL]') def _parse_flink_token(lexer, token, tmp_flags): # Here we go (convention for wildcard is shell, not regex !) 
# 1 TODO: we first get some root .a libraries # 2 TODO: take everything starting by -bI:* # 3 Ignore the following flags: -lang* | -lcrt*.o | -lc | # -lgcc* | -lSystem | -libmil | -LANG:=* | -LIST:* | -LNO:*) # 4 take into account -lkernel32 # 5 For options of the kind -[[LRuYz]], as they take one argument # after, the actual option is the next token # 6 For -YP,*: take and replace by -Larg where arg is the old # argument # 7 For -[lLR]*: take # step 3 if _match_ignore(token): pass # step 4 elif token.startswith('-lkernel32') and sys.platform == 'cygwin': tmp_flags.append(token) # step 5 elif SPACE_OPTS.match(token): t = lexer.get_token() if t.startswith('P,'): t = t[2:] for opt in t.split(os.pathsep): tmp_flags.append('-L%s' % opt) # step 6 elif NOSPACE_OPTS.match(token): tmp_flags.append(token) # step 7 elif POSIX_LIB_FLAGS.match(token): tmp_flags.append(token) else: # ignore anything not explicitly taken into account pass t = lexer.get_token() return t def _parse_flink_line(line, final_flags): """private""" lexer = shlex.shlex(line, posix = True) lexer.whitespace_split = True t = lexer.get_token() tmp_flags = [] while t: t = _parse_flink_token(lexer, t, tmp_flags) final_flags.extend(tmp_flags) return final_flags @conf def check_fortran_clib(self, autoadd=True, *k, **kw): """ Obtains the flags for linking with the C library if this check works, add uselib='CLIB' to your task generators """ if not self.env.FC_VERBOSE_FLAG: self.fatal('env.FC_VERBOSE_FLAG is not set: execute check_fortran_verbose_flag?') self.start_msg('Getting fortran runtime link flags') try: self.check_cc( fragment = FC_FRAGMENT2, compile_filename = 'test.f', features = 'fc fcprogram_test', linkflags = [self.env.FC_VERBOSE_FLAG] ) except Exception: self.end_msg(False) if kw.get('mandatory', True): conf.fatal('Could not find the c library flags') else: out = self.test_bld.err flags = parse_fortran_link(out.splitlines()) self.end_msg('ok (%s)' % ' '.join(flags)) self.env.LINKFLAGS_CLIB = flags 
return flags return [] def getoutput(conf, cmd, stdin=False): """ Obtains Fortran command outputs """ from waflib import Errors if conf.env.env: env = conf.env.env else: env = dict(os.environ) env['LANG'] = 'C' input = stdin and '\n'.encode() or None try: out, err = conf.cmd_and_log(cmd, env=env, output=0, input=input) except Errors.WafError as e: # An WafError might indicate an error code during the command # execution, in this case we still obtain the stderr and stdout, # which we can use to find the version string. if not (hasattr(e, 'stderr') and hasattr(e, 'stdout')): raise e else: # Ignore the return code and return the original # stdout and stderr. out = e.stdout err = e.stderr except Exception: conf.fatal('could not determine the compiler version %r' % cmd) return (out, err) # ------------------------------------------------------------------------ ROUTINES_CODE = """\ subroutine foobar() return end subroutine foo_bar() return end """ MAIN_CODE = """ void %(dummy_func_nounder)s(void); void %(dummy_func_under)s(void); int %(main_func_name)s() { %(dummy_func_nounder)s(); %(dummy_func_under)s(); return 0; } """ @feature('link_main_routines_func') @before_method('process_source') def link_main_routines_tg_method(self): """ The configuration test declares a unique task generator, so we create other task generators from there for fortran link tests """ def write_test_file(task): task.outputs[0].write(task.generator.code) bld = self.bld bld(rule=write_test_file, target='main.c', code=MAIN_CODE % self.__dict__) bld(rule=write_test_file, target='test.f', code=ROUTINES_CODE) bld(features='fc fcstlib', source='test.f', target='test') bld(features='c fcprogram', source='main.c', target='app', use='test') def mangling_schemes(): """ Generate triplets for use with mangle_name (used in check_fortran_mangling) the order is tuned for gfortan """ for u in ('_', ''): for du in ('', '_'): for c in ("lower", "upper"): yield (u, du, c) def mangle_name(u, du, c, name): """Mangle 
a name from a triplet (used in check_fortran_mangling)""" return getattr(name, c)() + u + (name.find('_') != -1 and du or '') @conf def check_fortran_mangling(self, *k, **kw): """ Detect the mangling scheme, sets FORTRAN_MANGLING to the triplet found This test will compile a fortran static library, then link a c app against it """ if not self.env.CC: self.fatal('A c compiler is required for link_main_routines') if not self.env.FC: self.fatal('A fortran compiler is required for link_main_routines') if not self.env.FC_MAIN: self.fatal('Checking for mangling requires self.env.FC_MAIN (execute "check_fortran_dummy_main" first?)') self.start_msg('Getting fortran mangling scheme') for (u, du, c) in mangling_schemes(): try: self.check_cc( compile_filename = [], features = 'link_main_routines_func', msg = 'nomsg', errmsg = 'nomsg', dummy_func_nounder = mangle_name(u, du, c, 'foobar'), dummy_func_under = mangle_name(u, du, c, 'foo_bar'), main_func_name = self.env.FC_MAIN ) except self.errors.ConfigurationError: pass else: self.end_msg("ok ('%s', '%s', '%s-case')" % (u, du, c)) self.env.FORTRAN_MANGLING = (u, du, c) break else: self.end_msg(False) self.fatal('mangler not found') return (u, du, c) @feature('pyext') @before_method('propagate_uselib_vars', 'apply_link') def set_lib_pat(self): """Sets the Fortran flags for linking with Python""" self.env.fcshlib_PATTERN = self.env.pyext_PATTERN @conf def detect_openmp(self): """ Detects openmp flags and sets the OPENMP ``FCFLAGS``/``LINKFLAGS`` """ for x in ('-fopenmp','-openmp','-mp','-xopenmp','-omp','-qsmp=omp'): try: self.check_fc( msg = 'Checking for OpenMP flag %s' % x, fragment = 'program main\n call omp_get_num_threads()\nend program main', fcflags = x, linkflags = x, uselib_store = 'OPENMP' ) except self.errors.ConfigurationError: pass else: break else: self.fatal('Could not find OpenMP') @conf def check_gfortran_o_space(self): if self.env.FC_NAME != 'GFORTRAN' or int(self.env.FC_VERSION[0]) > 4: # This is for old 
compilers and only for gfortran. # No idea how other implementations handle this. Be safe and bail out. return self.env.stash() self.env.FCLNK_TGT_F = ['-o', ''] try: self.check_fc(msg='Checking if the -o link must be split from arguments', fragment=FC_FRAGMENT, features='fc fcshlib') except self.errors.ConfigurationError: self.env.revert() else: self.env.commit()
13,986
Python
.py
430
29.965116
135
0.659283
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,715
gfortran.py
projecthamster_hamster/waflib/Tools/gfortran.py
#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import re from waflib import Utils from waflib.Tools import fc, fc_config, fc_scan, ar from waflib.Configure import conf @conf def find_gfortran(conf): """Find the gfortran program (will look in the environment variable 'FC')""" fc = conf.find_program(['gfortran','g77'], var='FC') # (fallback to g77 for systems, where no gfortran is available) conf.get_gfortran_version(fc) conf.env.FC_NAME = 'GFORTRAN' @conf def gfortran_flags(conf): v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.FORTRANMODFLAG = ['-J', ''] # template for module path v.FCFLAGS_DEBUG = ['-Werror'] # why not @conf def gfortran_modifier_win32(conf): fc_config.fortran_modifier_win32(conf) @conf def gfortran_modifier_cygwin(conf): fc_config.fortran_modifier_cygwin(conf) @conf def gfortran_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def gfortran_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() gfortran_modifier_func = getattr(conf, 'gfortran_modifier_' + dest_os, None) if gfortran_modifier_func: gfortran_modifier_func() @conf def get_gfortran_version(conf, fc): """Get the compiler version""" # ensure this is actually gfortran, not an imposter. 
version_re = re.compile(r"GNU\s*Fortran", re.I).search cmd = fc + ['--version'] out, err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the compiler type') # --- now get more detailed info -- see c_config.get_cc_version cmd = fc + ['-dM', '-E', '-'] out, err = fc_config.getoutput(conf, cmd, stdin=True) if out.find('__GNUC__') < 0: conf.fatal('Could not determine the compiler type') k = {} out = out.splitlines() import shlex for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k def isT(var): return var in k and k[var] != '0' conf.env.FC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__']) def configure(conf): conf.find_gfortran() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.gfortran_flags() conf.gfortran_modifier_platform() conf.check_gfortran_o_space()
2,322
Python
.py
76
28.434211
85
0.713324
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,716
suncxx.py
projecthamster_hamster/waflib/Tools/suncxx.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_sxx(conf): """ Detects the sun C++ compiler """ v = conf.env cc = conf.find_program(['CC', 'c++'], var='CXX') try: conf.cmd_and_log(cc + ['-flags']) except Errors.WafError: conf.fatal('%r is not a Sun compiler' % cc) v.CXX_NAME = 'sun' conf.get_suncc_version(cc) @conf def sxx_common_flags(conf): """ Flags required for executing the sun C++ compiler """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o', ''] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o', ''] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Bdynamic' v.STLIB_MARKER = '-Bstatic' v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-xcode=pic32', '-DPIC'] v.LINKFLAGS_cxxshlib = ['-G'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = ['-Bstatic'] v.cxxstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_sxx() conf.find_ar() conf.sxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags()
1,511
Python
.py
54
25.907407
62
0.601108
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,717
suncc.py
projecthamster_hamster/waflib/Tools/suncc.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_scc(conf): """ Detects the Sun C compiler """ v = conf.env cc = conf.find_program('cc', var='CC') try: conf.cmd_and_log(cc + ['-flags']) except Errors.WafError: conf.fatal('%r is not a Sun compiler' % cc) v.CC_NAME = 'sun' conf.get_suncc_version(cc) @conf def scc_common_flags(conf): """ Flags required for executing the sun C compiler """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o', ''] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o', ''] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Bdynamic' v.STLIB_MARKER = '-Bstatic' v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-xcode=pic32', '-DPIC'] v.LINKFLAGS_cshlib = ['-G'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = ['-Bstatic'] v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_scc() conf.find_ar() conf.scc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
1,491
Python
.py
54
25.537037
62
0.591292
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,718
md5_tstamp.py
projecthamster_hamster/waflib/Tools/md5_tstamp.py
#! /usr/bin/env python # encoding: utf-8 """ Re-calculate md5 hashes of files only when the file time have changed:: def options(opt): opt.load('md5_tstamp') The hashes can also reflect either the file contents (STRONGEST=True) or the file time and file size. The performance benefits of this module are usually insignificant. """ import os, stat from waflib import Utils, Build, Node STRONGEST = True Build.SAVED_ATTRS.append('hashes_md5_tstamp') def h_file(self): filename = self.abspath() st = os.stat(filename) cache = self.ctx.hashes_md5_tstamp if filename in cache and cache[filename][0] == st.st_mtime: return cache[filename][1] if STRONGEST: ret = Utils.h_file(filename) else: if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('Not a file') ret = Utils.md5(str((st.st_mtime, st.st_size)).encode()).digest() cache[filename] = (st.st_mtime, ret) return ret h_file.__doc__ = Node.Node.h_file.__doc__ Node.Node.h_file = h_file
963
Python
.py
30
30
76
0.735358
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,719
asm.py
projecthamster_hamster/waflib/Tools/asm.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) """ Assembly support, used by tools such as gas and nasm To declare targets using assembly:: def configure(conf): conf.load('gcc gas') def build(bld): bld( features='c cstlib asm', source = 'test.S', target = 'asmtest') bld( features='asm asmprogram', source = 'test.S', target = 'asmtest') Support for pure asm programs and libraries should also work:: def configure(conf): conf.load('nasm') conf.find_program('ld', 'ASLINK') def build(bld): bld( features='asm asmprogram', source = 'test.S', target = 'asmtest') """ import re from waflib import Errors, Logs, Task from waflib.Tools.ccroot import link_task, stlink_task from waflib.TaskGen import extension from waflib.Tools import c_preproc re_lines = re.compile( '^[ \t]*(?:%)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE) class asm_parser(c_preproc.c_parser): def filter_comments(self, node): code = node.read() code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) return re_lines.findall(code) class asm(Task.Task): """ Compiles asm files by gas/nasm/yasm/... 
""" color = 'BLUE' run_str = '${AS} ${ASFLAGS} ${ASMPATH_ST:INCPATHS} ${ASMDEFINES_ST:DEFINES} ${AS_SRC_F}${SRC} ${AS_TGT_F}${TGT}' def scan(self): if self.env.ASM_NAME == 'gas': return c_preproc.scan(self) elif self.env.ASM_NAME == 'nasm': Logs.warn('The Nasm dependency scanner is incomplete!') try: incn = self.generator.includes_nodes except AttributeError: raise Errors.WafError('%r is missing the "asm" feature' % self.generator) if c_preproc.go_absolute: nodepaths = incn else: nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)] tmp = asm_parser(nodepaths) tmp.start(self.inputs[0], self.env) return (tmp.nodes, tmp.names) @extension('.s', '.S', '.asm', '.ASM', '.spp', '.SPP') def asm_hook(self, node): """ Binds the asm extension to the asm task :param node: input file :type node: :py:class:`waflib.Node.Node` """ return self.create_compiled_task('asm', node) class asmprogram(link_task): "Links object files into a c program" run_str = '${ASLINK} ${ASLINKFLAGS} ${ASLNK_TGT_F}${TGT} ${ASLNK_SRC_F}${SRC}' ext_out = ['.bin'] inst_to = '${BINDIR}' class asmshlib(asmprogram): "Links object files into a c shared library" inst_to = '${LIBDIR}' class asmstlib(stlink_task): "Links object files into a c static library" pass # do not remove def configure(conf): conf.env.ASMPATH_ST = '-I%s' conf.env.ASMDEFINES_ST = '-D%s'
2,693
Python
.py
85
29.105882
113
0.693735
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,720
ar.py
projecthamster_hamster/waflib/Tools/ar.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) """ The **ar** program creates static libraries. This tool is almost always loaded from others (C, C++, D, etc) for static library support. """ from waflib.Configure import conf @conf def find_ar(conf): """Configuration helper used by C/C++ tools to enable the support for static libraries""" conf.load('ar') def configure(conf): """Finds the ar program and sets the default flags in ``conf.env.ARFLAGS``""" conf.find_program('ar', var='AR') conf.add_os_flags('ARFLAGS') if not conf.env.ARFLAGS: conf.env.ARFLAGS = ['rcs']
634
Python
.py
19
31.684211
90
0.722951
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,721
c_config.py
projecthamster_hamster/waflib/Tools/c_config.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ C/C++/D configuration helpers """ from __future__ import with_statement import os, re, shlex from waflib import Build, Utils, Task, Options, Logs, Errors, Runner from waflib.TaskGen import after_method, feature from waflib.Configure import conf WAF_CONFIG_H = 'config.h' """default name for the config.h file""" DEFKEYS = 'define_key' INCKEYS = 'include_key' SNIP_EMPTY_PROGRAM = ''' int main(int argc, char **argv) { (void)argc; (void)argv; return 0; } ''' MACRO_TO_DESTOS = { '__linux__' : 'linux', '__GNU__' : 'gnu', # hurd '__FreeBSD__' : 'freebsd', '__NetBSD__' : 'netbsd', '__OpenBSD__' : 'openbsd', '__sun' : 'sunos', '__hpux' : 'hpux', '__sgi' : 'irix', '_AIX' : 'aix', '__CYGWIN__' : 'cygwin', '__MSYS__' : 'cygwin', '_UWIN' : 'uwin', '_WIN64' : 'win32', '_WIN32' : 'win32', # Note about darwin: this is also tested with 'defined __APPLE__ && defined __MACH__' somewhere below in this file. '__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__' : 'darwin', '__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__' : 'darwin', # iphone '__QNX__' : 'qnx', '__native_client__' : 'nacl' # google native client platform } MACRO_TO_DEST_CPU = { '__x86_64__' : 'x86_64', '__amd64__' : 'x86_64', '__i386__' : 'x86', '__ia64__' : 'ia', '__mips__' : 'mips', '__sparc__' : 'sparc', '__alpha__' : 'alpha', '__aarch64__' : 'aarch64', '__thumb__' : 'thumb', '__arm__' : 'arm', '__hppa__' : 'hppa', '__powerpc__' : 'powerpc', '__ppc__' : 'powerpc', '__convex__' : 'convex', '__m68k__' : 'm68k', '__s390x__' : 's390x', '__s390__' : 's390', '__sh__' : 'sh', '__xtensa__' : 'xtensa', '__e2k__' : 'e2k', '__riscv' : 'riscv', } @conf def parse_flags(self, line, uselib_store, env=None, force_static=False, posix=None): """ Parses flags from the input lines, and adds them to the relevant use variables:: def configure(conf): conf.parse_flags('-O3', 'FOO') # conf.env.CXXFLAGS_FOO = ['-O3'] # conf.env.CFLAGS_FOO = ['-O3'] :param line: flags :type 
line: string :param uselib_store: where to add the flags :type uselib_store: string :param env: config set or conf.env by default :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param force_static: force usage of static libraries :type force_static: bool default False :param posix: usage of POSIX mode for shlex lexical analiysis library :type posix: bool default True """ assert(isinstance(line, str)) env = env or self.env # Issue 811 and 1371 if posix is None: posix = True if '\\' in line: posix = ('\\ ' in line) or ('\\\\' in line) lex = shlex.shlex(line, posix=posix) lex.whitespace_split = True lex.commenters = '' lst = list(lex) so_re = re.compile(r"\.so(?:\.[0-9]+)*$") # append_unique is not always possible # for example, apple flags may require both -arch i386 and -arch ppc uselib = uselib_store def app(var, val): env.append_value('%s_%s' % (var, uselib), val) def appu(var, val): env.append_unique('%s_%s' % (var, uselib), val) static = False while lst: x = lst.pop(0) st = x[:2] ot = x[2:] if st == '-I' or st == '/I': if not ot: ot = lst.pop(0) appu('INCLUDES', ot) elif st == '-i': tmp = [x, lst.pop(0)] app('CFLAGS', tmp) app('CXXFLAGS', tmp) elif st == '-D' or (env.CXX_NAME == 'msvc' and st == '/D'): # not perfect but.. 
if not ot: ot = lst.pop(0) app('DEFINES', ot) elif st == '-l': if not ot: ot = lst.pop(0) prefix = 'STLIB' if (force_static or static) else 'LIB' app(prefix, ot) elif st == '-L': if not ot: ot = lst.pop(0) prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' appu(prefix, ot) elif x.startswith('/LIBPATH:'): prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' appu(prefix, x.replace('/LIBPATH:', '')) elif x.startswith('-std='): prefix = 'CXXFLAGS' if '++' in x else 'CFLAGS' app(prefix, x) elif x.startswith('+') or x in ('-pthread', '-fPIC', '-fpic', '-fPIE', '-fpie', '-flto', '-fno-lto'): app('CFLAGS', x) app('CXXFLAGS', x) app('LINKFLAGS', x) elif x == '-framework': appu('FRAMEWORK', lst.pop(0)) elif x.startswith('-F'): appu('FRAMEWORKPATH', x[2:]) elif x == '-Wl,-rpath' or x == '-Wl,-R': app('RPATH', lst.pop(0).lstrip('-Wl,')) elif x.startswith('-Wl,-R,'): app('RPATH', x[7:]) elif x.startswith('-Wl,-R'): app('RPATH', x[6:]) elif x.startswith('-Wl,-rpath,'): app('RPATH', x[11:]) elif x == '-Wl,-Bstatic' or x == '-Bstatic': static = True elif x == '-Wl,-Bdynamic' or x == '-Bdynamic': static = False elif x.startswith('-Wl') or x in ('-rdynamic', '-pie'): app('LINKFLAGS', x) elif x.startswith(('-m', '-f', '-dynamic', '-O', '-g')): # Adding the -W option breaks python builds on Openindiana app('CFLAGS', x) app('CXXFLAGS', x) elif x.startswith('-bundle'): app('LINKFLAGS', x) elif x.startswith(('-undefined', '-Xlinker')): arg = lst.pop(0) app('LINKFLAGS', [x, arg]) elif x.startswith(('-arch', '-isysroot')): tmp = [x, lst.pop(0)] app('CFLAGS', tmp) app('CXXFLAGS', tmp) app('LINKFLAGS', tmp) elif x.endswith(('.a', '.dylib', '.lib')) or so_re.search(x): appu('LINKFLAGS', x) # not cool, #762 else: self.to_log('Unhandled flag %r' % x) @conf def validate_cfg(self, kw): """ Searches for the program *pkg-config* if missing, and validates the parameters to pass to :py:func:`waflib.Tools.c_config.exec_cfg`. 
:param path: the **-config program to use** (default is *pkg-config*) :type path: list of string :param msg: message to display to describe the test executed :type msg: string :param okmsg: message to display when the test is successful :type okmsg: string :param errmsg: message to display in case of error :type errmsg: string """ if not 'path' in kw: if not self.env.PKGCONFIG: self.find_program('pkg-config', var='PKGCONFIG') kw['path'] = self.env.PKGCONFIG # verify that exactly one action is requested s = ('atleast_pkgconfig_version' in kw) + ('modversion' in kw) + ('package' in kw) if s != 1: raise ValueError('exactly one of atleast_pkgconfig_version, modversion and package must be set') if not 'msg' in kw: if 'atleast_pkgconfig_version' in kw: kw['msg'] = 'Checking for pkg-config version >= %r' % kw['atleast_pkgconfig_version'] elif 'modversion' in kw: kw['msg'] = 'Checking for %r version' % kw['modversion'] else: kw['msg'] = 'Checking for %r' %(kw['package']) # let the modversion check set the okmsg to the detected version if not 'okmsg' in kw and not 'modversion' in kw: kw['okmsg'] = 'yes' if not 'errmsg' in kw: kw['errmsg'] = 'not found' # pkg-config version if 'atleast_pkgconfig_version' in kw: pass elif 'modversion' in kw: if not 'uselib_store' in kw: kw['uselib_store'] = kw['modversion'] if not 'define_name' in kw: kw['define_name'] = '%s_VERSION' % Utils.quote_define_name(kw['uselib_store']) else: if not 'uselib_store' in kw: kw['uselib_store'] = Utils.to_list(kw['package'])[0].upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(kw['uselib_store']) @conf def exec_cfg(self, kw): """ Executes ``pkg-config`` or other ``-config`` applications to collect configuration flags: * if atleast_pkgconfig_version is given, check that pkg-config has the version n and return * if modversion is given, then return the module version * else, execute the *-config* program with the *args* and *variables* given, and set the flags on the 
*conf.env.FLAGS_name* variable :param path: the **-config program to use** :type path: list of string :param atleast_pkgconfig_version: minimum pkg-config version to use (disable other tests) :type atleast_pkgconfig_version: string :param package: package name, for example *gtk+-2.0* :type package: string :param uselib_store: if the test is successful, define HAVE\\_*name*. It is also used to define *conf.env.FLAGS_name* variables. :type uselib_store: string :param modversion: if provided, return the version of the given module and define *name*\\_VERSION :type modversion: string :param args: arguments to give to *package* when retrieving flags :type args: list of string :param variables: return the values of particular variables :type variables: list of string :param define_variable: additional variables to define (also in conf.env.PKG_CONFIG_DEFINES) :type define_variable: dict(string: string) :param pkg_config_path: paths where pkg-config should search for .pc config files (overrides env.PKG_CONFIG_PATH if exists) :type pkg_config_path: string, list of directories separated by colon :param force_static: force usage of static libraries :type force_static: bool default False :param posix: usage of POSIX mode for shlex lexical analiysis library :type posix: bool default True """ path = Utils.to_list(kw['path']) env = self.env.env or None if kw.get('pkg_config_path'): if not env: env = dict(self.environ) env['PKG_CONFIG_PATH'] = kw['pkg_config_path'] def define_it(): define_name = kw['define_name'] # by default, add HAVE_X to the config.h, else provide DEFINES_X for use=X if kw.get('global_define', 1): self.define(define_name, 1, False) else: self.env.append_unique('DEFINES_%s' % kw['uselib_store'], "%s=1" % define_name) if kw.get('add_have_to_env', 1): self.env[define_name] = 1 # pkg-config version if 'atleast_pkgconfig_version' in kw: cmd = path + ['--atleast-pkgconfig-version=%s' % kw['atleast_pkgconfig_version']] self.cmd_and_log(cmd, env=env) return # single 
version for a module if 'modversion' in kw: version = self.cmd_and_log(path + ['--modversion', kw['modversion']], env=env).strip() if not 'okmsg' in kw: kw['okmsg'] = version self.define(kw['define_name'], version) return version lst = [] + path defi = kw.get('define_variable') if not defi: defi = self.env.PKG_CONFIG_DEFINES or {} for key, val in defi.items(): lst.append('--define-variable=%s=%s' % (key, val)) static = kw.get('force_static', False) if 'args' in kw: args = Utils.to_list(kw['args']) if '--static' in args or '--static-libs' in args: static = True lst += args # tools like pkgconf expect the package argument after the -- ones -_- lst.extend(Utils.to_list(kw['package'])) # retrieving variables of a module if 'variables' in kw: v_env = kw.get('env', self.env) vars = Utils.to_list(kw['variables']) for v in vars: val = self.cmd_and_log(lst + ['--variable=' + v], env=env).strip() var = '%s_%s' % (kw['uselib_store'], v) v_env[var] = val return # so we assume the command-line will output flags to be parsed afterwards ret = self.cmd_and_log(lst, env=env) define_it() self.parse_flags(ret, kw['uselib_store'], kw.get('env', self.env), force_static=static, posix=kw.get('posix')) return ret @conf def check_cfg(self, *k, **kw): """ Checks for configuration flags using a **-config**-like program (pkg-config, sdl-config, etc). 
This wraps internal calls to :py:func:`waflib.Tools.c_config.validate_cfg` and :py:func:`waflib.Tools.c_config.exec_cfg` so check exec_cfg parameters descriptions for more details on kw passed A few examples:: def configure(conf): conf.load('compiler_c') conf.check_cfg(package='glib-2.0', args='--libs --cflags') conf.check_cfg(package='pango') conf.check_cfg(package='pango', uselib_store='MYPANGO', args=['--cflags', '--libs']) conf.check_cfg(package='pango', args=['pango >= 0.1.0', 'pango < 9.9.9', '--cflags', '--libs'], msg="Checking for 'pango 0.1.0'") conf.check_cfg(path='sdl-config', args='--cflags --libs', package='', uselib_store='SDL') conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', package='', uselib_store='OPEN_MPI', mandatory=False) # variables conf.check_cfg(package='gtk+-2.0', variables=['includedir', 'prefix'], uselib_store='FOO') print(conf.env.FOO_includedir) """ self.validate_cfg(kw) if 'msg' in kw: self.start_msg(kw['msg'], **kw) ret = None try: ret = self.exec_cfg(kw) except self.errors.WafError as e: if 'errmsg' in kw: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: self.to_log('Command failure: %s' % e) self.fatal('The configuration failed') else: if not ret: ret = True kw['success'] = ret if 'okmsg' in kw: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret def build_fun(bld): """ Build function that is used for running configuration tests with ``conf.check()`` """ if bld.kw['compile_filename']: node = bld.srcnode.make_node(bld.kw['compile_filename']) node.write(bld.kw['code']) o = bld(features=bld.kw['features'], source=bld.kw['compile_filename'], target='testprog') for k, v in bld.kw.items(): setattr(o, k, v) if not bld.kw.get('quiet'): bld.conf.to_log("==>\n%s\n<==" % bld.kw['code']) @conf def validate_c(self, kw): """ Pre-checks the parameters that will be given to :py:func:`waflib.Configure.run_build` :param compiler: c or cxx (tries to guess what is best) :type compiler: string :param type: 
cprogram, cshlib, cstlib - not required if *features are given directly* :type type: binary to create :param feature: desired features for the task generator that will execute the test, for example ``cxx cxxstlib`` :type feature: list of string :param fragment: provide a piece of code for the test (default is to let the system create one) :type fragment: string :param uselib_store: define variables after the test is executed (IMPORTANT!) :type uselib_store: string :param use: parameters to use for building (just like the normal *use* keyword) :type use: list of string :param define_name: define to set when the check is over :type define_name: string :param execute: execute the resulting binary :type execute: bool :param define_ret: if execute is set to True, use the execution output in both the define and the return value :type define_ret: bool :param header_name: check for a particular header :type header_name: string :param auto_add_header_name: if header_name was set, add the headers in env.INCKEYS so the next tests will include these headers :type auto_add_header_name: bool """ for x in ('type_name', 'field_name', 'function_name'): if x in kw: Logs.warn('Invalid argument %r in test' % x) if not 'build_fun' in kw: kw['build_fun'] = build_fun if not 'env' in kw: kw['env'] = self.env.derive() env = kw['env'] if not 'compiler' in kw and not 'features' in kw: kw['compiler'] = 'c' if env.CXX_NAME and Task.classes.get('cxx'): kw['compiler'] = 'cxx' if not self.env.CXX: self.fatal('a c++ compiler is required') else: if not self.env.CC: self.fatal('a c compiler is required') if not 'compile_mode' in kw: kw['compile_mode'] = 'c' if 'cxx' in Utils.to_list(kw.get('features', [])) or kw.get('compiler') == 'cxx': kw['compile_mode'] = 'cxx' if not 'type' in kw: kw['type'] = 'cprogram' if not 'features' in kw: if not 'header_name' in kw or kw.get('link_header_test', True): kw['features'] = [kw['compile_mode'], kw['type']] # "c ccprogram" else: kw['features'] = 
[kw['compile_mode']] else: kw['features'] = Utils.to_list(kw['features']) if not 'compile_filename' in kw: kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '') def to_header(dct): if 'header_name' in dct: dct = Utils.to_list(dct['header_name']) return ''.join(['#include <%s>\n' % x for x in dct]) return '' if 'framework_name' in kw: # OSX, not sure this is used anywhere fwkname = kw['framework_name'] if not 'uselib_store' in kw: kw['uselib_store'] = fwkname.upper() if not kw.get('no_header'): fwk = '%s/%s.h' % (fwkname, fwkname) if kw.get('remove_dot_h'): fwk = fwk[:-2] val = kw.get('header_name', []) kw['header_name'] = Utils.to_list(val) + [fwk] kw['msg'] = 'Checking for framework %s' % fwkname kw['framework'] = fwkname elif 'header_name' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for header %s' % kw['header_name'] l = Utils.to_list(kw['header_name']) assert len(l), 'list of headers in header_name is empty' kw['code'] = to_header(kw) + SNIP_EMPTY_PROGRAM if not 'uselib_store' in kw: kw['uselib_store'] = l[0].upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(l[0]) if 'lib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for library %s' % kw['lib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['lib'].upper() if 'stlib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for static library %s' % kw['stlib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['stlib'].upper() if 'fragment' in kw: # an additional code fragment may be provided to replace the predefined code # in custom headers kw['code'] = kw['fragment'] if not 'msg' in kw: kw['msg'] = 'Checking for code snippet' if not 'errmsg' in kw: kw['errmsg'] = 'no' for (flagsname,flagstype) in (('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')): if flagsname in kw: if not 'msg' in kw: kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname]) if not 'errmsg' in kw: kw['errmsg'] = 'no' if not 'execute' in kw: 
kw['execute'] = False if kw['execute']: kw['features'].append('test_exec') kw['chmod'] = Utils.O755 if not 'errmsg' in kw: kw['errmsg'] = 'not found' if not 'okmsg' in kw: kw['okmsg'] = 'yes' if not 'code' in kw: kw['code'] = SNIP_EMPTY_PROGRAM # if there are headers to append automatically to the next tests if self.env[INCKEYS]: kw['code'] = '\n'.join(['#include <%s>' % x for x in self.env[INCKEYS]]) + '\n' + kw['code'] # in case defines lead to very long command-lines if kw.get('merge_config_header') or env.merge_config_header: kw['code'] = '%s\n\n%s' % (self.get_config_header(), kw['code']) env.DEFINES = [] # modify the copy if not kw.get('success'): kw['success'] = None if 'define_name' in kw: self.undefine(kw['define_name']) if not 'msg' in kw: self.fatal('missing "msg" in conf.check(...)') @conf def post_check(self, *k, **kw): """ Sets the variables after a test executed in :py:func:`waflib.Tools.c_config.check` was run successfully """ is_success = 0 if kw['execute']: if kw['success'] is not None: if kw.get('define_ret'): is_success = kw['success'] else: is_success = (kw['success'] == 0) else: is_success = (kw['success'] == 0) if kw.get('define_name'): comment = kw.get('comment', '') define_name = kw['define_name'] if kw['execute'] and kw.get('define_ret') and isinstance(is_success, str): if kw.get('global_define', 1): self.define(define_name, is_success, quote=kw.get('quote', 1), comment=comment) else: if kw.get('quote', 1): succ = '"%s"' % is_success else: succ = int(is_success) val = '%s=%s' % (define_name, succ) var = 'DEFINES_%s' % kw['uselib_store'] self.env.append_value(var, val) else: if kw.get('global_define', 1): self.define_cond(define_name, is_success, comment=comment) else: var = 'DEFINES_%s' % kw['uselib_store'] self.env.append_value(var, '%s=%s' % (define_name, int(is_success))) # define conf.env.HAVE_X to 1 if kw.get('add_have_to_env', 1): if kw.get('uselib_store'): self.env[self.have_define(kw['uselib_store'])] = 1 elif kw['execute'] and 
kw.get('define_ret'): self.env[define_name] = is_success else: self.env[define_name] = int(is_success) if 'header_name' in kw: if kw.get('auto_add_header_name'): self.env.append_value(INCKEYS, Utils.to_list(kw['header_name'])) if is_success and 'uselib_store' in kw: from waflib.Tools import ccroot # See get_uselib_vars in ccroot.py _vars = set() for x in kw['features']: if x in ccroot.USELIB_VARS: _vars |= ccroot.USELIB_VARS[x] for k in _vars: x = k.lower() if x in kw: self.env.append_value(k + '_' + kw['uselib_store'], kw[x]) return is_success @conf def check(self, *k, **kw): """ Performs a configuration test by calling :py:func:`waflib.Configure.run_build`. For the complete list of parameters, see :py:func:`waflib.Tools.c_config.validate_c`. To force a specific compiler, pass ``compiler='c'`` or ``compiler='cxx'`` to the list of arguments Besides build targets, complete builds can be given through a build function. All files will be written to a temporary directory:: def build(bld): lib_node = bld.srcnode.make_node('libdir/liblc1.c') lib_node.parent.mkdir() lib_node.write('#include <stdio.h>\\nint lib_func(void) { FILE *f = fopen("foo", "r");}\\n', 'w') bld(features='c cshlib', source=[lib_node], linkflags=conf.env.EXTRA_LDFLAGS, target='liblc') conf.check(build_fun=build, msg=msg) """ self.validate_c(kw) self.start_msg(kw['msg'], **kw) ret = None try: ret = self.run_build(*k, **kw) except self.errors.ConfigurationError: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: raise else: self.fatal('The configuration failed') else: kw['success'] = ret ret = self.post_check(*k, **kw) if not ret: self.end_msg(kw['errmsg'], 'YELLOW', **kw) self.fatal('The configuration failed %r' % ret) else: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret class test_exec(Task.Task): """ A task that runs programs after they are built. See :py:func:`waflib.Tools.c_config.test_exec_fun`. 
""" color = 'PINK' def run(self): cmd = [self.inputs[0].abspath()] + getattr(self.generator, 'test_args', []) if getattr(self.generator, 'rpath', None): if getattr(self.generator, 'define_ret', False): self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd) else: self.generator.bld.retval = self.generator.bld.exec_command(cmd) else: env = self.env.env or {} env.update(dict(os.environ)) for var in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'PATH'): env[var] = self.inputs[0].parent.abspath() + os.path.pathsep + env.get(var, '') if getattr(self.generator, 'define_ret', False): self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd, env=env) else: self.generator.bld.retval = self.generator.bld.exec_command(cmd, env=env) @feature('test_exec') @after_method('apply_link') def test_exec_fun(self): """ The feature **test_exec** is used to create a task that will to execute the binary created (link task output) during the build. The exit status will be set on the build context, so only one program may have the feature *test_exec*. This is used by configuration tests:: def configure(conf): conf.check(execute=True) """ self.create_task('test_exec', self.link_task.outputs[0]) @conf def check_cxx(self, *k, **kw): """ Runs a test with a task generator of the form:: conf.check(features='cxx cxxprogram', ...) """ kw['compiler'] = 'cxx' return self.check(*k, **kw) @conf def check_cc(self, *k, **kw): """ Runs a test with a task generator of the form:: conf.check(features='c cprogram', ...) 
""" kw['compiler'] = 'c' return self.check(*k, **kw) @conf def set_define_comment(self, key, comment): """ Sets a comment that will appear in the configuration header :type key: string :type comment: string """ coms = self.env.DEFINE_COMMENTS if not coms: coms = self.env.DEFINE_COMMENTS = {} coms[key] = comment or '' @conf def get_define_comment(self, key): """ Returns the comment associated to a define :type key: string """ coms = self.env.DEFINE_COMMENTS or {} return coms.get(key, '') @conf def define(self, key, val, quote=True, comment=''): """ Stores a single define and its state into ``conf.env.DEFINES``. The value is cast to an integer (0/1). :param key: define name :type key: string :param val: value :type val: int or string :param quote: enclose strings in quotes (yes by default) :type quote: bool """ assert isinstance(key, str) if not key: return if val is True: val = 1 elif val in (False, None): val = 0 if isinstance(val, int) or isinstance(val, float): s = '%s=%s' else: s = quote and '%s="%s"' or '%s=%s' app = s % (key, str(val)) ban = key + '=' lst = self.env.DEFINES for x in lst: if x.startswith(ban): lst[lst.index(x)] = app break else: self.env.append_value('DEFINES', app) self.env.append_unique(DEFKEYS, key) self.set_define_comment(key, comment) @conf def undefine(self, key, comment=''): """ Removes a global define from ``conf.env.DEFINES`` :param key: define name :type key: string """ assert isinstance(key, str) if not key: return ban = key + '=' lst = [x for x in self.env.DEFINES if not x.startswith(ban)] self.env.DEFINES = lst self.env.append_unique(DEFKEYS, key) self.set_define_comment(key, comment) @conf def define_cond(self, key, val, comment=''): """ Conditionally defines a name:: def configure(conf): conf.define_cond('A', True) # equivalent to: # if val: conf.define('A', 1) # else: conf.undefine('A') :param key: define name :type key: string :param val: value :type val: int or string """ assert isinstance(key, str) if not key: return if val: 
self.define(key, 1, comment=comment) else: self.undefine(key, comment=comment) @conf def is_defined(self, key): """ Indicates whether a particular define is globally set in ``conf.env.DEFINES``. :param key: define name :type key: string :return: True if the define is set :rtype: bool """ assert key and isinstance(key, str) ban = key + '=' for x in self.env.DEFINES: if x.startswith(ban): return True return False @conf def get_define(self, key): """ Returns the value of an existing define, or None if not found :param key: define name :type key: string :rtype: string """ assert key and isinstance(key, str) ban = key + '=' for x in self.env.DEFINES: if x.startswith(ban): return x[len(ban):] return None @conf def have_define(self, key): """ Returns a variable suitable for command-line or header use by removing invalid characters and prefixing it with ``HAVE_`` :param key: define name :type key: string :return: the input key prefixed by *HAVE_* and substitute any invalid characters. :rtype: string """ return (self.env.HAVE_PAT or 'HAVE_%s') % Utils.quote_define_name(key) @conf def write_config_header(self, configfile='', guard='', top=False, defines=True, headers=False, remove=True, define_prefix=''): """ Writes a configuration header containing defines and includes:: def configure(cnf): cnf.define('A', 1) cnf.write_config_header('config.h') This function only adds include guards (if necessary), consult :py:func:`waflib.Tools.c_config.get_config_header` for details on the body. 
:param configfile: path to the file to create (relative or absolute) :type configfile: string :param guard: include guard name to add, by default it is computed from the file name :type guard: string :param top: write the configuration header from the build directory (default is from the current path) :type top: bool :param defines: add the defines (yes by default) :type defines: bool :param headers: add #include in the file :type headers: bool :param remove: remove the defines after they are added (yes by default, works like in autoconf) :type remove: bool :type define_prefix: string :param define_prefix: prefix all the defines in the file with a particular prefix """ if not configfile: configfile = WAF_CONFIG_H waf_guard = guard or 'W_%s_WAF' % Utils.quote_define_name(configfile) node = top and self.bldnode or self.path.get_bld() node = node.make_node(configfile) node.parent.mkdir() lst = ['/* WARNING! All changes made to this file will be lost! */\n'] lst.append('#ifndef %s\n#define %s\n' % (waf_guard, waf_guard)) lst.append(self.get_config_header(defines, headers, define_prefix=define_prefix)) lst.append('\n#endif /* %s */\n' % waf_guard) node.write('\n'.join(lst)) # config files must not be removed on "waf clean" self.env.append_unique(Build.CFG_FILES, [node.abspath()]) if remove: for key in self.env[DEFKEYS]: self.undefine(key) self.env[DEFKEYS] = [] @conf def get_config_header(self, defines=True, headers=False, define_prefix=''): """ Creates the contents of a ``config.h`` file from the defines and includes set in conf.env.define_key / conf.env.include_key. No include guards are added. A prelude will be added from the variable env.WAF_CONFIG_H_PRELUDE if provided. 
This can be used to insert complex macros or include guards:: def configure(conf): conf.env.WAF_CONFIG_H_PRELUDE = '#include <unistd.h>\\n' conf.write_config_header('config.h') :param defines: write the defines values :type defines: bool :param headers: write include entries for each element in self.env.INCKEYS :type headers: bool :type define_prefix: string :param define_prefix: prefix all the defines with a particular prefix :return: the contents of a ``config.h`` file :rtype: string """ lst = [] if self.env.WAF_CONFIG_H_PRELUDE: lst.append(self.env.WAF_CONFIG_H_PRELUDE) if headers: for x in self.env[INCKEYS]: lst.append('#include <%s>' % x) if defines: tbl = {} for k in self.env.DEFINES: a, _, b = k.partition('=') tbl[a] = b for k in self.env[DEFKEYS]: caption = self.get_define_comment(k) if caption: caption = ' /* %s */' % caption try: txt = '#define %s%s %s%s' % (define_prefix, k, tbl[k], caption) except KeyError: txt = '/* #undef %s%s */%s' % (define_prefix, k, caption) lst.append(txt) return "\n".join(lst) @conf def cc_add_flags(conf): """ Adds CFLAGS / CPPFLAGS from os.environ to conf.env """ conf.add_os_flags('CPPFLAGS', dup=False) conf.add_os_flags('CFLAGS', dup=False) @conf def cxx_add_flags(conf): """ Adds CXXFLAGS / CPPFLAGS from os.environ to conf.env """ conf.add_os_flags('CPPFLAGS', dup=False) conf.add_os_flags('CXXFLAGS', dup=False) @conf def link_add_flags(conf): """ Adds LINKFLAGS / LDFLAGS from os.environ to conf.env """ conf.add_os_flags('LINKFLAGS', dup=False) conf.add_os_flags('LDFLAGS', dup=False) @conf def cc_load_tools(conf): """ Loads the Waf c extensions """ if not conf.env.DEST_OS: conf.env.DEST_OS = Utils.unversioned_sys_platform() conf.load('c') @conf def cxx_load_tools(conf): """ Loads the Waf c++ extensions """ if not conf.env.DEST_OS: conf.env.DEST_OS = Utils.unversioned_sys_platform() conf.load('cxx') @conf def get_cc_version(conf, cc, gcc=False, icc=False, clang=False): """ Runs the preprocessor to determine the gcc/icc/clang 
version The variables CC_VERSION, DEST_OS, DEST_BINFMT and DEST_CPU will be set in *conf.env* :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-dM', '-E', '-'] env = conf.env.env or None try: out, err = conf.cmd_and_log(cmd, output=0, input='\n'.encode(), env=env) except Errors.WafError: conf.fatal('Could not determine the compiler version %r' % cmd) if gcc: if out.find('__INTEL_COMPILER') >= 0: conf.fatal('The intel compiler pretends to be gcc') if out.find('__GNUC__') < 0 and out.find('__clang__') < 0: conf.fatal('Could not determine the compiler type') if icc and out.find('__INTEL_COMPILER') < 0: conf.fatal('Not icc/icpc') if clang and out.find('__clang__') < 0: conf.fatal('Not clang/clang++') if not clang and out.find('__clang__') >= 0: conf.fatal('Could not find gcc/g++ (only Clang), if renamed try eg: CC=gcc48 CXX=g++48 waf configure') k = {} if icc or gcc or clang: out = out.splitlines() for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k # Some documentation is available at http://predef.sourceforge.net # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns. if not conf.env.DEST_OS: conf.env.DEST_OS = '' for i in MACRO_TO_DESTOS: if isD(i): conf.env.DEST_OS = MACRO_TO_DESTOS[i] break else: if isD('__APPLE__') and isD('__MACH__'): conf.env.DEST_OS = 'darwin' elif isD('__unix__'): # unix must be tested last as it's a generic fallback conf.env.DEST_OS = 'generic' if isD('__ELF__'): conf.env.DEST_BINFMT = 'elf' elif isD('__WINNT__') or isD('__CYGWIN__') or isD('_WIN32'): conf.env.DEST_BINFMT = 'pe' if not conf.env.IMPLIBDIR: conf.env.IMPLIBDIR = conf.env.LIBDIR # for .lib or .dll.a files conf.env.LIBDIR = conf.env.BINDIR elif isD('__APPLE__'): conf.env.DEST_BINFMT = 'mac-o' if not conf.env.DEST_BINFMT: # Infer the binary format from the os name. 
conf.env.DEST_BINFMT = Utils.destos_to_binfmt(conf.env.DEST_OS) for i in MACRO_TO_DEST_CPU: if isD(i): conf.env.DEST_CPU = MACRO_TO_DEST_CPU[i] break Logs.debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')])) if icc: ver = k['__INTEL_COMPILER'] conf.env.CC_VERSION = (ver[:-2], ver[-2], ver[-1]) else: if isD('__clang__') and isD('__clang_major__'): conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) else: # older clang versions and gcc conf.env.CC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k.get('__GNUC_PATCHLEVEL__', '0')) return k @conf def get_xlc_version(conf, cc): """ Returns the Aix compiler version :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-qversion'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find xlc %r' % cmd) # the intention is to catch the 8.0 in "IBM XL C/C++ Enterprise Edition V8.0 for AIX..." 
for v in (r"IBM XL C/C\+\+.* V(?P<major>\d*)\.(?P<minor>\d*)",): version_re = re.compile(v, re.I).search match = version_re(out or err) if match: k = match.groupdict() conf.env.CC_VERSION = (k['major'], k['minor']) break else: conf.fatal('Could not determine the XLC version.') @conf def get_suncc_version(conf, cc): """ Returns the Sun compiler version :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-V'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError as e: # Older versions of the compiler exit with non-zero status when reporting their version if not (hasattr(e, 'returncode') and hasattr(e, 'stdout') and hasattr(e, 'stderr')): conf.fatal('Could not find suncc %r' % cmd) out = e.stdout err = e.stderr version = (out or err) version = version.splitlines()[0] # cc: Sun C 5.10 SunOS_i386 2009/06/03 # cc: Studio 12.5 Sun C++ 5.14 SunOS_sparc Beta 2015/11/17 # cc: WorkShop Compilers 5.0 98/12/15 C 5.0 version_re = re.compile(r'cc: (studio.*?|\s+)?(sun\s+(c\+\+|c)|(WorkShop\s+Compilers))?\s+(?P<major>\d*)\.(?P<minor>\d*)', re.I).search match = version_re(version) if match: k = match.groupdict() conf.env.CC_VERSION = (k['major'], k['minor']) else: conf.fatal('Could not determine the suncc version.') # ============ the --as-needed flag should added during the configuration, not at runtime ========= @conf def add_as_needed(self): """ Adds ``--as-needed`` to the *LINKFLAGS* On some platforms, it is a default flag. In some cases (e.g., in NS-3) it is necessary to explicitly disable this feature with `-Wl,--no-as-needed` flag. """ if self.env.DEST_BINFMT == 'elf' and 'gcc' in (self.env.CXX_NAME, self.env.CC_NAME): self.env.append_unique('LINKFLAGS', '-Wl,--as-needed') # ============ parallel configuration class cfgtask(Task.Task): """ A task that executes build configuration tests (calls conf.check) Make sure to use locks if concurrent access to the same conf.env data is necessary. 
""" def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.run_after = set() def display(self): return '' def runnable_status(self): for x in self.run_after: if not x.hasrun: return Task.ASK_LATER return Task.RUN_ME def uid(self): return Utils.SIG_NIL def signature(self): return Utils.SIG_NIL def run(self): conf = self.conf bld = Build.BuildContext(top_dir=conf.srcnode.abspath(), out_dir=conf.bldnode.abspath()) bld.env = conf.env bld.init_dirs() bld.in_msg = 1 # suppress top-level start_msg bld.logger = self.logger bld.multicheck_task = self args = self.args try: if 'func' in args: bld.test(build_fun=args['func'], msg=args.get('msg', ''), okmsg=args.get('okmsg', ''), errmsg=args.get('errmsg', ''), ) else: args['multicheck_mandatory'] = args.get('mandatory', True) args['mandatory'] = True try: bld.check(**args) finally: args['mandatory'] = args['multicheck_mandatory'] except Exception: return 1 def process(self): Task.Task.process(self) if 'msg' in self.args: with self.generator.bld.multicheck_lock: self.conf.start_msg(self.args['msg']) if self.hasrun == Task.NOT_RUN: self.conf.end_msg('test cancelled', 'YELLOW') elif self.hasrun != Task.SUCCESS: self.conf.end_msg(self.args.get('errmsg', 'no'), 'YELLOW') else: self.conf.end_msg(self.args.get('okmsg', 'yes'), 'GREEN') @conf def multicheck(self, *k, **kw): """ Runs configuration tests in parallel; results are printed sequentially at the end of the build but each test must provide its own msg value to display a line:: def test_build(ctx): ctx.in_msg = True # suppress console outputs ctx.check_large_file(mandatory=False) conf.multicheck( {'header_name':'stdio.h', 'msg':'... stdio', 'uselib_store':'STDIO', 'global_define':False}, {'header_name':'xyztabcd.h', 'msg':'... optional xyztabcd.h', 'mandatory': False}, {'header_name':'stdlib.h', 'msg':'... stdlib', 'okmsg': 'aye', 'errmsg': 'nope'}, {'func': test_build, 'msg':'... 
testing an arbitrary build function', 'okmsg':'ok'}, msg = 'Checking for headers in parallel', mandatory = True, # mandatory tests raise an error at the end run_all_tests = True, # try running all tests ) The configuration tests may modify the values in conf.env in any order, and the define values can affect configuration tests being executed. It is hence recommended to provide `uselib_store` values with `global_define=False` to prevent such issues. """ self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw) # Force a copy so that threads append to the same list at least # no order is guaranteed, but the values should not disappear at least for var in ('DEFINES', DEFKEYS): self.env.append_value(var, []) self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {} # define a task object that will execute our tests class par(object): def __init__(self): self.keep = False self.task_sigs = {} self.progress_bar = 0 def total(self): return len(tasks) def to_log(self, *k, **kw): return bld = par() bld.keep = kw.get('run_all_tests', True) bld.imp_sigs = {} tasks = [] id_to_task = {} for counter, dct in enumerate(k): x = Task.classes['cfgtask'](bld=bld, env=None) tasks.append(x) x.args = dct x.args['multicheck_counter'] = counter x.bld = bld x.conf = self x.args = dct # bind a logger that will keep the info in memory x.logger = Logs.make_mem_logger(str(id(x)), self.logger) if 'id' in dct: id_to_task[dct['id']] = x # second pass to set dependencies with after_test/before_test for x in tasks: for key in Utils.to_list(x.args.get('before_tests', [])): tsk = id_to_task[key] if not tsk: raise ValueError('No test named %r' % key) tsk.run_after.add(x) for key in Utils.to_list(x.args.get('after_tests', [])): tsk = id_to_task[key] if not tsk: raise ValueError('No test named %r' % key) x.run_after.add(tsk) def it(): yield tasks while 1: yield [] bld.producer = p = Runner.Parallel(bld, Options.options.jobs) bld.multicheck_lock = Utils.threading.Lock() p.biter = 
it() self.end_msg('started') p.start() # flush the logs in order into the config.log for x in tasks: x.logger.memhandler.flush() self.start_msg('-> processing test results') if p.error: for x in p.error: if getattr(x, 'err_msg', None): self.to_log(x.err_msg) self.end_msg('fail', color='RED') raise Errors.WafError('There is an error in the library, read config.log for more information') failure_count = 0 for x in tasks: if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN): failure_count += 1 if failure_count: self.end_msg(kw.get('errmsg', '%s test failed' % failure_count), color='YELLOW', **kw) else: self.end_msg('all ok', **kw) for x in tasks: if x.hasrun != Task.SUCCESS: if x.args.get('mandatory', True): self.fatal(kw.get('fatalmsg') or 'One of the tests has failed, read config.log for more information') @conf def check_gcc_o_space(self, mode='c'): if int(self.env.CC_VERSION[0]) > 4: # this is for old compilers return self.env.stash() if mode == 'c': self.env.CCLNK_TGT_F = ['-o', ''] elif mode == 'cxx': self.env.CXXLNK_TGT_F = ['-o', ''] features = '%s %sshlib' % (mode, mode) try: self.check(msg='Checking if the -o link must be split from arguments', fragment=SNIP_EMPTY_PROGRAM, features=features) except self.errors.ConfigurationError: self.env.revert() else: self.env.commit()
41,994
Python
.py
1,186
32.584317
155
0.662589
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,722
compiler_fc.py
projecthamster_hamster/waflib/Tools/compiler_fc.py
#!/usr/bin/env python
# encoding: utf-8

"""
Fortran compiler detection: tries the compilers listed for the current
platform in order of preference and configures the first one that loads
successfully and sets ``conf.env.FC``.
"""

import re
from waflib import Utils, Logs
from waflib.Tools import fc

fc_compiler = {
	'win32'  : ['gfortran', 'ifort'],
	'darwin' : ['gfortran', 'g95', 'ifort'],
	'linux'  : ['gfortran', 'g95', 'ifort'],
	'java'   : ['gfortran', 'g95', 'ifort'],
	'default': ['gfortran'],
	'aix'    : ['gfortran'],
}
"""
Dict mapping the platform names to lists of names of Fortran compilers to try, in order of preference::

	from waflib.Tools.compiler_fc import fc_compiler
	fc_compiler['linux'] = ['gfortran', 'g95', 'ifort']
"""

def default_compilers():
	"""
	Returns the default Fortran compilers to try for the current build
	platform, as a space-separated string (falls back to the 'default' entry).
	"""
	build_platform = Utils.unversioned_sys_platform()
	possible_compiler_list = fc_compiler.get(build_platform, fc_compiler['default'])
	return ' '.join(possible_compiler_list)

def configure(conf):
	"""
	Detects a suitable Fortran compiler

	:raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found
	"""
	try:
		test_for_compiler = conf.options.check_fortran_compiler or default_compilers()
	except AttributeError:
		# conf.options lacks the flag: options() was never loaded for this tool
		conf.fatal("Add options(opt): opt.load('compiler_fc')")
	for compiler in re.split('[ ,]+', test_for_compiler):
		conf.env.stash()
		conf.start_msg('Checking for %r (Fortran compiler)' % compiler)
		try:
			conf.load(compiler)
		except conf.errors.ConfigurationError as e:
			conf.env.revert()
			conf.end_msg(False)
			Logs.debug('compiler_fortran: %r', e)
		else:
			if conf.env.FC:
				conf.end_msg(conf.env.get_flat('FC'))
				conf.env.COMPILER_FORTRAN = compiler
				conf.env.commit()
				break
			# the tool loaded but did not set FC: roll back and try the next one
			conf.env.revert()
			conf.end_msg(False)
	else:
		conf.fatal('could not configure a Fortran compiler!')

def options(opt):
	"""
	This is how to provide compiler preferences on the command-line::

		$ waf configure --check-fortran-compiler=ifort
	"""
	test_for_compiler = default_compilers()
	opt.load_special_tools('fc_*.py')
	fortran_compiler_opts = opt.add_option_group('Configuration options')
	fortran_compiler_opts.add_option('--check-fortran-compiler', default=None,
		help='list of Fortran compiler to try [%s]' % test_for_compiler,
		dest="check_fortran_compiler")

	for x in test_for_compiler.split():
		opt.load('%s' % x)
2,162
Python
.py
63
31.746032
103
0.714696
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,723
c_osx.py
projecthamster_hamster/waflib/Tools/c_osx.py
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2008-2018 (ita)

"""
MacOSX related tools
"""

import os, shutil, platform
from waflib import Task, Utils
from waflib.TaskGen import taskgen_method, feature, after_method, before_method

# Default Info.plist contents used when the task generator supplies no
# mac_plist; {app_name} is substituted by the macplist task via str.format.
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
	<key>CFBundlePackageType</key>
	<string>APPL</string>
	<key>CFBundleGetInfoString</key>
	<string>Created by Waf</string>
	<key>CFBundleSignature</key>
	<string>????</string>
	<key>NOTE</key>
	<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
	<key>CFBundleExecutable</key>
	<string>{app_name}</string>
</dict>
</plist>
'''
"""
plist template
"""

@feature('c', 'cxx')
def set_macosx_deployment_target(self):
	"""
	Exports MACOSX_DEPLOYMENT_TARGET to the process environment so the
	compiler/linker pick it up; see WAF issue 285 and also
	http://trac.macports.org/ticket/17059
	"""
	if self.env.MACOSX_DEPLOYMENT_TARGET:
		os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET
	elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
		if Utils.unversioned_sys_platform() == 'darwin':
			# default to the running OS version, truncated to major.minor
			os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])

@taskgen_method
def create_bundle_dirs(self, name, out):
	"""
	Creates bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp`

	:param name: bundle name, e.g. ``foo.app``
	:param out: link task output node; the bundle is created next to it
	:return: the node of the bundle root directory
	"""
	dir = out.parent.find_or_declare(name)
	dir.mkdir()
	macos = dir.find_or_declare(['Contents', 'MacOS'])
	macos.mkdir()
	return dir

def bundle_name_for_output(out):
	"""
	Returns the ``.app`` bundle name for the output node *out*: the last
	file extension is replaced by ``.app`` (or ``.app`` is appended when
	there is no extension).
	"""
	name = out.name
	k = name.rfind('.')
	if k >= 0:
		name = name[:k] + '.app'
	else:
		name = name + '.app'
	return name

@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
	"""
	To compile an executable into a Mac application (a .app), set its *mac_app* attribute::

		def build(bld):
			bld.shlib(source='a.c', target='foo', mac_app=True)

	To force *all* executables to be transformed into Mac applications::

		def build(bld):
			bld.env.MACAPP = True
			bld.shlib(source='a.c', target='foo')
	"""
	if self.env.MACAPP or getattr(self, 'mac_app', False):
		out = self.link_task.outputs[0]

		name = bundle_name_for_output(out)
		dir = self.create_bundle_dirs(name, out)

		n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])

		# copy the linked binary into <bundle>/Contents/MacOS/
		self.apptask = self.create_task('macapp', self.link_task.outputs, n1)
		inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name
		self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755)

		if getattr(self, 'mac_files', None):
			# this only accepts files; they will be installed as seen from mac_files_root
			mac_files_root = getattr(self, 'mac_files_root', None)
			if isinstance(mac_files_root, str):
				mac_files_root = self.path.find_node(mac_files_root)
				if not mac_files_root:
					self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root)
			res_dir = n1.parent.parent.make_node('Resources')
			inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name
			for node in self.to_nodes(self.mac_files):
				# keep the layout relative to mac_files_root inside Resources/
				relpath = node.path_from(mac_files_root or node.parent)
				self.create_task('macapp', node, res_dir.make_node(relpath))
				self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node)

		if getattr(self.bld, 'is_install', None):
			# disable regular binary installation
			self.install_task.hasrun = Task.SKIP_ME

@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
	"""
	Creates a :py:class:`waflib.Tools.c_osx.macplist` instance.
	"""
	if self.env.MACAPP or getattr(self, 'mac_app', False):
		out = self.link_task.outputs[0]
		name = bundle_name_for_output(out)

		dir = self.create_bundle_dirs(name, out)
		n1 = dir.find_or_declare(['Contents', 'Info.plist'])
		self.plisttask = plisttask = self.create_task('macplist', [], n1)
		plisttask.context = {
			'app_name': self.link_task.outputs[0].name,
			'env': self.env
		}

		plist_ctx = getattr(self, 'plist_context', None)
		if (plist_ctx):
			plisttask.context.update(plist_ctx)

		if getattr(self, 'mac_plist', False):
			# mac_plist is either a file path or literal plist text
			node = self.path.find_resource(self.mac_plist)
			if node:
				plisttask.inputs.append(node)
			else:
				plisttask.code = self.mac_plist
		else:
			plisttask.code = app_info

		inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name
		self.add_install_files(install_to=inst_to, install_from=n1)

@feature('cshlib', 'cxxshlib')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_bundle(self):
	"""
	To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute::

		def build(bld):
			bld.shlib(source='a.c', target='foo', mac_bundle = True)

	To force *all* executables to be transformed into bundles::

		def build(bld):
			bld.env.MACBUNDLE = True
			bld.shlib(source='a.c', target='foo')
	"""
	if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False):
		self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = [] # disable the '-dynamiclib' flag
		self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN
		use = self.use = self.to_list(getattr(self, 'use', []))
		if not 'MACBUNDLE' in use:
			use.append('MACBUNDLE')

app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']

class macapp(Task.Task):
	"""
	Creates mac applications
	"""
	color = 'PINK'
	def run(self):
		self.outputs[0].parent.mkdir()
		shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath())

class macplist(Task.Task):
	"""
	Creates plist files
	"""
	color = 'PINK'
	ext_in = ['.bin']
	def run(self):
		if getattr(self, 'code', None):
			# literal plist text attached by create_task_macplist
			txt = self.code
		else:
			txt = self.inputs[0].read()
		context = getattr(self, 'context', {})
		txt = txt.format(**context)
		self.outputs[0].write(txt)
5,846
Python
.py
164
32.987805
98
0.704228
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,724
xlcxx.py
projecthamster_hamster/waflib/Tools/xlcxx.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_xlcxx(conf): """ Detects the Aix C++ compiler """ cxx = conf.find_program(['xlc++_r', 'xlc++'], var='CXX') conf.get_xlc_version(cxx) conf.env.CXX_NAME = 'xlc++' @conf def xlcxx_common_flags(conf): """ Flags required for executing the Aix C++ compiler """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o'] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-G', '-Wl,-brtl,-bexpfull'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = [] v.cxxstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_xlcxx() conf.find_ar() conf.xlcxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags()
1,468
Python
.py
52
26.211538
62
0.595153
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,725
gcc.py
projecthamster_hamster/waflib/Tools/gcc.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 """ gcc/llvm detection. """ from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_gcc(conf): """ Find the program gcc, and if present, try to detect its version number """ cc = conf.find_program(['gcc', 'cc'], var='CC') conf.get_cc_version(cc, gcc=True) conf.env.CC_NAME = 'gcc' @conf def gcc_common_flags(conf): """ Common flags for gcc on nearly all platforms """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o'] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = [] v.CCLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Wl,-Bdynamic' v.STLIB_MARKER = '-Wl,-Bstatic' v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-shared'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = ['-Wl,-Bstatic'] v.cstlib_PATTERN = 'lib%s.a' v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] v.CFLAGS_MACBUNDLE = ['-fPIC'] v.macbundle_PATTERN = '%s.bundle' @conf def gcc_modifier_win32(conf): """Configuration flags for executing gcc on Windows""" v = conf.env v.cprogram_PATTERN = '%s.exe' v.cshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.CFLAGS_cshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. 
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def gcc_modifier_cygwin(conf): """Configuration flags for executing gcc on Cygwin""" gcc_modifier_win32(conf) v = conf.env v.cshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base']) v.CFLAGS_cshlib = [] @conf def gcc_modifier_darwin(conf): """Configuration flags for executing gcc on MacOS""" v = conf.env v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-dynamiclib'] v.cshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.ARCH_ST = ['-arch'] v.LINKFLAGS_cstlib = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gcc_modifier_aix(conf): """Configuration flags for executing gcc on AIX""" v = conf.env v.LINKFLAGS_cprogram = ['-Wl,-brtl'] v.LINKFLAGS_cshlib = ['-shared','-Wl,-brtl,-bexpfull'] v.SHLIB_MARKER = [] @conf def gcc_modifier_hpux(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.CFLAGS_cshlib = ['-fPIC','-DPIC'] v.cshlib_PATTERN = 'lib%s.sl' @conf def gcc_modifier_openbsd(conf): conf.env.SONAME_ST = [] @conf def gcc_modifier_osf1V(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gcc_modifier_platform(conf): """Execute platform-specific functions based on *gcc_modifier_+NAME*""" # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. gcc_modifier_func = getattr(conf, 'gcc_modifier_' + conf.env.DEST_OS, None) if gcc_modifier_func: gcc_modifier_func() def configure(conf): """ Configuration for gcc """ conf.find_gcc() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() conf.check_gcc_o_space()
4,033
Python
.py
128
29.515625
105
0.635543
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,726
c_tests.py
projecthamster_hamster/waflib/Tools/c_tests.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) """ Various configuration tests. """ from waflib import Task from waflib.Configure import conf from waflib.TaskGen import feature, before_method, after_method LIB_CODE = ''' #ifdef _MSC_VER #define testEXPORT __declspec(dllexport) #else #define testEXPORT #endif testEXPORT int lib_func(void) { return 9; } ''' MAIN_CODE = ''' #ifdef _MSC_VER #define testEXPORT __declspec(dllimport) #else #define testEXPORT #endif testEXPORT int lib_func(void); int main(int argc, char **argv) { (void)argc; (void)argv; return !(lib_func() == 9); } ''' @feature('link_lib_test') @before_method('process_source') def link_lib_test_fun(self): """ The configuration test :py:func:`waflib.Configure.run_build` declares a unique task generator, so we need to create other task generators from here to check if the linker is able to link libraries. """ def write_test_file(task): task.outputs[0].write(task.generator.code) rpath = [] if getattr(self, 'add_rpath', False): rpath = [self.bld.path.get_bld().abspath()] mode = self.mode m = '%s %s' % (mode, mode) ex = self.test_exec and 'test_exec' or '' bld = self.bld bld(rule=write_test_file, target='test.' + mode, code=LIB_CODE) bld(rule=write_test_file, target='main.' + mode, code=MAIN_CODE) bld(features='%sshlib' % m, source='test.' + mode, target='test') bld(features='%sprogram %s' % (m, ex), source='main.' + mode, target='app', use='test', rpath=rpath) @conf def check_library(self, mode=None, test_exec=True): """ Checks if libraries can be linked with the current linker. Uses :py:func:`waflib.Tools.c_tests.link_lib_test_fun`. 
:param mode: c or cxx or d :type mode: string """ if not mode: mode = 'c' if self.env.CXX: mode = 'cxx' self.check( compile_filename = [], features = 'link_lib_test', msg = 'Checking for libraries', mode = mode, test_exec = test_exec) ######################################################################################## INLINE_CODE = ''' typedef int foo_t; static %s foo_t static_foo () {return 0; } %s foo_t foo () { return 0; } ''' INLINE_VALUES = ['inline', '__inline__', '__inline'] @conf def check_inline(self, **kw): """ Checks for the right value for inline macro. Define INLINE_MACRO to 1 if the define is found. If the inline macro is not 'inline', add a define to the ``config.h`` (#define inline __inline__) :param define_name: define INLINE_MACRO by default to 1 if the macro is defined :type define_name: string :param features: by default *c* or *cxx* depending on the compiler present :type features: list of string """ self.start_msg('Checking for inline') if not 'define_name' in kw: kw['define_name'] = 'INLINE_MACRO' if not 'features' in kw: if self.env.CXX: kw['features'] = ['cxx'] else: kw['features'] = ['c'] for x in INLINE_VALUES: kw['fragment'] = INLINE_CODE % (x, x) try: self.check(**kw) except self.errors.ConfigurationError: continue else: self.end_msg(x) if x != 'inline': self.define('inline', x, quote=False) return x self.fatal('could not use inline functions') ######################################################################################## LARGE_FRAGMENT = '''#include <unistd.h> int main(int argc, char **argv) { (void)argc; (void)argv; return !(sizeof(off_t) >= 8); } ''' @conf def check_large_file(self, **kw): """ Checks for large file support and define the macro HAVE_LARGEFILE The test is skipped on win32 systems (DEST_BINFMT == pe). 
:param define_name: define to set, by default *HAVE_LARGEFILE* :type define_name: string :param execute: execute the test (yes by default) :type execute: bool """ if not 'define_name' in kw: kw['define_name'] = 'HAVE_LARGEFILE' if not 'execute' in kw: kw['execute'] = True if not 'features' in kw: if self.env.CXX: kw['features'] = ['cxx', 'cxxprogram'] else: kw['features'] = ['c', 'cprogram'] kw['fragment'] = LARGE_FRAGMENT kw['msg'] = 'Checking for large file support' ret = True try: if self.env.DEST_BINFMT != 'pe': ret = self.check(**kw) except self.errors.ConfigurationError: pass else: if ret: return True kw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64' kw['defines'] = ['_FILE_OFFSET_BITS=64'] try: ret = self.check(**kw) except self.errors.ConfigurationError: pass else: self.define('_FILE_OFFSET_BITS', 64) return ret self.fatal('There is no support for large files') ######################################################################################## ENDIAN_FRAGMENT = ''' #ifdef _MSC_VER #define testshlib_EXPORT __declspec(dllexport) #else #define testshlib_EXPORT #endif short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int testshlib_EXPORT use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; ''' class grep_for_endianness(Task.Task): """ Task that reads a binary and tries to determine the endianness """ color = 'PINK' def run(self): txt = self.inputs[0].read(flags='rb').decode('latin-1') if txt.find('LiTTleEnDian') > -1: self.generator.tmp.append('little') elif txt.find('BIGenDianSyS') > -1: self.generator.tmp.append('big') else: return -1 @feature('grep_for_endianness') @after_method('apply_link') def 
grep_for_endianness_fun(self): """ Used by the endianness configuration test """ self.create_task('grep_for_endianness', self.link_task.outputs[0]) @conf def check_endianness(self): """ Executes a configuration test to determine the endianness """ tmp = [] def check_msg(self): return tmp[0] self.check(fragment=ENDIAN_FRAGMENT, features='c cshlib grep_for_endianness', msg='Checking for endianness', define='ENDIANNESS', tmp=tmp, okmsg=check_msg, confcache=None) return tmp[0]
6,123
Python
.py
204
27.877451
115
0.669555
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,727
icpc.py
projecthamster_hamster/waflib/Tools/icpc.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009-2018 (ita) """ Detects the Intel C++ compiler """ import sys from waflib.Tools import ccroot, ar, gxx from waflib.Configure import conf @conf def find_icpc(conf): """ Finds the program icpc, and execute it to ensure it really is icpc """ cxx = conf.find_program('icpc', var='CXX') conf.get_cc_version(cxx, icc=True) conf.env.CXX_NAME = 'icc' def configure(conf): conf.find_icpc() conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags()
590
Python
.py
25
21.88
67
0.733929
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,728
cxx.py
projecthamster_hamster/waflib/Tools/cxx.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) "Base for c++ programs and libraries" from waflib import TaskGen, Task from waflib.Tools import c_preproc from waflib.Tools.ccroot import link_task, stlink_task @TaskGen.extension('.cpp','.cc','.cxx','.C','.c++') def cxx_hook(self, node): "Binds c++ file extensions to create :py:class:`waflib.Tools.cxx.cxx` instances" return self.create_compiled_task('cxx', node) if not '.c' in TaskGen.task_gen.mappings: TaskGen.task_gen.mappings['.c'] = TaskGen.task_gen.mappings['.cpp'] class cxx(Task.Task): "Compiles C++ files into object files" run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' vars = ['CXXDEPS'] # unused variable to depend on, just in case ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] scan = c_preproc.scan class cxxprogram(link_task): "Links object files into c++ programs" run_str = '${LINK_CXX} ${LINKFLAGS} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}' vars = ['LINKDEPS'] ext_out = ['.bin'] inst_to = '${BINDIR}' class cxxshlib(cxxprogram): "Links object files into c++ shared libraries" inst_to = '${LIBDIR}' class cxxstlib(stlink_task): "Links object files into c++ static libraries" pass # do not remove
1,622
Python
.py
31
50.483871
311
0.700379
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,729
compiler_cxx.py
projecthamster_hamster/waflib/Tools/compiler_cxx.py
#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de 2007 (pmarat) """ Try to detect a C++ compiler from the list of supported compilers (g++, msvc, etc):: def options(opt): opt.load('compiler_cxx') def configure(cnf): cnf.load('compiler_cxx') def build(bld): bld.program(source='main.cpp', target='app') The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_cxx.cxx_compiler`. To register a new C++ compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] def options(opt): opt.load('compiler_cxx') def configure(cnf): cnf.load('compiler_cxx') def build(bld): bld.program(source='main.c', target='app') Not all compilers need to have a specific tool. For example, the clang compilers can be detected by the gcc tools when using:: $ CXX=clang waf configure """ import re from waflib.Tools import ccroot from waflib import Utils from waflib.Logs import debug cxx_compiler = { 'win32': ['msvc', 'g++', 'clang++'], 'cygwin': ['g++', 'clang++'], 'darwin': ['clang++', 'g++'], 'aix': ['xlc++', 'g++', 'clang++'], 'linux': ['g++', 'clang++', 'icpc'], 'sunos': ['sunc++', 'g++'], 'irix': ['g++'], 'hpux': ['g++'], 'osf1V': ['g++'], 'gnu': ['g++', 'clang++'], 'java': ['g++', 'msvc', 'clang++', 'icpc'], 'gnukfreebsd': ['g++', 'clang++'], 'default': ['clang++', 'g++'] } """ Dict mapping the platform names to Waf tools finding specific C++ compilers:: from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['linux'] = ['gxx', 'icpc', 'suncxx'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = cxx_compiler.get(build_platform, cxx_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable C++ compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: 
test_for_compiler = conf.options.check_cxx_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_cxx')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (C++ compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) debug('compiler_cxx: %r', e) else: if conf.env.CXX: conf.end_msg(conf.env.get_flat('CXX')) conf.env.COMPILER_CXX = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a C++ compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-cxx-compiler=gxx """ test_for_compiler = default_compilers() opt.load_special_tools('cxx_*.py') cxx_compiler_opts = opt.add_option_group('Configuration options') cxx_compiler_opts.add_option('--check-cxx-compiler', default=None, help='list of C++ compilers to try [%s]' % test_for_compiler, dest="check_cxx_compiler") for x in test_for_compiler.split(): opt.load('%s' % x)
3,309
Python
.py
93
33.236559
126
0.673763
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,730
gxx.py
projecthamster_hamster/waflib/Tools/gxx.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 """ g++/llvm detection. """ from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_gxx(conf): """ Finds the program g++, and if present, try to detect its version number """ cxx = conf.find_program(['g++', 'c++'], var='CXX') conf.get_cc_version(cxx, gcc=True) conf.env.CXX_NAME = 'gcc' @conf def gxx_common_flags(conf): """ Common flags for g++ on nearly all platforms """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o'] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Wl,-Bdynamic' v.STLIB_MARKER = '-Wl,-Bstatic' v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-shared'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = ['-Wl,-Bstatic'] v.cxxstlib_PATTERN = 'lib%s.a' v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] v.CXXFLAGS_MACBUNDLE = ['-fPIC'] v.macbundle_PATTERN = '%s.bundle' @conf def gxx_modifier_win32(conf): """Configuration flags for executing gcc on Windows""" v = conf.env v.cxxprogram_PATTERN = '%s.exe' v.cxxshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.CXXFLAGS_cxxshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. 
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def gxx_modifier_cygwin(conf): """Configuration flags for executing g++ on Cygwin""" gxx_modifier_win32(conf) v = conf.env v.cxxshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_cxxshlib', ['-Wl,--enable-auto-image-base']) v.CXXFLAGS_cxxshlib = [] @conf def gxx_modifier_darwin(conf): """Configuration flags for executing g++ on MacOS""" v = conf.env v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-dynamiclib'] v.cxxshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.ARCH_ST = ['-arch'] v.LINKFLAGS_cxxstlib = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gxx_modifier_aix(conf): """Configuration flags for executing g++ on AIX""" v = conf.env v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] v.LINKFLAGS_cxxshlib = ['-shared', '-Wl,-brtl,-bexpfull'] v.SHLIB_MARKER = [] @conf def gxx_modifier_hpux(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.CFLAGS_cxxshlib = ['-fPIC','-DPIC'] v.cxxshlib_PATTERN = 'lib%s.sl' @conf def gxx_modifier_openbsd(conf): conf.env.SONAME_ST = [] @conf def gcc_modifier_osf1V(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gxx_modifier_platform(conf): """Execute platform-specific functions based on *gxx_modifier_+NAME*""" # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. gxx_modifier_func = getattr(conf, 'gxx_modifier_' + conf.env.DEST_OS, None) if gxx_modifier_func: gxx_modifier_func() def configure(conf): """ Configuration for g++ """ conf.find_gxx() conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() conf.check_gcc_o_space('cxx')
4,064
Python
.py
128
29.75
105
0.64346
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,731
g95.py
projecthamster_hamster/waflib/Tools/g95.py
#! /usr/bin/env python # encoding: utf-8 # KWS 2010 # Thomas Nagy 2016-2018 (ita) import re from waflib import Utils from waflib.Tools import fc, fc_config, fc_scan, ar from waflib.Configure import conf @conf def find_g95(conf): fc = conf.find_program('g95', var='FC') conf.get_g95_version(fc) conf.env.FC_NAME = 'G95' @conf def g95_flags(conf): v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.FORTRANMODFLAG = ['-fmod=', ''] # template for module path v.FCFLAGS_DEBUG = ['-Werror'] # why not @conf def g95_modifier_win32(conf): fc_config.fortran_modifier_win32(conf) @conf def g95_modifier_cygwin(conf): fc_config.fortran_modifier_cygwin(conf) @conf def g95_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def g95_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() g95_modifier_func = getattr(conf, 'g95_modifier_' + dest_os, None) if g95_modifier_func: g95_modifier_func() @conf def get_g95_version(conf, fc): """get the compiler version""" version_re = re.compile(r"g95\s*(?P<major>\d*)\.(?P<minor>\d*)").search cmd = fc + ['--version'] out, err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('cannot determine g95 version') k = match.groupdict() conf.env.FC_VERSION = (k['major'], k['minor']) def configure(conf): conf.find_g95() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.g95_flags() conf.g95_modifier_platform()
1,522
Python
.py
55
25.818182
72
0.71978
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,732
__init__.py
projecthamster_hamster/waflib/Tools/__init__.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita)
71
Python
.py
3
22.666667
30
0.705882
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,733
nobuild.py
projecthamster_hamster/waflib/Tools/nobuild.py
#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2015 (ita) """ Override the build commands to write empty files. This is useful for profiling and evaluating the Python overhead. To use:: def build(bld): ... bld.load('nobuild') """ from waflib import Task def build(bld): def run(self): for x in self.outputs: x.write('') for (name, cls) in Task.classes.items(): cls.run = run
419
Python
.py
18
20.333333
64
0.683544
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,734
ldc2.py
projecthamster_hamster/waflib/Tools/ldc2.py
#!/usr/bin/env python # encoding: utf-8 # Alex Rønne Petersen, 2012 (alexrp/Zor) from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_ldc2(conf): """ Finds the program *ldc2* and set the variable *D* """ conf.find_program(['ldc2'], var='D') out = conf.cmd_and_log(conf.env.D + ['-version']) if out.find("based on DMD v2.") == -1: conf.fatal("detected compiler is not ldc2") @conf def common_flags_ldc2(conf): """ Sets the D flags required by *ldc2* """ v = conf.env v.D_SRC_F = ['-c'] v.D_TGT_F = '-of%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-of%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-L-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L-L%s' v.LINKFLAGS_dshlib = ['-L-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = ['-H', '-Hf'] v.D_HDR_F = '%s' v.LINKFLAGS = [] v.DFLAGS_dshlib = ['-relocation-model=pic'] def configure(conf): """ Configuration for *ldc2* """ conf.find_ldc2() conf.load('ar') conf.load('d') conf.common_flags_ldc2() conf.d_platform_flags()
1,185
Python
.py
44
24.863636
50
0.59876
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,735
msvc.py
projecthamster_hamster/waflib/Tools/msvc.py
#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 (dv) # Tamas Pal, 2007 (folti) # Nicolas Mercier, 2009 # Matt Clarkson, 2012 """ Microsoft Visual C++/Intel C++ compiler support If you get detection problems, first try any of the following:: chcp 65001 set PYTHONIOENCODING=... set PYTHONLEGACYWINDOWSSTDIO=1 Usage:: $ waf configure --msvc_version="msvc 10.0,msvc 9.0" --msvc_target="x64" or:: def configure(conf): conf.env.MSVC_VERSIONS = ['msvc 10.0', 'msvc 9.0', 'msvc 8.0', 'msvc 7.1', 'msvc 7.0', 'msvc 6.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0'] conf.env.MSVC_TARGETS = ['x64'] conf.load('msvc') or:: def configure(conf): conf.load('msvc', funs='no_autodetect') conf.check_lib_msvc('gdi32') conf.check_libs_msvc('kernel32 user32') def build(bld): tg = bld.program(source='main.c', target='app', use='KERNEL32 USER32 GDI32') Platforms and targets will be tested in the order they appear; the first good configuration will be used. To force testing all the configurations that are not used, use the ``--no-msvc-lazy`` option or set ``conf.env.MSVC_LAZY_AUTODETECT=False``. Supported platforms: ia64, x64, x86, x86_amd64, x86_ia64, x86_arm, amd64_x86, amd64_arm Compilers supported: * msvc => Visual Studio, versions 6.0 (VC 98, VC .NET 2002) to 15 (Visual Studio 2017) * wsdk => Windows SDK, versions 6.0, 6.1, 7.0, 7.1, 8.0 * icl => Intel compiler, versions 9, 10, 11, 13 * winphone => Visual Studio to target Windows Phone 8 native (version 8.0 for now) * Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i) * PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i) To use WAF in a VS2008 Make file project (see http://code.google.com/p/waf/issues/detail?id=894) You may consider to set the environment variable "VS_UNICODE_OUTPUT" to nothing before calling waf. So in your project settings use something like 'cmd.exe /C "set VS_UNICODE_OUTPUT=& set PYTHONUNBUFFERED=true & waf build"'. 
cmd.exe /C "chcp 1252 & set PYTHONUNBUFFERED=true && set && waf configure" Setting PYTHONUNBUFFERED gives the unbuffered output. """ import os, sys, re, traceback from waflib import Utils, Logs, Options, Errors from waflib.TaskGen import after_method, feature from waflib.Configure import conf from waflib.Tools import ccroot, c, cxx, ar g_msvc_systemlibs = ''' aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32 osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32 shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32 traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp '''.split() """importlibs provided by MSVC/Platform SDK. 
Do NOT search them""" all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64'), ('x86_arm', 'arm'), ('x86_arm64', 'arm64'), ('amd64_x86', 'x86'), ('amd64_arm', 'arm'), ('amd64_arm64', 'arm64') ] """List of msvc platforms""" all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ] """List of wince platforms""" all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] """List of icl platforms""" def options(opt): default_ver = '' vsver = os.getenv('VSCMD_VER') if vsver: m = re.match(r'(^\d+\.\d+).*', vsver) if m: default_ver = 'msvc %s' % m.group(1) opt.add_option('--msvc_version', type='string', help = 'msvc version, eg: "msvc 10.0,msvc 9.0"', default=default_ver) opt.add_option('--msvc_targets', type='string', help = 'msvc targets, eg: "x64,arm"', default='') opt.add_option('--no-msvc-lazy', action='store_false', help = 'lazily check msvc target environments', default=True, dest='msvc_lazy') class MSVCVersion(object): def __init__(self, ver): m = re.search(r'^(.*)\s+(\d+[.]\d+)', ver) if m: self.name = m.group(1) self.number = float(m.group(2)) else: self.name = ver self.number = 0. def __lt__(self, other): if self.number == other.number: return self.name < other.name return self.number < other.number @conf def setup_msvc(conf, versiondict): """ Checks installed compilers and targets and returns the first combination from the user's options, env, or the global supported lists that checks. 
:param versiondict: dict(platform -> dict(architecture -> configuration)) :type versiondict: dict(string -> dict(string -> target_compiler) :return: the compiler, revision, path, include dirs, library paths and target architecture :rtype: tuple of strings """ platforms = getattr(Options.options, 'msvc_targets', '').split(',') if platforms == ['']: platforms=Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms] desired_versions = getattr(Options.options, 'msvc_version', '').split(',') if desired_versions == ['']: desired_versions = conf.env.MSVC_VERSIONS or list(sorted(versiondict.keys(), key=MSVCVersion, reverse=True)) # Override lazy detection by evaluating after the fact. lazy_detect = getattr(Options.options, 'msvc_lazy', True) if conf.env.MSVC_LAZY_AUTODETECT is False: lazy_detect = False if not lazy_detect: for val in versiondict.values(): for arch in list(val.keys()): cfg = val[arch] cfg.evaluate() if not cfg.is_valid: del val[arch] conf.env.MSVC_INSTALLED_VERSIONS = versiondict for version in desired_versions: Logs.debug('msvc: detecting %r - %r', version, desired_versions) try: targets = versiondict[version] except KeyError: continue seen = set() for arch in platforms: if arch in seen: continue else: seen.add(arch) try: cfg = targets[arch] except KeyError: continue cfg.evaluate() if cfg.is_valid: compiler,revision = version.rsplit(' ', 1) return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu conf.fatal('msvc: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys()))) @conf def get_msvc_version(conf, compiler, version, target, vcvars): """ Checks that an installed compiler actually runs and uses vcvars to obtain the environment needed by the compiler. 
:param compiler: compiler type, for looking up the executable name :param version: compiler version, for debugging only :param target: target architecture :param vcvars: batch file to run to check the environment :return: the location of the compiler executable, the location of include dirs, and the library paths :rtype: tuple of strings """ Logs.debug('msvc: get_msvc_version: %r %r %r', compiler, version, target) try: conf.msvc_cnt += 1 except AttributeError: conf.msvc_cnt = 1 batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt) batfile.write("""@echo off set INCLUDE= set LIB= call "%s" %s echo PATH=%%PATH%% echo INCLUDE=%%INCLUDE%% echo LIB=%%LIB%%;%%LIBPATH%% """ % (vcvars,target)) sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()], stdin=getattr(Utils.subprocess, 'DEVNULL', None)) lines = sout.splitlines() if not lines[0]: lines.pop(0) MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None for line in lines: if line.startswith('PATH='): path = line[5:] MSVC_PATH = path.split(';') elif line.startswith('INCLUDE='): MSVC_INCDIR = [i for i in line[8:].split(';') if i] elif line.startswith('LIB='): MSVC_LIBDIR = [i for i in line[4:].split(';') if i] if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR): conf.fatal('msvc: Could not find a valid architecture for building (get_msvc_version_3)') # Check if the compiler is usable at all. # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. env = dict(os.environ) env.update(PATH = path) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) cxx = conf.find_program(compiler_name, path_list=MSVC_PATH) # delete CL if exists. because it could contain parameters which can change cl's behaviour rather catastrophically. 
if 'CL' in env: del(env['CL']) try: conf.cmd_and_log(cxx + ['/help'], env=env) except UnicodeError: st = traceback.format_exc() if conf.logger: conf.logger.error(st) conf.fatal('msvc: Unicode error - check the code page?') except Exception as e: Logs.debug('msvc: get_msvc_version: %r %r %r -> failure %s', compiler, version, target, str(e)) conf.fatal('msvc: cannot run the compiler in get_msvc_version (run with -v to display errors)') else: Logs.debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target) finally: conf.env[compiler_name] = '' return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) def gather_wince_supported_platforms(): """ Checks SmartPhones SDKs :param versions: list to modify :type versions: list """ supported_wince_platforms = [] try: ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs') except OSError: try: ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs') except OSError: ce_sdk = '' if not ce_sdk: return supported_wince_platforms index = 0 while 1: try: sdk_device = Utils.winreg.EnumKey(ce_sdk, index) sdk = Utils.winreg.OpenKey(ce_sdk, sdk_device) except OSError: break index += 1 try: path,type = Utils.winreg.QueryValueEx(sdk, 'SDKRootDir') except OSError: try: path,type = Utils.winreg.QueryValueEx(sdk,'SDKInformation') except OSError: continue path,xml = os.path.split(path) path = str(path) path,device = os.path.split(path) if not device: path,device = os.path.split(path) platforms = [] for arch,compiler in all_wince_platforms: if os.path.isdir(os.path.join(path, device, 'Lib', arch)): platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch))) if platforms: supported_wince_platforms.append((device, platforms)) return supported_wince_platforms def gather_msvc_detected_versions(): #Detected MSVC versions! 
version_pattern = re.compile(r'^(\d\d?\.\d\d?)(Exp)?$') detected_versions = [] for vcver,vcvar in (('VCExpress','Exp'), ('VisualStudio','')): prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\' + vcver try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix) except OSError: prefix = 'SOFTWARE\\Microsoft\\' + vcver try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix) except OSError: continue index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 match = version_pattern.match(version) if match: versionnumber = float(match.group(1)) else: continue detected_versions.append((versionnumber, version+vcvar, prefix+'\\'+version)) def fun(tup): return tup[0] detected_versions.sort(key = fun) return detected_versions class target_compiler(object): """ Wrap a compiler configuration; call evaluate() to determine whether the configuration is usable. """ def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): """ :param ctx: configuration context to use to eventually get the version environment :param compiler: compiler name :param cpu: target cpu :param version: compiler version number :param bat_target: ? 
:param bat: path to the batch file to run """ self.conf = ctx self.name = None self.is_valid = False self.is_done = False self.compiler = compiler self.cpu = cpu self.version = version self.bat_target = bat_target self.bat = bat self.callback = callback def evaluate(self): if self.is_done: return self.is_done = True try: vs = self.conf.get_msvc_version(self.compiler, self.version, self.bat_target, self.bat) except Errors.ConfigurationError: self.is_valid = False return if self.callback: vs = self.callback(self, vs) self.is_valid = True (self.bindirs, self.incdirs, self.libdirs) = vs def __str__(self): return str((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) def __repr__(self): return repr((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) @conf def gather_wsdk_versions(conf, versions): """ Use winreg to add the msvc versions to the input list :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^v..?.?\...?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue try: msvc_version = Utils.winreg.OpenKey(all_versions, version) path,type = Utils.winreg.QueryValueEx(msvc_version,'InstallationFolder') except OSError: continue if path and os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')): targets = {} for target,arch in all_msvc_platforms: targets[target] = target_compiler(conf, 'wsdk', arch, version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')) versions['wsdk ' + version[1:]] = targets @conf def gather_msvc_targets(conf, versions, version, vc_path): #Looking for normal MSVC compilers! 
targets = {} if os.path.isfile(os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')): for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')) elif os.path.isfile(os.path.join(vc_path, 'vcvarsall.bat')): for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'vcvarsall.bat')) elif os.path.isfile(os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')): targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, 'x86', os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')) elif os.path.isfile(os.path.join(vc_path, 'Bin', 'vcvars32.bat')): targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, '', os.path.join(vc_path, 'Bin', 'vcvars32.bat')) if targets: versions['msvc %s' % version] = targets @conf def gather_wince_targets(conf, versions, version, vc_path, vsvars, supported_platforms): #Looking for Win CE compilers! 
for device,platforms in supported_platforms: targets = {} for platform,compiler,include,lib in platforms: winCEpath = os.path.join(vc_path, 'ce') if not os.path.isdir(winCEpath): continue if os.path.isdir(os.path.join(winCEpath, 'lib', platform)): bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] incdirs = [os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include'), include] libdirs = [os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform), lib] def combine_common(obj, compiler_env): # TODO this is likely broken, remove in waf 2.1 (common_bindirs,_1,_2) = compiler_env return (bindirs + common_bindirs, incdirs, libdirs) targets[platform] = target_compiler(conf, 'msvc', platform, version, 'x86', vsvars, combine_common) if targets: versions[device + ' ' + version] = targets @conf def gather_winphone_targets(conf, versions, version, vc_path, vsvars): #Looking for WinPhone compilers targets = {} for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'winphone', realtarget, version, target, vsvars) if targets: versions['winphone ' + version] = targets @conf def gather_vswhere_versions(conf, versions): try: import json except ImportError: Logs.error('Visual Studio 2017 detection requires Python 2.6') return prg_path = os.environ.get('ProgramFiles(x86)', os.environ.get('ProgramFiles', 'C:\\Program Files (x86)')) vswhere = os.path.join(prg_path, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe') args = [vswhere, '-products', '*', '-legacy', '-format', 'json'] try: txt = conf.cmd_and_log(args) except Errors.WafError as e: Logs.debug('msvc: vswhere.exe failed %s', e) return if sys.version_info[0] < 3: txt = txt.decode(Utils.console_encoding()) arr = json.loads(txt) arr.sort(key=lambda x: x['installationVersion']) for entry in arr: ver = entry['installationVersion'] ver = str('.'.join(ver.split('.')[:2])) path = 
str(os.path.abspath(entry['installationPath'])) if os.path.exists(path) and ('msvc %s' % ver) not in versions: conf.gather_msvc_targets(versions, ver, path) @conf def gather_msvc_versions(conf, versions): vc_paths = [] for (v,version,reg) in gather_msvc_detected_versions(): try: try: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\VC") except OSError: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\Microsoft Visual C++") path,type = Utils.winreg.QueryValueEx(msvc_version, 'ProductDir') except OSError: try: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Wow6432node\\Microsoft\\VisualStudio\\SxS\\VS7") path,type = Utils.winreg.QueryValueEx(msvc_version, version) except OSError: continue else: vc_paths.append((version, os.path.abspath(str(path)))) continue else: vc_paths.append((version, os.path.abspath(str(path)))) wince_supported_platforms = gather_wince_supported_platforms() for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) vsvars = os.path.join(vs_path, 'Common7', 'Tools', 'vsvars32.bat') if wince_supported_platforms and os.path.isfile(vsvars): conf.gather_wince_targets(versions, version, vc_path, vsvars, wince_supported_platforms) # WP80 works with 11.0Exp and 11.0, both of which resolve to the same vc_path. # Stop after one is found. 
for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) vsvars = os.path.join(vs_path, 'VC', 'WPSDK', 'WP80', 'vcvarsphoneall.bat') if os.path.isfile(vsvars): conf.gather_winphone_targets(versions, '8.0', vc_path, vsvars) break for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) conf.gather_msvc_targets(versions, version, vc_path) @conf def gather_icl_versions(conf, versions): """ Checks ICL compilers :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^...?.?\....?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_icl_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: targetDir=target try: Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version) path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) for target,arch in all_icl_platforms: try: icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: continue else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) major = version[0:2] versions['intel ' + major] = targets @conf def gather_intel_composer_versions(conf, versions): """ Checks ICL compilers that are part of Intel 
Composer Suites :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^...?.?\...?.?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Suites') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Suites') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_icl_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: targetDir=target try: try: defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\'+targetDir) except OSError: if targetDir == 'EM64T_NATIVE': defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\EM64T') else: raise uid,type = Utils.winreg.QueryValueEx(defaults, 'SubKey') Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++') path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) # The intel compilervar_arch.bat is broken when used with Visual Studio Express 2012 # http://software.intel.com/en-us/forums/topic/328487 compilervars_warning_attr = '_compilervars_warning_key' if version[0:2] == '13' and getattr(conf, compilervars_warning_attr, True): setattr(conf, compilervars_warning_attr, False) patch_url = 'http://software.intel.com/en-us/forums/topic/328487' compilervars_arch = os.path.join(path, 'bin', 'compilervars_arch.bat') for vscomntool in ('VS110COMNTOOLS', 'VS100COMNTOOLS'): if vscomntool in os.environ: vs_express_path = os.environ[vscomntool] + r'..\IDE\VSWinExpress.exe' dev_env_path = os.environ[vscomntool] + 
r'..\IDE\devenv.exe' if (r'if exist "%VS110COMNTOOLS%..\IDE\VSWinExpress.exe"' in Utils.readf(compilervars_arch) and not os.path.exists(vs_express_path) and not os.path.exists(dev_env_path)): Logs.warn(('The Intel compilervar_arch.bat only checks for one Visual Studio SKU ' '(VSWinExpress.exe) but it does not seem to be installed at %r. ' 'The intel command line set up will fail to configure unless the file %r' 'is patched. See: %s') % (vs_express_path, compilervars_arch, patch_url)) major = version[0:2] versions['intel ' + major] = targets @conf def detect_msvc(self): return self.setup_msvc(self.get_msvc_versions()) @conf def get_msvc_versions(self): """ :return: platform to compiler configurations :rtype: dict """ dct = Utils.ordered_iter_dict() self.gather_icl_versions(dct) self.gather_intel_composer_versions(dct) self.gather_wsdk_versions(dct) self.gather_msvc_versions(dct) self.gather_vswhere_versions(dct) Logs.debug('msvc: detected versions %r', list(dct.keys())) return dct @conf def find_lt_names_msvc(self, libname, is_static=False): """ Win32/MSVC specific code to glean out information from libtool la files. this function is not attached to the task_gen class. 
Returns a triplet: (library absolute path, library name without extension, whether the library is static) """ lt_names=[ 'lib%s.la' % libname, '%s.la' % libname, ] for path in self.env.LIBPATH: for la in lt_names: laf=os.path.join(path,la) dll=None if os.path.exists(laf): ltdict = Utils.read_la_file(laf) lt_libdir=None if ltdict.get('libdir', ''): lt_libdir = ltdict['libdir'] if not is_static and ltdict.get('library_names', ''): dllnames=ltdict['library_names'].split() dll=dllnames[0].lower() dll=re.sub(r'\.dll$', '', dll) return (lt_libdir, dll, False) elif ltdict.get('old_library', ''): olib=ltdict['old_library'] if os.path.exists(os.path.join(path,olib)): return (path, olib, True) elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)): return (lt_libdir, olib, True) else: return (None, olib, True) else: raise self.errors.WafError('invalid libtool object file: %s' % laf) return (None, None, None) @conf def libname_msvc(self, libname, is_static=False): lib = libname.lower() lib = re.sub(r'\.lib$','',lib) if lib in g_msvc_systemlibs: return lib lib=re.sub('^lib','',lib) if lib == 'm': return None (lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static) if lt_path != None and lt_libname != None: if lt_static: # file existence check has been made by find_lt_names return os.path.join(lt_path,lt_libname) if lt_path != None: _libpaths = [lt_path] + self.env.LIBPATH else: _libpaths = self.env.LIBPATH static_libs=[ 'lib%ss.lib' % lib, 'lib%s.lib' % lib, '%ss.lib' % lib, '%s.lib' %lib, ] dynamic_libs=[ 'lib%s.dll.lib' % lib, 'lib%s.dll.a' % lib, '%s.dll.lib' % lib, '%s.dll.a' % lib, 'lib%s_d.lib' % lib, '%s_d.lib' % lib, '%s.lib' %lib, ] libnames=static_libs if not is_static: libnames=dynamic_libs + static_libs for path in _libpaths: for libn in libnames: if os.path.exists(os.path.join(path, libn)): Logs.debug('msvc: lib found: %s', os.path.join(path,libn)) return re.sub(r'\.lib$', '',libn) #if no lib can be found, just return the libname 
as msvc expects it self.fatal('The library %r could not be found' % libname) return re.sub(r'\.lib$', '', libname) @conf def check_lib_msvc(self, libname, is_static=False, uselib_store=None): """ Ideally we should be able to place the lib in the right env var, either STLIB or LIB, but we don't distinguish static libs from shared libs. This is ok since msvc doesn't have any special linker flag to select static libs (no env.STLIB_MARKER) """ libn = self.libname_msvc(libname, is_static) if not uselib_store: uselib_store = libname.upper() if False and is_static: # disabled self.env['STLIB_' + uselib_store] = [libn] else: self.env['LIB_' + uselib_store] = [libn] @conf def check_libs_msvc(self, libnames, is_static=False): for libname in Utils.to_list(libnames): self.check_lib_msvc(libname, is_static) def configure(conf): """ Configuration methods to call for detecting msvc """ conf.autodetect(True) conf.find_msvc() conf.msvc_common_flags() conf.cc_load_tools() conf.cxx_load_tools() conf.cc_add_flags() conf.cxx_add_flags() conf.link_add_flags() conf.visual_studio_add_flags() @conf def no_autodetect(conf): conf.env.NO_MSVC_DETECT = 1 configure(conf) @conf def autodetect(conf, arch=False): v = conf.env if v.NO_MSVC_DETECT: return compiler, version, path, includes, libdirs, cpu = conf.detect_msvc() if arch: v.DEST_CPU = cpu v.PATH = path v.INCLUDES = includes v.LIBPATH = libdirs v.MSVC_COMPILER = compiler try: v.MSVC_VERSION = float(version) except ValueError: v.MSVC_VERSION = float(version[:-3]) def _get_prog_names(conf, compiler): if compiler == 'intel': compiler_name = 'ICL' linker_name = 'XILINK' lib_name = 'XILIB' else: # assumes CL.exe compiler_name = 'CL' linker_name = 'LINK' lib_name = 'LIB' return compiler_name, linker_name, lib_name @conf def find_msvc(conf): """Due to path format limitations, limit operation only to native Win32. 
Yeah it sucks.""" if sys.platform == 'cygwin': conf.fatal('MSVC module does not work under cygwin Python!') # the autodetection is supposed to be performed before entering in this method v = conf.env path = v.PATH compiler = v.MSVC_COMPILER version = v.MSVC_VERSION compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) v.MSVC_MANIFEST = (compiler == 'msvc' and version >= 8) or (compiler == 'wsdk' and version >= 6) or (compiler == 'intel' and version >= 11) # compiler cxx = conf.find_program(compiler_name, var='CXX', path_list=path) # before setting anything, check if the compiler is really msvc env = dict(conf.environ) if path: env.update(PATH = ';'.join(path)) if not conf.cmd_and_log(cxx + ['/nologo', '/help'], env=env): conf.fatal('the msvc compiler could not be identified') # c/c++ compiler v.CC = v.CXX = cxx v.CC_NAME = v.CXX_NAME = 'msvc' # linker if not v.LINK_CXX: conf.find_program(linker_name, path_list=path, errmsg='%s was not found (linker)' % linker_name, var='LINK_CXX') if not v.LINK_CC: v.LINK_CC = v.LINK_CXX # staticlib linker if not v.AR: stliblink = conf.find_program(lib_name, path_list=path, var='AR') if not stliblink: return v.ARFLAGS = ['/nologo'] # manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later if v.MSVC_MANIFEST: conf.find_program('MT', path_list=path, var='MT') v.MTFLAGS = ['/nologo'] try: conf.load('winres') except Errors.ConfigurationError: Logs.warn('Resource compiler not found. 
Compiling resource file is disabled') @conf def visual_studio_add_flags(self): """visual studio flags found in the system environment""" v = self.env if self.environ.get('INCLUDE'): v.prepend_value('INCLUDES', [x for x in self.environ['INCLUDE'].split(';') if x]) # notice the 'S' if self.environ.get('LIB'): v.prepend_value('LIBPATH', [x for x in self.environ['LIB'].split(';') if x]) @conf def msvc_common_flags(conf): """ Setup the flags required for executing the msvc compiler """ v = conf.env v.DEST_BINFMT = 'pe' v.append_value('CFLAGS', ['/nologo']) v.append_value('CXXFLAGS', ['/nologo']) v.append_value('LINKFLAGS', ['/nologo']) v.DEFINES_ST = '/D%s' v.CC_SRC_F = '' v.CC_TGT_F = ['/c', '/Fo'] v.CXX_SRC_F = '' v.CXX_TGT_F = ['/c', '/Fo'] if (v.MSVC_COMPILER == 'msvc' and v.MSVC_VERSION >= 8) or (v.MSVC_COMPILER == 'wsdk' and v.MSVC_VERSION >= 6): v.CC_TGT_F = ['/FC'] + v.CC_TGT_F v.CXX_TGT_F = ['/FC'] + v.CXX_TGT_F v.CPPPATH_ST = '/I%s' # template for adding include paths v.AR_TGT_F = v.CCLNK_TGT_F = v.CXXLNK_TGT_F = '/OUT:' # CRT specific flags v.CFLAGS_CRT_MULTITHREADED = v.CXXFLAGS_CRT_MULTITHREADED = ['/MT'] v.CFLAGS_CRT_MULTITHREADED_DLL = v.CXXFLAGS_CRT_MULTITHREADED_DLL = ['/MD'] v.CFLAGS_CRT_MULTITHREADED_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DBG = ['/MTd'] v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = ['/MDd'] v.LIB_ST = '%s.lib' v.LIBPATH_ST = '/LIBPATH:%s' v.STLIB_ST = '%s.lib' v.STLIBPATH_ST = '/LIBPATH:%s' if v.MSVC_MANIFEST: v.append_value('LINKFLAGS', ['/MANIFEST']) v.CFLAGS_cshlib = [] v.CXXFLAGS_cxxshlib = [] v.LINKFLAGS_cshlib = v.LINKFLAGS_cxxshlib = ['/DLL'] v.cshlib_PATTERN = v.cxxshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.lib' v.IMPLIB_ST = '/IMPLIB:%s' v.LINKFLAGS_cstlib = [] v.cstlib_PATTERN = v.cxxstlib_PATTERN = '%s.lib' v.cprogram_PATTERN = v.cxxprogram_PATTERN = '%s.exe' v.def_PATTERN = '/def:%s' ####################################################################################################### ##### 
conf above, build below @after_method('apply_link') @feature('c', 'cxx') def apply_flags_msvc(self): """ Add additional flags implied by msvc, such as subsystems and pdb files:: def build(bld): bld.stlib(source='main.c', target='bar', subsystem='gruik') """ if self.env.CC_NAME != 'msvc' or not getattr(self, 'link_task', None): return is_static = isinstance(self.link_task, ccroot.stlink_task) subsystem = getattr(self, 'subsystem', '') if subsystem: subsystem = '/subsystem:%s' % subsystem flags = is_static and 'ARFLAGS' or 'LINKFLAGS' self.env.append_value(flags, subsystem) if not is_static: for f in self.env.LINKFLAGS: d = f.lower() if d[1:] in ('debug', 'debug:full', 'debug:fastlink'): pdbnode = self.link_task.outputs[0].change_ext('.pdb') self.link_task.outputs.append(pdbnode) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=pdbnode) break @feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib') @after_method('apply_link') def apply_manifest(self): """ Special linker for MSVC with support for embedding manifests into DLL's and executables compiled by Visual Studio 2005 or probably later. Without the manifest file, the binaries are unusable. 
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx """ if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST and getattr(self, 'link_task', None): out_node = self.link_task.outputs[0] man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') self.link_task.outputs.append(man_node) self.env.DO_MANIFEST = True def make_winapp(self, family): append = self.env.append_unique append('DEFINES', 'WINAPI_FAMILY=%s' % family) append('CXXFLAGS', ['/ZW', '/TP']) for lib_path in self.env.LIBPATH: append('CXXFLAGS','/AI%s'%lib_path) @feature('winphoneapp') @after_method('process_use') @after_method('propagate_uselib_vars') def make_winphone_app(self): """ Insert configuration flags for windows phone applications (adds /ZW, /TP...) """ make_winapp(self, 'WINAPI_FAMILY_PHONE_APP') self.env.append_unique('LINKFLAGS', ['/NODEFAULTLIB:ole32.lib', 'PhoneAppModelHost.lib']) @feature('winapp') @after_method('process_use') @after_method('propagate_uselib_vars') def make_windows_app(self): """ Insert configuration flags for windows applications (adds /ZW, /TP...) """ make_winapp(self, 'WINAPI_FAMILY_DESKTOP_APP')
35,520
Python
.py
905
36.308287
180
0.707822
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,736
vala.py
projecthamster_hamster/waflib/Tools/vala.py
#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 # Radosław Szkodziński, 2010 """ At this point, vala is still unstable, so do not expect this tool to be too stable either (apis, etc) """ import re from waflib import Build, Context, Errors, Logs, Node, Options, Task, Utils from waflib.TaskGen import extension, taskgen_method from waflib.Configure import conf class valac(Task.Task): """ Compiles vala files """ #run_str = "${VALAC} ${VALAFLAGS}" # ideally #vars = ['VALAC_VERSION'] vars = ["VALAC", "VALAC_VERSION", "VALAFLAGS"] ext_out = ['.h'] def run(self): cmd = self.env.VALAC + self.env.VALAFLAGS resources = getattr(self, 'vala_exclude', []) cmd.extend([a.abspath() for a in self.inputs if a not in resources]) ret = self.exec_command(cmd, cwd=self.vala_dir_node.abspath()) if ret: return ret if self.generator.dump_deps_node: self.generator.dump_deps_node.write('\n'.join(self.generator.packages)) return ret @taskgen_method def init_vala_task(self): """ Initializes the vala task with the relevant data (acts as a constructor) """ self.profile = getattr(self, 'profile', 'gobject') self.packages = packages = Utils.to_list(getattr(self, 'packages', [])) self.use = Utils.to_list(getattr(self, 'use', [])) if packages and not self.use: self.use = packages[:] # copy if self.profile == 'gobject': if not 'GOBJECT' in self.use: self.use.append('GOBJECT') def addflags(flags): self.env.append_value('VALAFLAGS', flags) if self.profile: addflags('--profile=%s' % self.profile) valatask = self.valatask # output directory if hasattr(self, 'vala_dir'): if isinstance(self.vala_dir, str): valatask.vala_dir_node = self.path.get_bld().make_node(self.vala_dir) try: valatask.vala_dir_node.mkdir() except OSError: raise self.bld.fatal('Cannot create the vala dir %r' % valatask.vala_dir_node) else: valatask.vala_dir_node = self.vala_dir else: valatask.vala_dir_node = self.path.get_bld() addflags('--directory=%s' % valatask.vala_dir_node.abspath()) if hasattr(self, 'thread'): if 
self.profile == 'gobject': if not 'GTHREAD' in self.use: self.use.append('GTHREAD') else: #Vala doesn't have threading support for dova nor posix Logs.warn('Profile %s means no threading support', self.profile) self.thread = False if self.thread: addflags('--thread') self.is_lib = 'cprogram' not in self.features if self.is_lib: addflags('--library=%s' % self.target) h_node = valatask.vala_dir_node.find_or_declare('%s.h' % self.target) valatask.outputs.append(h_node) addflags('--header=%s' % h_node.name) valatask.outputs.append(valatask.vala_dir_node.find_or_declare('%s.vapi' % self.target)) if getattr(self, 'gir', None): gir_node = valatask.vala_dir_node.find_or_declare('%s.gir' % self.gir) addflags('--gir=%s' % gir_node.name) valatask.outputs.append(gir_node) self.vala_target_glib = getattr(self, 'vala_target_glib', getattr(Options.options, 'vala_target_glib', None)) if self.vala_target_glib: addflags('--target-glib=%s' % self.vala_target_glib) addflags(['--define=%s' % x for x in Utils.to_list(getattr(self, 'vala_defines', []))]) packages_private = Utils.to_list(getattr(self, 'packages_private', [])) addflags(['--pkg=%s' % x for x in packages_private]) def _get_api_version(): api_version = '1.0' if hasattr(Context.g_module, 'API_VERSION'): version = Context.g_module.API_VERSION.split(".") if version[0] == "0": api_version = "0." 
+ version[1] else: api_version = version[0] + ".0" return api_version self.includes = Utils.to_list(getattr(self, 'includes', [])) valatask.install_path = getattr(self, 'install_path', '') valatask.vapi_path = getattr(self, 'vapi_path', '${DATAROOTDIR}/vala/vapi') valatask.pkg_name = getattr(self, 'pkg_name', self.env.PACKAGE) valatask.header_path = getattr(self, 'header_path', '${INCLUDEDIR}/%s-%s' % (valatask.pkg_name, _get_api_version())) valatask.install_binding = getattr(self, 'install_binding', True) self.vapi_dirs = vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', [])) #includes = [] if hasattr(self, 'use'): local_packages = Utils.to_list(self.use)[:] # make sure to have a copy seen = [] while len(local_packages) > 0: package = local_packages.pop() if package in seen: continue seen.append(package) # check if the package exists try: package_obj = self.bld.get_tgen_by_name(package) except Errors.WafError: continue # in practice the other task is already processed # but this makes it explicit package_obj.post() package_name = package_obj.target task = getattr(package_obj, 'valatask', None) if task: for output in task.outputs: if output.name == package_name + ".vapi": valatask.set_run_after(task) if package_name not in packages: packages.append(package_name) if output.parent not in vapi_dirs: vapi_dirs.append(output.parent) if output.parent not in self.includes: self.includes.append(output.parent) if hasattr(package_obj, 'use'): lst = self.to_list(package_obj.use) lst.reverse() local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages addflags(['--pkg=%s' % p for p in packages]) for vapi_dir in vapi_dirs: if isinstance(vapi_dir, Node.Node): v_node = vapi_dir else: v_node = self.path.find_dir(vapi_dir) if not v_node: Logs.warn('Unable to locate Vala API directory: %r', vapi_dir) else: addflags('--vapidir=%s' % v_node.abspath()) self.dump_deps_node = None if self.is_lib and self.packages: self.dump_deps_node = 
valatask.vala_dir_node.find_or_declare('%s.deps' % self.target) valatask.outputs.append(self.dump_deps_node) if self.is_lib and valatask.install_binding: headers_list = [o for o in valatask.outputs if o.suffix() == ".h"] if headers_list: self.install_vheader = self.add_install_files(install_to=valatask.header_path, install_from=headers_list) vapi_list = [o for o in valatask.outputs if (o.suffix() in (".vapi", ".deps"))] if vapi_list: self.install_vapi = self.add_install_files(install_to=valatask.vapi_path, install_from=vapi_list) gir_list = [o for o in valatask.outputs if o.suffix() == '.gir'] if gir_list: self.install_gir = self.add_install_files( install_to=getattr(self, 'gir_path', '${DATAROOTDIR}/gir-1.0'), install_from=gir_list) if hasattr(self, 'vala_resources'): nodes = self.to_nodes(self.vala_resources) valatask.vala_exclude = getattr(valatask, 'vala_exclude', []) + nodes valatask.inputs.extend(nodes) for x in nodes: addflags(['--gresources', x.abspath()]) @extension('.vala', '.gs') def vala_file(self, node): """ Compile a vala file and bind the task to *self.valatask*. If an existing vala task is already set, add the node to its inputs. 
The typical example is:: def build(bld): bld.program( packages = 'gtk+-2.0', target = 'vala-gtk-example', use = 'GTK GLIB', source = 'vala-gtk-example.vala foo.vala', vala_defines = ['DEBUG'] # adds --define=<xyz> values to the command-line # the following arguments are for libraries #gir = 'hello-1.0', #gir_path = '/tmp', #vapi_path = '/tmp', #pkg_name = 'hello' # disable installing of gir, vapi and header #install_binding = False # profile = 'xyz' # adds --profile=<xyz> to enable profiling # thread = True, # adds --thread, except if profile is on or not on 'gobject' # vala_target_glib = 'xyz' # adds --target-glib=<xyz>, can be given through the command-line option --vala-target-glib=<xyz> ) :param node: vala file :type node: :py:class:`waflib.Node.Node` """ try: valatask = self.valatask except AttributeError: valatask = self.valatask = self.create_task('valac') self.init_vala_task() valatask.inputs.append(node) name = node.name[:node.name.rfind('.')] + '.c' c_node = valatask.vala_dir_node.find_or_declare(name) valatask.outputs.append(c_node) self.source.append(c_node) @extension('.vapi') def vapi_file(self, node): try: valatask = self.valatask except AttributeError: valatask = self.valatask = self.create_task('valac') self.init_vala_task() valatask.inputs.append(node) @conf def find_valac(self, valac_name, min_version): """ Find the valac program, and execute it to store the version number in *conf.env.VALAC_VERSION* :param valac_name: program name :type valac_name: string or list of string :param min_version: minimum version acceptable :type min_version: tuple of int """ valac = self.find_program(valac_name, var='VALAC') try: output = self.cmd_and_log(valac + ['--version']) except Errors.WafError: valac_version = None else: ver = re.search(r'\d+.\d+.\d+', output).group().split('.') valac_version = tuple([int(x) for x in ver]) self.msg('Checking for %s version >= %r' % (valac_name, min_version), valac_version, valac_version and valac_version >= min_version) if 
valac and valac_version < min_version: self.fatal("%s version %r is too old, need >= %r" % (valac_name, valac_version, min_version)) self.env.VALAC_VERSION = valac_version return valac @conf def check_vala(self, min_version=(0,8,0), branch=None): """ Check if vala compiler from a given branch exists of at least a given version. :param min_version: minimum version acceptable (0.8.0) :type min_version: tuple :param branch: first part of the version number, in case a snapshot is used (0, 8) :type branch: tuple of int """ if self.env.VALA_MINVER: min_version = self.env.VALA_MINVER if self.env.VALA_MINVER_BRANCH: branch = self.env.VALA_MINVER_BRANCH if not branch: branch = min_version[:2] try: find_valac(self, 'valac-%d.%d' % (branch[0], branch[1]), min_version) except self.errors.ConfigurationError: find_valac(self, 'valac', min_version) @conf def check_vala_deps(self): """ Load the gobject and gthread packages if they are missing. """ if not self.env.HAVE_GOBJECT: pkg_args = {'package': 'gobject-2.0', 'uselib_store': 'GOBJECT', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib self.check_cfg(**pkg_args) if not self.env.HAVE_GTHREAD: pkg_args = {'package': 'gthread-2.0', 'uselib_store': 'GTHREAD', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib self.check_cfg(**pkg_args) def configure(self): """ Use the following to enforce minimum vala version:: def configure(conf): conf.env.VALA_MINVER = (0, 10, 0) conf.load('vala') """ self.load('gnu_dirs') self.check_vala_deps() self.check_vala() self.add_os_flags('VALAFLAGS') self.env.append_unique('VALAFLAGS', ['-C']) def options(opt): """ Load the :py:mod:`waflib.Tools.gnu_dirs` tool and add the ``--vala-target-glib`` command-line option """ opt.load('gnu_dirs') valaopts = opt.add_option_group('Vala Compiler Options') 
valaopts.add_option('--vala-target-glib', default=None, dest='vala_target_glib', metavar='MAJOR.MINOR', help='Target version of glib for Vala GObject code generation')
11,373
Python
.py
296
35.128378
128
0.691993
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,737
c_preproc.py
projecthamster_hamster/waflib/Tools/c_preproc.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ C/C++ preprocessor for finding dependencies Reasons for using the Waf preprocessor by default #. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files) #. Not all compilers provide .d files for obtaining the dependencies (portability) #. A naive file scanner will not catch the constructs such as "#include foo()" #. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything) Regarding the speed concerns: * the preprocessing is performed only when files must be compiled * the macros are evaluated only for #if/#elif/#include * system headers are not scanned by default Now if you do not want the Waf preprocessor, the tool +gccdeps* uses the .d files produced during the compilation to track the dependencies (useful when used with the boost libraries). It only works with gcc >= 4.4 though. A dumb preprocessor is also available in the tool *c_dumbpreproc* """ # TODO: more varargs, pragma once import re, string, traceback from waflib import Logs, Utils, Errors class PreprocError(Errors.WafError): pass FILE_CACHE_SIZE = 100000 LINE_CACHE_SIZE = 100000 POPFILE = '-' "Constant representing a special token used in :py:meth:`waflib.Tools.c_preproc.c_parser.start` iteration to switch to a header read previously" recursion_limit = 150 "Limit on the amount of files to read in the dependency scanner" go_absolute = False "Set to True to track headers on files in /usr/include, else absolute paths are ignored (but it becomes very slow)" standard_includes = ['/usr/local/include', '/usr/include'] if Utils.is_win32: standard_includes = [] use_trigraphs = 0 """Apply trigraph rules (False by default)""" # obsolete, do not use strict_quotes = 0 g_optrans = { 'not':'!', 'not_eq':'!', 'and':'&&', 'and_eq':'&=', 'or':'||', 'or_eq':'|=', 'xor':'^', 'xor_eq':'^=', 'bitand':'&', 'bitor':'|', 'compl':'~', } """Operators such as 
and/or/xor for c++. Set an empty dict to disable.""" # ignore #warning and #error re_lines = re.compile( '^[ \t]*(?:#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE) """Match #include lines""" re_mac = re.compile(r"^[a-zA-Z_]\w*") """Match macro definitions""" re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]') """Match macro functions""" re_pragma_once = re.compile(r'^\s*once\s*', re.IGNORECASE) """Match #pragma once statements""" re_nl = re.compile('\\\\\r*\n', re.MULTILINE) """Match newlines""" re_cpp = re.compile(r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE ) """Filter C/C++ comments""" trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')] """Trigraph definitions""" chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39} """Escape characters""" NUM = 'i' """Number token""" OP = 'O' """Operator token""" IDENT = 'T' """Identifier token""" STR = 's' """String token""" CHAR = 'c' """Character token""" tok_types = [NUM, STR, IDENT, OP] """Token types""" exp_types = [ r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""", r'L?"([^"\\]|\\.)*"', r'[a-zA-Z_]\w*', r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]', ] """Expression types""" re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M) """Match expressions into tokens""" accepted = 'a' """Parser state is *accepted*""" ignored = 'i' """Parser state is *ignored*, for example preprocessor lines in an #if 0 block""" undefined = 'u' """Parser state is *undefined* 
at the moment""" skipped = 's' """Parser state is *skipped*, for example preprocessor lines in a #elif 0 block""" def repl(m): """Replace function used with :py:attr:`waflib.Tools.c_preproc.re_cpp`""" s = m.group() if s[0] == '/': return ' ' return s prec = {} """ Operator precedence rules required for parsing expressions of the form:: #if 1 && 2 != 0 """ ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ','] for x, syms in enumerate(ops): for u in syms.split(): prec[u] = x def reduce_nums(val_1, val_2, val_op): """ Apply arithmetic rules to compute a result :param val1: input parameter :type val1: int or string :param val2: input parameter :type val2: int or string :param val_op: C operator in *+*, */*, *-*, etc :type val_op: string :rtype: int """ #print val_1, val_2, val_op # now perform the operation, make certain a and b are numeric try: a = 0 + val_1 except TypeError: a = int(val_1) try: b = 0 + val_2 except TypeError: b = int(val_2) d = val_op if d == '%': c = a % b elif d=='+': c = a + b elif d=='-': c = a - b elif d=='*': c = a * b elif d=='/': c = a / b elif d=='^': c = a ^ b elif d=='==': c = int(a == b) elif d=='|' or d == 'bitor': c = a | b elif d=='||' or d == 'or' : c = int(a or b) elif d=='&' or d == 'bitand': c = a & b elif d=='&&' or d == 'and': c = int(a and b) elif d=='!=' or d == 'not_eq': c = int(a != b) elif d=='^' or d == 'xor': c = int(a^b) elif d=='<=': c = int(a <= b) elif d=='<': c = int(a < b) elif d=='>': c = int(a > b) elif d=='>=': c = int(a >= b) elif d=='<<': c = a << b elif d=='>>': c = a >> b else: c = 0 return c def get_num(lst): """ Try to obtain a number from a list of tokens. The token types are defined in :py:attr:`waflib.Tools.ccroot.tok_types`. 
:param lst: list of preprocessor tokens :type lst: list of tuple (tokentype, value) :return: a pair containing the number and the rest of the list :rtype: tuple(value, list) """ if not lst: raise PreprocError('empty list for get_num') (p, v) = lst[0] if p == OP: if v == '(': count_par = 1 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 if count_par == 0: break elif v == '(': count_par += 1 i += 1 else: raise PreprocError('rparen expected %r' % lst) (num, _) = get_term(lst[1:i]) return (num, lst[i+1:]) elif v == '+': return get_num(lst[1:]) elif v == '-': num, lst = get_num(lst[1:]) return (reduce_nums('-1', num, '*'), lst) elif v == '!': num, lst = get_num(lst[1:]) return (int(not int(num)), lst) elif v == '~': num, lst = get_num(lst[1:]) return (~ int(num), lst) else: raise PreprocError('Invalid op token %r for get_num' % lst) elif p == NUM: return v, lst[1:] elif p == IDENT: # all macros should have been replaced, remaining identifiers eval to 0 return 0, lst[1:] else: raise PreprocError('Invalid token %r for get_num' % lst) def get_term(lst): """ Evaluate an expression recursively, for example:: 1+1+1 -> 2+1 -> 3 :param lst: list of tokens :type lst: list of tuple(token, value) :return: the value and the remaining tokens :rtype: value, list """ if not lst: raise PreprocError('empty list for get_term') num, lst = get_num(lst) if not lst: return (num, []) (p, v) = lst[0] if p == OP: if v == ',': # skip return get_term(lst[1:]) elif v == '?': count_par = 0 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 elif v == '(': count_par += 1 elif v == ':': if count_par == 0: break i += 1 else: raise PreprocError('rparen expected %r' % lst) if int(num): return get_term(lst[1:i]) else: return get_term(lst[i+1:]) else: num2, lst = get_num(lst[1:]) if not lst: # no more tokens to process num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) # operator precedence p2, v2 = lst[0] if p2 != OP: 
raise PreprocError('op expected %r' % lst) if prec[v2] >= prec[v]: num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) else: num3, lst = get_num(lst[1:]) num3 = reduce_nums(num2, num3, v2) return get_term([(NUM, num), (p, v), (NUM, num3)] + lst) raise PreprocError('cannot reduce %r' % lst) def reduce_eval(lst): """ Take a list of tokens and output true or false for #if/#elif conditions. :param lst: a list of tokens :type lst: list of tuple(token, value) :return: a token :rtype: tuple(NUM, int) """ num, lst = get_term(lst) return (NUM, num) def stringize(lst): """ Merge a list of tokens into a string :param lst: a list of tokens :type lst: list of tuple(token, value) :rtype: string """ lst = [str(v2) for (p2, v2) in lst] return "".join(lst) def paste_tokens(t1, t2): """ Token pasting works between identifiers, particular operators, and identifiers and numbers:: a ## b -> ab > ## = -> >= a ## 2 -> a2 :param t1: token :type t1: tuple(type, value) :param t2: token :type t2: tuple(type, value) """ p1 = None if t1[0] == OP and t2[0] == OP: p1 = OP elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM): p1 = IDENT elif t1[0] == NUM and t2[0] == NUM: p1 = NUM if not p1: raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2)) return (p1, t1[1] + t2[1]) def reduce_tokens(lst, defs, ban=[]): """ Replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied :param lst: list of tokens :type lst: list of tuple(token, value) :param defs: macro definitions :type defs: dict :param ban: macros that cannot be substituted (recursion is not allowed) :type ban: list of string :return: the new list of tokens :rtype: value, list """ i = 0 while i < len(lst): (p, v) = lst[i] if p == IDENT and v == "defined": del lst[i] if i < len(lst): (p2, v2) = lst[i] if p2 == IDENT: if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) elif p2 == OP and v2 == '(': del lst[i] (p2, v2) = lst[i] del lst[i] # remove 
the ident, and change the ) for the value if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) else: raise PreprocError('Invalid define expression %r' % lst) elif p == IDENT and v in defs: if isinstance(defs[v], str): a, b = extract_macro(defs[v]) defs[v] = b macro_def = defs[v] to_add = macro_def[1] if isinstance(macro_def[0], list): # macro without arguments del lst[i] accu = to_add[:] reduce_tokens(accu, defs, ban+[v]) for tmp in accu: lst.insert(i, tmp) i += 1 else: # collect the arguments for the funcall args = [] del lst[i] if i >= len(lst): raise PreprocError('expected ( after %r (got nothing)' % v) (p2, v2) = lst[i] if p2 != OP or v2 != '(': raise PreprocError('expected ( after %r' % v) del lst[i] one_param = [] count_paren = 0 while i < len(lst): p2, v2 = lst[i] del lst[i] if p2 == OP and count_paren == 0: if v2 == '(': one_param.append((p2, v2)) count_paren += 1 elif v2 == ')': if one_param: args.append(one_param) break elif v2 == ',': if not one_param: raise PreprocError('empty param in funcall %r' % v) args.append(one_param) one_param = [] else: one_param.append((p2, v2)) else: one_param.append((p2, v2)) if v2 == '(': count_paren += 1 elif v2 == ')': count_paren -= 1 else: raise PreprocError('malformed macro') # substitute the arguments within the define expression accu = [] arg_table = macro_def[0] j = 0 while j < len(to_add): (p2, v2) = to_add[j] if p2 == OP and v2 == '#': # stringize is for arguments only if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] accu.append((STR, stringize(toks))) j += 1 else: accu.append((p2, v2)) elif p2 == OP and v2 == '##': # token pasting, how can man invent such a complicated system? 
if accu and j+1 < len(to_add): # we have at least two tokens t1 = accu[-1] if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] if toks: accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1]) accu.extend(toks[1:]) else: # error, case "a##" accu.append((p2, v2)) accu.extend(toks) elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__': # first collect the tokens va_toks = [] st = len(macro_def[0]) pt = len(args) for x in args[pt-st+1:]: va_toks.extend(x) va_toks.append((OP, ',')) if va_toks: va_toks.pop() # extra comma if len(accu)>1: (p3, v3) = accu[-1] (p4, v4) = accu[-2] if v3 == '##': # remove the token paste accu.pop() if v4 == ',' and pt < st: # remove the comma accu.pop() accu += va_toks else: accu[-1] = paste_tokens(t1, to_add[j+1]) j += 1 else: # Invalid paste, case "##a" or "b##" accu.append((p2, v2)) elif p2 == IDENT and v2 in arg_table: toks = args[arg_table[v2]] reduce_tokens(toks, defs, ban+[v]) accu.extend(toks) else: accu.append((p2, v2)) j += 1 reduce_tokens(accu, defs, ban+[v]) for x in range(len(accu)-1, -1, -1): lst.insert(i, accu[x]) i += 1 def eval_macro(lst, defs): """ Reduce the tokens by :py:func:`waflib.Tools.c_preproc.reduce_tokens` and try to return a 0/1 result by :py:func:`waflib.Tools.c_preproc.reduce_eval`. 
:param lst: list of tokens :type lst: list of tuple(token, value) :param defs: macro definitions :type defs: dict :rtype: int """ reduce_tokens(lst, defs, []) if not lst: raise PreprocError('missing tokens to evaluate') if lst: p, v = lst[0] if p == IDENT and v not in defs: raise PreprocError('missing macro %r' % lst) p, v = reduce_eval(lst) return int(v) != 0 def extract_macro(txt): """ Process a macro definition of the form:: #define f(x, y) x * y into a function or a simple macro without arguments :param txt: expression to exact a macro definition from :type txt: string :return: a tuple containing the name, the list of arguments and the replacement :rtype: tuple(string, [list, list]) """ t = tokenize(txt) if re_fun.search(txt): p, name = t[0] p, v = t[1] if p != OP: raise PreprocError('expected (') i = 1 pindex = 0 params = {} prev = '(' while 1: i += 1 p, v = t[i] if prev == '(': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == ')': break else: raise PreprocError('unexpected token (3)') elif prev == IDENT: if p == OP and v == ',': prev = v elif p == OP and v == ')': break else: raise PreprocError('comma or ... expected') elif prev == ',': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == '...': raise PreprocError('not implemented (1)') else: raise PreprocError('comma or ... 
expected (2)') elif prev == '...': raise PreprocError('not implemented (2)') else: raise PreprocError('unexpected else') #~ print (name, [params, t[i+1:]]) return (name, [params, t[i+1:]]) else: (p, v) = t[0] if len(t) > 1: return (v, [[], t[1:]]) else: # empty define, assign an empty token return (v, [[], [('T','')]]) re_include = re.compile(r'^\s*(<(?:.*)>|"(?:.*)")') def extract_include(txt, defs): """ Process a line in the form:: #include foo :param txt: include line to process :type txt: string :param defs: macro definitions :type defs: dict :return: the file name :rtype: string """ m = re_include.search(txt) if m: txt = m.group(1) return txt[0], txt[1:-1] # perform preprocessing and look at the result, it must match an include toks = tokenize(txt) reduce_tokens(toks, defs, ['waf_include']) if not toks: raise PreprocError('could not parse include %r' % txt) if len(toks) == 1: if toks[0][0] == STR: return '"', toks[0][1] else: if toks[0][1] == '<' and toks[-1][1] == '>': ret = '<', stringize(toks).lstrip('<').rstrip('>') return ret raise PreprocError('could not parse include %r' % txt) def parse_char(txt): """ Parse a c character :param txt: character to parse :type txt: string :return: a character literal :rtype: string """ if not txt: raise PreprocError('attempted to parse a null char') if txt[0] != '\\': return ord(txt) c = txt[1] if c == 'x': if len(txt) == 4 and txt[3] in string.hexdigits: return int(txt[2:], 16) return int(txt[2:], 16) elif c.isdigit(): if c == '0' and len(txt)==2: return 0 for i in 3, 2, 1: if len(txt) > i and txt[1:1+i].isdigit(): return (1+i, int(txt[1:1+i], 8)) else: try: return chr_esc[c] except KeyError: raise PreprocError('could not parse char literal %r' % txt) def tokenize(s): """ Convert a string into a list of tokens (shlex.split does not apply to c/c++/d) :param s: input to tokenize :type s: string :return: a list of tokens :rtype: list of tuple(token, value) """ return tokenize_private(s)[:] # force a copy of the results def 
tokenize_private(s): ret = [] for match in re_clexer.finditer(s): m = match.group for name in tok_types: v = m(name) if v: if name == IDENT: if v in g_optrans: name = OP elif v.lower() == "true": v = 1 name = NUM elif v.lower() == "false": v = 0 name = NUM elif name == NUM: if m('oct'): v = int(v, 8) elif m('hex'): v = int(m('hex'), 16) elif m('n0'): v = m('n0') else: v = m('char') if v: v = parse_char(v) else: v = m('n2') or m('n4') elif name == OP: if v == '%:': v = '#' elif v == '%:%:': v = '##' elif name == STR: # remove the quotes around the string v = v[1:-1] ret.append((name, v)) break return ret def format_defines(lst): ret = [] for y in lst: if y: pos = y.find('=') if pos == -1: # "-DFOO" should give "#define FOO 1" ret.append(y) elif pos > 0: # all others are assumed to be -DX=Y ret.append('%s %s' % (y[:pos], y[pos+1:])) else: raise ValueError('Invalid define expression %r' % y) return ret class c_parser(object): """ Used by :py:func:`waflib.Tools.c_preproc.scan` to parse c/h files. Note that by default, only project headers are parsed. 
""" def __init__(self, nodepaths=None, defines=None): self.lines = [] """list of lines read""" if defines is None: self.defs = {} else: self.defs = dict(defines) # make a copy self.state = [] self.count_files = 0 self.currentnode_stack = [] self.nodepaths = nodepaths or [] """Include paths""" self.nodes = [] """List of :py:class:`waflib.Node.Node` found so far""" self.names = [] """List of file names that could not be matched by any file""" self.curfile = '' """Current file""" self.ban_includes = set() """Includes that must not be read (#pragma once)""" self.listed = set() """Include nodes/names already listed to avoid duplicates in self.nodes/self.names""" def cached_find_resource(self, node, filename): """ Find a file from the input directory :param node: directory :type node: :py:class:`waflib.Node.Node` :param filename: header to find :type filename: string :return: the node if found, or None :rtype: :py:class:`waflib.Node.Node` """ try: cache = node.ctx.preproc_cache_node except AttributeError: cache = node.ctx.preproc_cache_node = Utils.lru_cache(FILE_CACHE_SIZE) key = (node, filename) try: return cache[key] except KeyError: ret = node.find_resource(filename) if ret: if getattr(ret, 'children', None): ret = None elif ret.is_child_of(node.ctx.bldnode): tmp = node.ctx.srcnode.search_node(ret.path_from(node.ctx.bldnode)) if tmp and getattr(tmp, 'children', None): ret = None cache[key] = ret return ret def tryfind(self, filename, kind='"', env=None): """ Try to obtain a node from the filename based from the include paths. Will add the node found to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` or the file name to :py:attr:`waflib.Tools.c_preproc.c_parser.names` if no corresponding file is found. Called by :py:attr:`waflib.Tools.c_preproc.c_parser.start`. 
:param filename: header to find :type filename: string :return: the node if found :rtype: :py:class:`waflib.Node.Node` """ if filename.endswith('.moc'): # we could let the qt4 module use a subclass, but then the function "scan" below must be duplicated # in the qt4 and in the qt5 classes. So we have two lines here and it is sufficient. self.names.append(filename) return None self.curfile = filename found = None if kind == '"': if env.MSVC_VERSION: for n in reversed(self.currentnode_stack): found = self.cached_find_resource(n, filename) if found: break else: found = self.cached_find_resource(self.currentnode_stack[-1], filename) if not found: for n in self.nodepaths: found = self.cached_find_resource(n, filename) if found: break listed = self.listed if found and not found in self.ban_includes: if found not in listed: listed.add(found) self.nodes.append(found) self.addlines(found) else: if filename not in listed: listed.add(filename) self.names.append(filename) return found def filter_comments(self, node): """ Filter the comments from a c/h file, and return the preprocessor lines. The regexps :py:attr:`waflib.Tools.c_preproc.re_cpp`, :py:attr:`waflib.Tools.c_preproc.re_nl` and :py:attr:`waflib.Tools.c_preproc.re_lines` are used internally. 
:return: the preprocessor directives as a list of (keyword, line) :rtype: a list of string pairs """ # return a list of tuples : keyword, line code = node.read() if use_trigraphs: for (a, b) in trig_def: code = code.split(a).join(b) code = re_nl.sub('', code) code = re_cpp.sub(repl, code) return re_lines.findall(code) def parse_lines(self, node): try: cache = node.ctx.preproc_cache_lines except AttributeError: cache = node.ctx.preproc_cache_lines = Utils.lru_cache(LINE_CACHE_SIZE) try: return cache[node] except KeyError: cache[node] = lines = self.filter_comments(node) lines.append((POPFILE, '')) lines.reverse() return lines def addlines(self, node): """ Add the lines from a header in the list of preprocessor lines to parse :param node: header :type node: :py:class:`waflib.Node.Node` """ self.currentnode_stack.append(node.parent) self.count_files += 1 if self.count_files > recursion_limit: # issue #812 raise PreprocError('recursion limit exceeded') if Logs.verbose: Logs.debug('preproc: reading file %r', node) try: lines = self.parse_lines(node) except EnvironmentError: raise PreprocError('could not read the file %r' % node) except Exception: if Logs.verbose > 0: Logs.error('parsing %r failed %s', node, traceback.format_exc()) else: self.lines.extend(lines) def start(self, node, env): """ Preprocess a source file to obtain the dependencies, which are accumulated to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` and :py:attr:`waflib.Tools.c_preproc.c_parser.names`. 
:param node: source file :type node: :py:class:`waflib.Node.Node` :param env: config set containing additional defines to take into account :type env: :py:class:`waflib.ConfigSet.ConfigSet` """ Logs.debug('preproc: scanning %s (in %s)', node.name, node.parent.name) self.current_file = node self.addlines(node) # macros may be defined on the command-line, so they must be parsed as if they were part of the file if env.DEFINES: lst = format_defines(env.DEFINES) lst.reverse() self.lines.extend([('define', x) for x in lst]) while self.lines: (token, line) = self.lines.pop() if token == POPFILE: self.count_files -= 1 self.currentnode_stack.pop() continue try: state = self.state # make certain we define the state if we are about to enter in an if block if token[:2] == 'if': state.append(undefined) elif token == 'endif': state.pop() # skip lines when in a dead 'if' branch, wait for the endif if token[0] != 'e': if skipped in self.state or ignored in self.state: continue if token == 'if': ret = eval_macro(tokenize(line), self.defs) if ret: state[-1] = accepted else: state[-1] = ignored elif token == 'ifdef': m = re_mac.match(line) if m and m.group() in self.defs: state[-1] = accepted else: state[-1] = ignored elif token == 'ifndef': m = re_mac.match(line) if m and m.group() in self.defs: state[-1] = ignored else: state[-1] = accepted elif token == 'include' or token == 'import': (kind, inc) = extract_include(line, self.defs) self.current_file = self.tryfind(inc, kind, env) if token == 'import': self.ban_includes.add(self.current_file) elif token == 'elif': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: if eval_macro(tokenize(line), self.defs): state[-1] = accepted elif token == 'else': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: state[-1] = accepted elif token == 'define': try: self.defs[self.define_name(line)] = line except AttributeError: raise PreprocError('Invalid define line %r' % line) elif token == 'undef': m = 
re_mac.match(line) if m and m.group() in self.defs: self.defs.__delitem__(m.group()) #print "undef %s" % name elif token == 'pragma': if re_pragma_once.match(line.lower()): self.ban_includes.add(self.current_file) except Exception as e: if Logs.verbose: Logs.debug('preproc: line parsing failed (%s): %s %s', e, line, traceback.format_exc()) def define_name(self, line): """ :param line: define line :type line: string :rtype: string :return: the define name """ return re_mac.match(line).group() def scan(task): """ Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind:: #include some_macro() This function is bound as a task method on :py:class:`waflib.Tools.c.c` and :py:class:`waflib.Tools.cxx.cxx` for example """ try: incn = task.generator.includes_nodes except AttributeError: raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % task.generator) if go_absolute: nodepaths = incn + [task.generator.bld.root.find_dir(x) for x in standard_includes] else: nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)] tmp = c_parser(nodepaths) tmp.start(task.inputs[0], task.env) return (tmp.nodes, tmp.names)
27,701
Python
.py
930
25.874194
301
0.612852
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,738
gdc.py
projecthamster_hamster/waflib/Tools/gdc.py
#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_gdc(conf): """ Finds the program gdc and set the variable *D* """ conf.find_program('gdc', var='D') out = conf.cmd_and_log(conf.env.D + ['--version']) if out.find("gdc") == -1: conf.fatal("detected compiler is not gdc") @conf def common_flags_gdc(conf): """ Sets the flags required by *gdc* """ v = conf.env v.DFLAGS = [] v.D_SRC_F = ['-c'] v.D_TGT_F = '-o%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-o%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L%s' v.LINKFLAGS_dshlib = ['-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = '-fintfc' v.D_HDR_F = '-fintfc-file=%s' def configure(conf): """ Configuration for gdc """ conf.find_gdc() conf.load('ar') conf.load('d') conf.common_flags_gdc() conf.d_platform_flags()
1,107
Python
.py
43
23.674419
51
0.593156
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,739
intltool.py
projecthamster_hamster/waflib/Tools/intltool.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Support for translation tools such as msgfmt and intltool Usage:: def configure(conf): conf.load('gnu_dirs intltool') def build(bld): # process the .po files into .gmo files, and install them in LOCALEDIR bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") # process an input file, substituting the translations from the po dir bld( features = "intltool_in", podir = "../po", style = "desktop", flags = ["-u"], source = 'kupfer.desktop.in', install_path = "${DATADIR}/applications", ) Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory. """ from __future__ import with_statement import os, re from waflib import Context, Task, Utils, Logs import waflib.Tools.ccroot from waflib.TaskGen import feature, before_method, taskgen_method from waflib.Logs import error from waflib.Configure import conf _style_flags = { 'ba': '-b', 'desktop': '-d', 'keys': '-k', 'quoted': '--quoted-style', 'quotedxml': '--quotedxml-style', 'rfc822deb': '-r', 'schemas': '-s', 'xml': '-x', } @taskgen_method def ensure_localedir(self): """ Expands LOCALEDIR from DATAROOTDIR/locale if possible, or falls back to PREFIX/share/locale """ # use the tool gnu_dirs to provide options to define this if not self.env.LOCALEDIR: if self.env.DATAROOTDIR: self.env.LOCALEDIR = os.path.join(self.env.DATAROOTDIR, 'locale') else: self.env.LOCALEDIR = os.path.join(self.env.PREFIX, 'share', 'locale') @before_method('process_source') @feature('intltool_in') def apply_intltool_in_f(self): """ Creates tasks to translate files by intltool-merge:: def build(bld): bld( features = "intltool_in", podir = "../po", style = "desktop", flags = ["-u"], source = 'kupfer.desktop.in', install_path = "${DATADIR}/applications", ) :param podir: location of the .po files :type podir: string :param source: source files to process :type source: list of string :param style: the intltool-merge mode of 
operation, can be one of the following values: ``ba``, ``desktop``, ``keys``, ``quoted``, ``quotedxml``, ``rfc822deb``, ``schemas`` and ``xml``. See the ``intltool-merge`` man page for more information about supported modes of operation. :type style: string :param flags: compilation flags ("-quc" by default) :type flags: list of string :param install_path: installation path :type install_path: string """ try: self.meths.remove('process_source') except ValueError: pass self.ensure_localedir() podir = getattr(self, 'podir', '.') podirnode = self.path.find_dir(podir) if not podirnode: error("could not find the podir %r" % podir) return cache = getattr(self, 'intlcache', '.intlcache') self.env.INTLCACHE = [os.path.join(str(self.path.get_bld()), podir, cache)] self.env.INTLPODIR = podirnode.bldpath() self.env.append_value('INTLFLAGS', getattr(self, 'flags', self.env.INTLFLAGS_DEFAULT)) if '-c' in self.env.INTLFLAGS: self.bld.fatal('Redundant -c flag in intltool task %r' % self) style = getattr(self, 'style', None) if style: try: style_flag = _style_flags[style] except KeyError: self.bld.fatal('intltool_in style "%s" is not valid' % style) self.env.append_unique('INTLFLAGS', [style_flag]) for i in self.to_list(self.source): node = self.path.find_resource(i) task = self.create_task('intltool', node, node.change_ext('')) inst = getattr(self, 'install_path', None) if inst: self.add_install_files(install_to=inst, install_from=task.outputs) @feature('intltool_po') def apply_intltool_po(self): """ Creates tasks to process po files:: def build(bld): bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") The relevant task generator arguments are: :param podir: directory of the .po files :type podir: string :param appname: name of the application :type appname: string :param install_path: installation directory :type install_path: string The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process. 
""" try: self.meths.remove('process_source') except ValueError: pass self.ensure_localedir() appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name')) podir = getattr(self, 'podir', '.') inst = getattr(self, 'install_path', '${LOCALEDIR}') linguas = self.path.find_node(os.path.join(podir, 'LINGUAS')) if linguas: # scan LINGUAS file for locales to process with open(linguas.abspath()) as f: langs = [] for line in f.readlines(): # ignore lines containing comments if not line.startswith('#'): langs += line.split() re_linguas = re.compile('[-a-zA-Z_@.]+') for lang in langs: # Make sure that we only process lines which contain locales if re_linguas.match(lang): node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po')) task = self.create_task('po', node, node.change_ext('.mo')) if inst: filename = task.outputs[0].name (langname, ext) = os.path.splitext(filename) inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo' self.add_install_as(install_to=inst_file, install_from=task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644)) else: Logs.pprint('RED', "Error no LINGUAS file found in po directory") class po(Task.Task): """ Compiles .po files into .gmo files """ run_str = '${MSGFMT} -o ${TGT} ${SRC}' color = 'BLUE' class intltool(Task.Task): """ Calls intltool-merge to update translation files """ run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE_ST:INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}' color = 'BLUE' @conf def find_msgfmt(conf): """ Detects msgfmt and sets the ``MSGFMT`` variable """ conf.find_program('msgfmt', var='MSGFMT') @conf def find_intltool_merge(conf): """ Detects intltool-merge """ if not conf.env.PERL: conf.find_program('perl', var='PERL') conf.env.INTLCACHE_ST = '--cache=%s' conf.env.INTLFLAGS_DEFAULT = ['-q', '-u'] conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL') def configure(conf): """ Detects the program *msgfmt* and set 
*conf.env.MSGFMT*. Detects the program *intltool-merge* and set *conf.env.INTLTOOL*. It is possible to set INTLTOOL in the environment, but it must not have spaces in it:: $ INTLTOOL="/path/to/the program/intltool" waf configure If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*. """ conf.find_msgfmt() conf.find_intltool_merge() if conf.env.CC or conf.env.CXX: conf.check(header_name='locale.h')
6,784
Python
.py
192
32.614583
112
0.702579
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,740
fc_scan.py
projecthamster_hamster/waflib/Tools/fc_scan.py
#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import re INC_REGEX = r"""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])""" USE_REGEX = r"""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)""" MOD_REGEX = r"""(?:^|;)\s*MODULE(?!\s+(?:PROCEDURE|SUBROUTINE|FUNCTION))\s+(\w+)""" SMD_REGEX = r"""(?:^|;)\s*SUBMODULE\s*\(([\w:]+)\)\s*(\w+)""" re_inc = re.compile(INC_REGEX, re.I) re_use = re.compile(USE_REGEX, re.I) re_mod = re.compile(MOD_REGEX, re.I) re_smd = re.compile(SMD_REGEX, re.I) class fortran_parser(object): """ This parser returns: * the nodes corresponding to the module names to produce * the nodes corresponding to the include files used * the module names used by the fortran files """ def __init__(self, incpaths): self.seen = [] """Files already parsed""" self.nodes = [] """List of :py:class:`waflib.Node.Node` representing the dependencies to return""" self.names = [] """List of module names to return""" self.incpaths = incpaths """List of :py:class:`waflib.Node.Node` representing the include paths""" def find_deps(self, node): """ Parses a Fortran file to obtain the dependencies used/provided :param node: fortran file to read :type node: :py:class:`waflib.Node.Node` :return: lists representing the includes, the modules used, and the modules created by a fortran file :rtype: tuple of list of strings """ txt = node.read() incs = [] uses = [] mods = [] for line in txt.splitlines(): # line by line regexp search? optimize? m = re_inc.search(line) if m: incs.append(m.group(1)) m = re_use.search(line) if m: uses.append(m.group(1)) m = re_mod.search(line) if m: mods.append(m.group(1)) m = re_smd.search(line) if m: uses.append(m.group(1)) mods.append('{0}:{1}'.format(m.group(1),m.group(2))) return (incs, uses, mods) def start(self, node): """ Start parsing. 
Use the stack ``self.waiting`` to hold nodes to iterate on :param node: fortran file :type node: :py:class:`waflib.Node.Node` """ self.waiting = [node] while self.waiting: nd = self.waiting.pop(0) self.iter(nd) def iter(self, node): """ Processes a single file during dependency parsing. Extracts files used modules used and modules provided. """ incs, uses, mods = self.find_deps(node) for x in incs: if x in self.seen: continue self.seen.append(x) self.tryfind_header(x) for x in uses: name = "USE@%s" % x if not name in self.names: self.names.append(name) for x in mods: name = "MOD@%s" % x if not name in self.names: self.names.append(name) def tryfind_header(self, filename): """ Adds an include file to the list of nodes to process :param filename: file name :type filename: string """ found = None for n in self.incpaths: found = n.find_resource(filename) if found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename)
3,089
Python
.py
102
27.04902
103
0.652745
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,741
lua.py
projecthamster_hamster/waflib/Tools/lua.py
#!/usr/bin/env python # encoding: utf-8 # Sebastian Schlingmann, 2008 # Thomas Nagy, 2008-2018 (ita) """ Lua support. Compile *.lua* files into *.luac*:: def configure(conf): conf.load('lua') conf.env.LUADIR = '/usr/local/share/myapp/scripts/' def build(bld): bld(source='foo.lua') """ from waflib.TaskGen import extension from waflib import Task @extension('.lua') def add_lua(self, node): tsk = self.create_task('luac', node, node.change_ext('.luac')) inst_to = getattr(self, 'install_path', self.env.LUADIR and '${LUADIR}' or None) if inst_to: self.add_install_files(install_to=inst_to, install_from=tsk.outputs) return tsk class luac(Task.Task): run_str = '${LUAC} -s -o ${TGT} ${SRC}' color = 'PINK' def configure(conf): """ Detect the luac compiler and set *conf.env.LUAC* """ conf.find_program('luac', var='LUAC')
851
Python
.py
30
26.433333
81
0.702337
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,742
xlc.py
projecthamster_hamster/waflib/Tools/xlc.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_xlc(conf): """ Detects the Aix C compiler """ cc = conf.find_program(['xlc_r', 'xlc'], var='CC') conf.get_xlc_version(cc) conf.env.CC_NAME = 'xlc' @conf def xlc_common_flags(conf): """ Flags required for executing the Aix C compiler """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o'] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = [] v.CCLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.LINKFLAGS_cprogram = ['-Wl,-brtl'] v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-G', '-Wl,-brtl,-bexpfull'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = [] v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_xlc() conf.find_ar() conf.xlc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
1,441
Python
.py
52
25.692308
62
0.579942
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,743
cs.py
projecthamster_hamster/waflib/Tools/cs.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ C# support. A simple example:: def configure(conf): conf.load('cs') def build(bld): bld(features='cs', source='main.cs', gen='foo') Note that the configuration may compile C# snippets:: FRAG = ''' namespace Moo { public class Test { public static int Main(string[] args) { return 0; } } }''' def configure(conf): conf.check(features='cs', fragment=FRAG, compile_filename='test.cs', gen='test.exe', bintype='exe', csflags=['-pkg:gtk-sharp-2.0'], msg='Checking for Gtksharp support') """ from waflib import Utils, Task, Options, Errors from waflib.TaskGen import before_method, after_method, feature from waflib.Tools import ccroot from waflib.Configure import conf ccroot.USELIB_VARS['cs'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES']) ccroot.lib_patterns['csshlib'] = ['%s'] @feature('cs') @before_method('process_source') def apply_cs(self): """ Create a C# task bound to the attribute *cs_task*. There can be only one C# task by task generator. 
""" cs_nodes = [] no_nodes = [] for x in self.to_nodes(self.source): if x.name.endswith('.cs'): cs_nodes.append(x) else: no_nodes.append(x) self.source = no_nodes bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe') self.cs_task = tsk = self.create_task('mcs', cs_nodes, self.path.find_or_declare(self.gen)) tsk.env.CSTYPE = '/target:%s' % bintype tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath() self.env.append_value('CSFLAGS', '/platform:%s' % getattr(self, 'platform', 'anycpu')) inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}') if inst_to: # note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644) self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod) @feature('cs') @after_method('apply_cs') def use_cs(self): """ C# applications honor the **use** keyword:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', name='mylib') bld(features='cs', source='Hi.cs', includes='.', bintype='exe', gen='hi.exe', use='mylib', name='hi') """ names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: y = get(x) except Errors.WafError: self.env.append_value('CSFLAGS', '/reference:%s' % x) continue y.post() tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None) if not tsk: self.bld.fatal('cs task has no link task for use %r' % self) self.cs_task.dep_nodes.extend(tsk.outputs) # dependency self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs) self.env.append_value('CSFLAGS', '/reference:%s' % tsk.outputs[0].abspath()) @feature('cs') @after_method('apply_cs', 'use_cs') def debug_cs(self): """ The C# targets may create .mdb or .pdb files:: def build(bld): bld(features='cs', source='My.cs', bintype='library', 
gen='my.dll', csdebug='full') # csdebug is a value in (True, 'full', 'pdbonly') """ csdebug = getattr(self, 'csdebug', self.env.CSDEBUG) if not csdebug: return node = self.cs_task.outputs[0] if self.env.CS_NAME == 'mono': out = node.parent.find_or_declare(node.name + '.mdb') else: out = node.change_ext('.pdb') self.cs_task.outputs.append(out) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=out) if csdebug == 'pdbonly': val = ['/debug+', '/debug:pdbonly'] elif csdebug == 'full': val = ['/debug+', '/debug:full'] else: val = ['/debug-'] self.env.append_value('CSFLAGS', val) @feature('cs') @after_method('debug_cs') def doc_cs(self): """ The C# targets may create .xml documentation files:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdoc=True) # csdoc is a boolean value """ csdoc = getattr(self, 'csdoc', self.env.CSDOC) if not csdoc: return node = self.cs_task.outputs[0] out = node.change_ext('.xml') self.cs_task.outputs.append(out) if getattr(self, 'install_task', None): self.doc_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=out) self.env.append_value('CSFLAGS', '/doc:%s' % out.abspath()) class mcs(Task.Task): """ Compile C# files """ color = 'YELLOW' run_str = '${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}' def split_argfile(self, cmd): inline = [cmd[0]] infile = [] for x in cmd[1:]: # csc doesn't want /noconfig in @file if x.lower() == '/noconfig': inline.append(x) else: infile.append(self.quote_flag(x)) return (inline, infile) def configure(conf): """ Find a C# compiler, set the variable MCS for the compiler and CS_NAME (mono or csc) """ csc = getattr(Options.options, 'cscbinary', None) if csc: conf.env.MCS = csc conf.find_program(['csc', 'mcs', 'gmcs'], var='MCS') conf.env.ASS_ST = '/r:%s' conf.env.RES_ST = '/resource:%s' conf.env.CS_NAME = 'csc' if 
str(conf.env.MCS).lower().find('mcs') > -1: conf.env.CS_NAME = 'mono' def options(opt): """ Add a command-line option for the configuration:: $ waf configure --with-csc-binary=/foo/bar/mcs """ opt.add_option('--with-csc-binary', type='string', dest='cscbinary') class fake_csshlib(Task.Task): """ Task used for reading a foreign .net assembly and adding the dependency on it """ color = 'YELLOW' inst_to = None def runnable_status(self): return Task.SKIP_ME @conf def read_csshlib(self, name, paths=[]): """ Read a foreign .net assembly for the *use* system:: def build(bld): bld.read_csshlib('ManagedLibrary.dll', paths=[bld.env.mylibrarypath]) bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe', use='ManagedLibrary.dll') :param name: Name of the library :type name: string :param paths: Folders in which the library may be found :type paths: list of string :return: A task generator having the feature *fake_lib* which will call :py:func:`waflib.Tools.ccroot.process_lib` :rtype: :py:class:`waflib.TaskGen.task_gen` """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib')
6,397
Python
.py
177
33.649718
115
0.686065
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,744
d_config.py
projecthamster_hamster/waflib/Tools/d_config.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) from waflib import Utils from waflib.Configure import conf @conf def d_platform_flags(self): """ Sets the extensions dll/so for d programs and libraries """ v = self.env if not v.DEST_OS: v.DEST_OS = Utils.unversioned_sys_platform() binfmt = Utils.destos_to_binfmt(self.env.DEST_OS) if binfmt == 'pe': v.dprogram_PATTERN = '%s.exe' v.dshlib_PATTERN = 'lib%s.dll' v.dstlib_PATTERN = 'lib%s.a' elif binfmt == 'mac-o': v.dprogram_PATTERN = '%s' v.dshlib_PATTERN = 'lib%s.dylib' v.dstlib_PATTERN = 'lib%s.a' else: v.dprogram_PATTERN = '%s' v.dshlib_PATTERN = 'lib%s.so' v.dstlib_PATTERN = 'lib%s.a' DLIB = ''' version(D_Version2) { import std.stdio; int main() { writefln("phobos2"); return 0; } } else { version(Tango) { import tango.stdc.stdio; int main() { printf("tango"); return 0; } } else { import std.stdio; int main() { writefln("phobos1"); return 0; } } } ''' """Detection string for the D standard library""" @conf def check_dlibrary(self, execute=True): """ Detects the kind of standard library that comes with the compiler, and sets conf.env.DLIBRARY to tango, phobos1 or phobos2 """ ret = self.check_cc(features='d dprogram', fragment=DLIB, compile_filename='test.d', execute=execute, define_ret=True) if execute: self.env.DLIBRARY = ret.strip()
1,415
Python
.py
59
21.694915
119
0.687639
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,745
d_scan.py
projecthamster_hamster/waflib/Tools/d_scan.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) """ Provide a scanner for finding dependencies on d files """ import re from waflib import Utils def filter_comments(filename): """ :param filename: d file name :type filename: string :rtype: list :return: a list of characters """ txt = Utils.readf(filename) i = 0 buf = [] max = len(txt) begin = 0 while i < max: c = txt[i] if c == '"' or c == "'": # skip a string or character literal buf.append(txt[begin:i]) delim = c i += 1 while i < max: c = txt[i] if c == delim: break elif c == '\\': # skip the character following backslash i += 1 i += 1 i += 1 begin = i elif c == '/': # try to replace a comment with whitespace buf.append(txt[begin:i]) i += 1 if i == max: break c = txt[i] if c == '+': # eat nesting /+ +/ comment i += 1 nesting = 1 c = None while i < max: prev = c c = txt[i] if prev == '/' and c == '+': nesting += 1 c = None elif prev == '+' and c == '/': nesting -= 1 if nesting == 0: break c = None i += 1 elif c == '*': # eat /* */ comment i += 1 c = None while i < max: prev = c c = txt[i] if prev == '*' and c == '/': break i += 1 elif c == '/': # eat // comment i += 1 while i < max and txt[i] != '\n': i += 1 else: # no comment begin = i - 1 continue i += 1 begin = i buf.append(' ') else: i += 1 buf.append(txt[begin:]) return buf class d_parser(object): """ Parser for d files """ def __init__(self, env, incpaths): #self.code = '' #self.module = '' #self.imports = [] self.allnames = [] self.re_module = re.compile(r"module\s+([^;]+)") self.re_import = re.compile(r"import\s+([^;]+)") self.re_import_bindings = re.compile("([^:]+):(.*)") self.re_import_alias = re.compile("[^=]+=(.+)") self.env = env self.nodes = [] self.names = [] self.incpaths = incpaths def tryfind(self, filename): """ Search file a file matching an module/import directive :param filename: file to read :type filename: string """ found = 0 for n in self.incpaths: found = n.find_resource(filename.replace('.', '/') + '.d') if 
found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename) def get_strings(self, code): """ :param code: d code to parse :type code: string :return: the modules that the code uses :rtype: a list of match objects """ #self.imports = [] self.module = '' lst = [] # get the module name (if present) mod_name = self.re_module.search(code) if mod_name: self.module = re.sub(r'\s+', '', mod_name.group(1)) # strip all whitespaces # go through the code, have a look at all import occurrences # first, lets look at anything beginning with "import" and ending with ";" import_iterator = self.re_import.finditer(code) if import_iterator: for import_match in import_iterator: import_match_str = re.sub(r'\s+', '', import_match.group(1)) # strip all whitespaces # does this end with an import bindings declaration? # (import bindings always terminate the list of imports) bindings_match = self.re_import_bindings.match(import_match_str) if bindings_match: import_match_str = bindings_match.group(1) # if so, extract the part before the ":" (since the module declaration(s) is/are located there) # split the matching string into a bunch of strings, separated by a comma matches = import_match_str.split(',') for match in matches: alias_match = self.re_import_alias.match(match) if alias_match: # is this an alias declaration? 
(alias = module name) if so, extract the module name match = alias_match.group(1) lst.append(match) return lst def start(self, node): """ The parsing starts here :param node: input file :type node: :py:class:`waflib.Node.Node` """ self.waiting = [node] # while the stack is not empty, add the dependencies while self.waiting: nd = self.waiting.pop(0) self.iter(nd) def iter(self, node): """ Find all the modules that a file depends on, uses :py:meth:`waflib.Tools.d_scan.d_parser.tryfind` to process dependent files :param node: input file :type node: :py:class:`waflib.Node.Node` """ path = node.abspath() # obtain the absolute path code = "".join(filter_comments(path)) # read the file and filter the comments names = self.get_strings(code) # obtain the import strings for x in names: # optimization if x in self.allnames: continue self.allnames.append(x) # for each name, see if it is like a node or not self.tryfind(x) def scan(self): "look for .d/.di used by a d file" env = self.env gruik = d_parser(env, self.generator.includes_nodes) node = self.inputs[0] gruik.start(node) nodes = gruik.nodes names = gruik.names return (nodes, names)
5,056
Python
.py
184
23.657609
126
0.633437
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,746
winres.py
projecthamster_hamster/waflib/Tools/winres.py
#!/usr/bin/env python # encoding: utf-8 # Brant Young, 2007 "Process *.rc* files for C/C++: X{.rc -> [.res|.rc.o]}" import os import re from waflib import Task from waflib.TaskGen import extension from waflib.Tools import c_preproc from waflib import Utils @extension('.rc') def rc_file(self, node): """ Binds the .rc extension to a winrc task """ obj_ext = '.rc.o' if self.env.WINRC_TGT_F == '/fo': obj_ext = '.res' rctask = self.create_task('winrc', node, node.change_ext(obj_ext)) try: self.compiled_tasks.append(rctask) except AttributeError: self.compiled_tasks = [rctask] re_lines = re.compile( r'(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|'\ r'(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)', re.IGNORECASE | re.MULTILINE) class rc_parser(c_preproc.c_parser): """ Calculates dependencies in .rc files """ def filter_comments(self, node): """ Overrides :py:meth:`waflib.Tools.c_preproc.c_parser.filter_comments` """ code = node.read() if c_preproc.use_trigraphs: for (a, b) in c_preproc.trig_def: code = code.split(a).join(b) code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) ret = [] for m in re.finditer(re_lines, code): if m.group(2): ret.append((m.group(2), m.group(3))) else: ret.append(('include', m.group(5))) return ret class winrc(Task.Task): """ Compiles resource files """ run_str = '${WINRC} ${WINRCFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}' color = 'BLUE' def scan(self): tmp = rc_parser(self.generator.includes_nodes) tmp.start(self.inputs[0], self.env) return (tmp.nodes, tmp.names) def exec_command(self, cmd, **kw): if self.env.WINRC_TGT_F == '/fo': # Since winres include paths may contain spaces, they do not fit in # response files and are best passed as environment variables replace_cmd = [] incpaths = [] while cmd: # filter include path flags flag = cmd.pop(0) if 
flag.upper().startswith('/I'): if len(flag) == 2: incpaths.append(cmd.pop(0)) else: incpaths.append(flag[2:]) else: replace_cmd.append(flag) cmd = replace_cmd if incpaths: # append to existing environment variables in INCLUDE env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) pre_includes = env.get('INCLUDE', '') env['INCLUDE'] = pre_includes + os.pathsep + os.pathsep.join(incpaths) return super(winrc, self).exec_command(cmd, **kw) def quote_flag(self, flag): if self.env.WINRC_TGT_F == '/fo': # winres does not support quotes around flags in response files return flag return super(winrc, self).quote_flag(flag) def configure(conf): """ Detects the programs RC or windres, depending on the C/C++ compiler in use """ v = conf.env if not v.WINRC: if v.CC_NAME == 'msvc': conf.find_program('RC', var='WINRC', path_list=v.PATH) v.WINRC_TGT_F = '/fo' v.WINRC_SRC_F = '' else: conf.find_program('windres', var='WINRC', path_list=v.PATH) v.WINRC_TGT_F = '-o' v.WINRC_SRC_F = '-i'
3,205
Python
.py
100
28.95
124
0.665912
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,747
perl.py
projecthamster_hamster/waflib/Tools/perl.py
#!/usr/bin/env python # encoding: utf-8 # andersg at 0x63.nu 2007 # Thomas Nagy 2016-2018 (ita) """ Support for Perl extensions. A C/C++ compiler is required:: def options(opt): opt.load('compiler_c perl') def configure(conf): conf.load('compiler_c perl') conf.check_perl_version((5,6,0)) conf.check_perl_ext_devel() conf.check_perl_module('Cairo') conf.check_perl_module('Devel::PPPort 4.89') def build(bld): bld( features = 'c cshlib perlext', source = 'Mytest.xs', target = 'Mytest', install_path = '${ARCHDIR_PERL}/auto') bld.install_files('${ARCHDIR_PERL}', 'Mytest.pm') """ import os from waflib import Task, Options, Utils, Errors from waflib.Configure import conf from waflib.TaskGen import extension, feature, before_method @before_method('apply_incpaths', 'apply_link', 'propagate_uselib_vars') @feature('perlext') def init_perlext(self): """ Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the *lib* prefix from library names. """ self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PERLEXT' in self.uselib: self.uselib.append('PERLEXT') self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.perlext_PATTERN @extension('.xs') def xsubpp_file(self, node): """ Create :py:class:`waflib.Tools.perl.xsubpp` tasks to process *.xs* files """ outnode = node.change_ext('.c') self.create_task('xsubpp', node, outnode) self.source.append(outnode) class xsubpp(Task.Task): """ Process *.xs* files """ run_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}' color = 'BLUE' ext_out = ['.h'] @conf def check_perl_version(self, minver=None): """ Check if Perl is installed, and set the variable PERL. 
minver is supposed to be a tuple """ res = True if minver: cver = '.'.join(map(str,minver)) else: cver = '' self.start_msg('Checking for minimum perl version %s' % cver) perl = self.find_program('perl', var='PERL', value=getattr(Options.options, 'perlbinary', None)) version = self.cmd_and_log(perl + ["-e", 'printf \"%vd\", $^V']) if not version: res = False version = "Unknown" elif not minver is None: ver = tuple(map(int, version.split("."))) if ver < minver: res = False self.end_msg(version, color=res and 'GREEN' or 'YELLOW') return res @conf def check_perl_module(self, module): """ Check if specified perlmodule is installed. The minimum version can be specified by specifying it after modulename like this:: def configure(conf): conf.check_perl_module("Some::Module 2.92") """ cmd = self.env.PERL + ['-e', 'use %s' % module] self.start_msg('perl module %s' % module) try: r = self.cmd_and_log(cmd) except Errors.WafError: self.end_msg(False) return None self.end_msg(r or True) return r @conf def check_perl_ext_devel(self): """ Check for configuration needed to build perl extensions. Sets different xxx_PERLEXT variables in the environment. Also sets the ARCHDIR_PERL variable useful as installation path, which can be overridden by ``--with-perl-archdir`` option. 
""" env = self.env perl = env.PERL if not perl: self.fatal('find perl first') def cmd_perl_config(s): return perl + ['-MConfig', '-e', 'print \"%s\"' % s] def cfg_str(cfg): return self.cmd_and_log(cmd_perl_config(cfg)) def cfg_lst(cfg): return Utils.to_list(cfg_str(cfg)) def find_xsubpp(): for var in ('privlib', 'vendorlib'): xsubpp = cfg_lst('$Config{%s}/ExtUtils/xsubpp$Config{exe_ext}' % var) if xsubpp and os.path.isfile(xsubpp[0]): return xsubpp return self.find_program('xsubpp') env.LINKFLAGS_PERLEXT = cfg_lst('$Config{lddlflags}') env.INCLUDES_PERLEXT = cfg_lst('$Config{archlib}/CORE') env.CFLAGS_PERLEXT = cfg_lst('$Config{ccflags} $Config{cccdlflags}') env.EXTUTILS_TYPEMAP = cfg_lst('$Config{privlib}/ExtUtils/typemap') env.XSUBPP = find_xsubpp() if not getattr(Options.options, 'perlarchdir', None): env.ARCHDIR_PERL = cfg_str('$Config{sitearch}') else: env.ARCHDIR_PERL = getattr(Options.options, 'perlarchdir') env.perlext_PATTERN = '%s.' + cfg_str('$Config{dlext}') def options(opt): """ Add the ``--with-perl-archdir`` and ``--with-perl-binary`` command-line options. """ opt.add_option('--with-perl-binary', type='string', dest='perlbinary', help = 'Specify alternate perl binary', default=None) opt.add_option('--with-perl-archdir', type='string', dest='perlarchdir', help = 'Specify directory where to install arch specific files', default=None)
4,517
Python
.py
133
31.62406
152
0.70626
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,748
irixcc.py
projecthamster_hamster/waflib/Tools/irixcc.py
#! /usr/bin/env python # encoding: utf-8 # imported from samba """ Compiler definition for irix/MIPSpro cc compiler """ from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_irixcc(conf): v = conf.env cc = conf.find_program('cc', var='CC') try: conf.cmd_and_log(cc + ['-version']) except Errors.WafError: conf.fatal('%r -version could not be executed' % cc) v.CC_NAME = 'irix' @conf def irixcc_common_flags(conf): v = conf.env v.CC_SRC_F = '' v.CC_TGT_F = ['-c', '-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o'] v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.cprogram_PATTERN = '%s' v.cshlib_PATTERN = 'lib%s.so' v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_irixcc() conf.find_ar() conf.irixcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
1,171
Python
.py
43
25.232558
62
0.611459
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,749
ifort.py
projecthamster_hamster/waflib/Tools/ifort.py
#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import os, re, traceback from waflib import Utils, Logs, Errors from waflib.Tools import fc, fc_config, fc_scan, ar, ccroot from waflib.Configure import conf from waflib.TaskGen import after_method, feature @conf def find_ifort(conf): fc = conf.find_program('ifort', var='FC') conf.get_ifort_version(fc) conf.env.FC_NAME = 'IFORT' @conf def ifort_modifier_win32(self): v = self.env v.IFORT_WIN32 = True v.FCSTLIB_MARKER = '' v.FCSHLIB_MARKER = '' v.FCLIB_ST = v.FCSTLIB_ST = '%s.lib' v.FCLIBPATH_ST = v.STLIBPATH_ST = '/LIBPATH:%s' v.FCINCPATH_ST = '/I%s' v.FCDEFINES_ST = '/D%s' v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' v.fcshlib_PATTERN = '%s.dll' v.fcstlib_PATTERN = v.implib_PATTERN = '%s.lib' v.FCLNK_TGT_F = '/out:' v.FC_TGT_F = ['/c', '/o', ''] v.FCFLAGS_fcshlib = '' v.LINKFLAGS_fcshlib = '/DLL' v.AR_TGT_F = '/out:' v.IMPLIB_ST = '/IMPLIB:%s' v.append_value('LINKFLAGS', '/subsystem:console') if v.IFORT_MANIFEST: v.append_value('LINKFLAGS', ['/MANIFEST']) @conf def ifort_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def ifort_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() ifort_modifier_func = getattr(conf, 'ifort_modifier_' + dest_os, None) if ifort_modifier_func: ifort_modifier_func() @conf def get_ifort_version(conf, fc): """ Detects the compiler version and sets ``conf.env.FC_VERSION`` """ version_re = re.compile(r"\bIntel\b.*\bVersion\s*(?P<major>\d*)\.(?P<minor>\d*)",re.I).search if Utils.is_win32: cmd = fc else: cmd = fc + ['-logo'] out, err = fc_config.getoutput(conf, cmd, stdin=False) match = version_re(out) or version_re(err) if not match: conf.fatal('cannot determine ifort version.') k = match.groupdict() conf.env.FC_VERSION = (k['major'], k['minor']) def configure(conf): """ Detects the Intel Fortran compilers """ if Utils.is_win32: compiler, version, path, includes, libdirs, arch = 
conf.detect_ifort() v = conf.env v.DEST_CPU = arch v.PATH = path v.INCLUDES = includes v.LIBPATH = libdirs v.MSVC_COMPILER = compiler try: v.MSVC_VERSION = float(version) except ValueError: v.MSVC_VERSION = float(version[:-3]) conf.find_ifort_win32() conf.ifort_modifier_win32() else: conf.find_ifort() conf.find_program('xiar', var='AR') conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.ifort_modifier_platform() all_ifort_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] """List of icl platforms""" @conf def gather_ifort_versions(conf, versions): """ List compiler versions by looking up registry keys """ version_pattern = re.compile(r'^...?.?\....?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\Fortran') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\Fortran') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_ifort_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: targetDir=target try: Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version) path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','ifortvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) for target,arch in all_ifort_platforms: try: icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: continue else: batch_file=os.path.join(path,'bin','ifortvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, 
target, batch_file) major = version[0:2] versions['intel ' + major] = targets @conf def setup_ifort(conf, versiondict): """ Checks installed compilers and targets and returns the first combination from the user's options, env, or the global supported lists that checks. :param versiondict: dict(platform -> dict(architecture -> configuration)) :type versiondict: dict(string -> dict(string -> target_compiler) :return: the compiler, revision, path, include dirs, library paths and target architecture :rtype: tuple of strings """ platforms = Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_ifort_platforms] desired_versions = conf.env.MSVC_VERSIONS or list(reversed(list(versiondict.keys()))) for version in desired_versions: try: targets = versiondict[version] except KeyError: continue for arch in platforms: try: cfg = targets[arch] except KeyError: continue cfg.evaluate() if cfg.is_valid: compiler,revision = version.rsplit(' ', 1) return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu conf.fatal('ifort: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys()))) @conf def get_ifort_version_win32(conf, compiler, version, target, vcvars): # FIXME hack try: conf.msvc_cnt += 1 except AttributeError: conf.msvc_cnt = 1 batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt) batfile.write("""@echo off set INCLUDE= set LIB= call "%s" %s echo PATH=%%PATH%% echo INCLUDE=%%INCLUDE%% echo LIB=%%LIB%%;%%LIBPATH%% """ % (vcvars,target)) sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()]) batfile.delete() lines = sout.splitlines() if not lines[0]: lines.pop(0) MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None for line in lines: if line.startswith('PATH='): path = line[5:] MSVC_PATH = path.split(';') elif line.startswith('INCLUDE='): MSVC_INCDIR = [i for i in line[8:].split(';') if i] elif line.startswith('LIB='): MSVC_LIBDIR = [i for i in line[4:].split(';') if i] if None in 
(MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR): conf.fatal('ifort: Could not find a valid architecture for building (get_ifort_version_win32)') # Check if the compiler is usable at all. # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. env = dict(os.environ) env.update(PATH = path) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) fc = conf.find_program(compiler_name, path_list=MSVC_PATH) # delete CL if exists. because it could contain parameters which can change cl's behaviour rather catastrophically. if 'CL' in env: del(env['CL']) try: conf.cmd_and_log(fc + ['/help'], env=env) except UnicodeError: st = traceback.format_exc() if conf.logger: conf.logger.error(st) conf.fatal('ifort: Unicode error - check the code page?') except Exception as e: Logs.debug('ifort: get_ifort_version: %r %r %r -> failure %s', compiler, version, target, str(e)) conf.fatal('ifort: cannot run the compiler in get_ifort_version (run with -v to display errors)') else: Logs.debug('ifort: get_ifort_version: %r %r %r -> OK', compiler, version, target) finally: conf.env[compiler_name] = '' return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) class target_compiler(object): """ Wraps a compiler configuration; call evaluate() to determine whether the configuration is usable. """ def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): """ :param ctx: configuration context to use to eventually get the version environment :param compiler: compiler name :param cpu: target cpu :param version: compiler version number :param bat_target: ? :param bat: path to the batch file to run :param callback: optional function to take the realized environment variables tup and map it (e.g. 
to combine other constant paths) """ self.conf = ctx self.name = None self.is_valid = False self.is_done = False self.compiler = compiler self.cpu = cpu self.version = version self.bat_target = bat_target self.bat = bat self.callback = callback def evaluate(self): if self.is_done: return self.is_done = True try: vs = self.conf.get_ifort_version_win32(self.compiler, self.version, self.bat_target, self.bat) except Errors.ConfigurationError: self.is_valid = False return if self.callback: vs = self.callback(self, vs) self.is_valid = True (self.bindirs, self.incdirs, self.libdirs) = vs def __str__(self): return str((self.bindirs, self.incdirs, self.libdirs)) def __repr__(self): return repr((self.bindirs, self.incdirs, self.libdirs)) @conf def detect_ifort(self): return self.setup_ifort(self.get_ifort_versions(False)) @conf def get_ifort_versions(self, eval_and_save=True): """ :return: platforms to compiler configurations :rtype: dict """ dct = {} self.gather_ifort_versions(dct) return dct def _get_prog_names(self, compiler): if compiler=='intel': compiler_name = 'ifort' linker_name = 'XILINK' lib_name = 'XILIB' else: # assumes CL.exe compiler_name = 'CL' linker_name = 'LINK' lib_name = 'LIB' return compiler_name, linker_name, lib_name @conf def find_ifort_win32(conf): # the autodetection is supposed to be performed before entering in this method v = conf.env path = v.PATH compiler = v.MSVC_COMPILER version = v.MSVC_VERSION compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) v.IFORT_MANIFEST = (compiler == 'intel' and version >= 11) # compiler fc = conf.find_program(compiler_name, var='FC', path_list=path) # before setting anything, check if the compiler is really intel fortran env = dict(conf.environ) if path: env.update(PATH = ';'.join(path)) if not conf.cmd_and_log(fc + ['/nologo', '/help'], env=env): conf.fatal('not intel fortran compiler could not be identified') v.FC_NAME = 'IFORT' if not v.LINK_FC: conf.find_program(linker_name, var='LINK_FC', 
path_list=path, mandatory=True) if not v.AR: conf.find_program(lib_name, path_list=path, var='AR', mandatory=True) v.ARFLAGS = ['/nologo'] # manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later if v.IFORT_MANIFEST: conf.find_program('MT', path_list=path, var='MT') v.MTFLAGS = ['/nologo'] try: conf.load('winres') except Errors.WafError: Logs.warn('Resource compiler not found. Compiling resource file is disabled') ####################################################################################################### ##### conf above, build below @after_method('apply_link') @feature('fc') def apply_flags_ifort(self): """ Adds additional flags implied by msvc, such as subsystems and pdb files:: def build(bld): bld.stlib(source='main.c', target='bar', subsystem='gruik') """ if not self.env.IFORT_WIN32 or not getattr(self, 'link_task', None): return is_static = isinstance(self.link_task, ccroot.stlink_task) subsystem = getattr(self, 'subsystem', '') if subsystem: subsystem = '/subsystem:%s' % subsystem flags = is_static and 'ARFLAGS' or 'LINKFLAGS' self.env.append_value(flags, subsystem) if not is_static: for f in self.env.LINKFLAGS: d = f.lower() if d[1:] == 'debug': pdbnode = self.link_task.outputs[0].change_ext('.pdb') self.link_task.outputs.append(pdbnode) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files(install_to=self.install_task.install_to, install_from=pdbnode) break @feature('fcprogram', 'fcshlib', 'fcprogram_test') @after_method('apply_link') def apply_manifest_ifort(self): """ Enables manifest embedding in Fortran DLLs when using ifort on Windows See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx """ if self.env.IFORT_WIN32 and getattr(self, 'link_task', None): # it seems ifort.exe cannot be called for linking self.link_task.env.FC = self.env.LINK_FC if self.env.IFORT_WIN32 and self.env.IFORT_MANIFEST and getattr(self, 'link_task', None): out_node = self.link_task.outputs[0] 
man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') self.link_task.outputs.append(man_node) self.env.DO_MANIFEST = True
12,473
Python
.py
359
32.033426
133
0.709121
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,750
flex.py
projecthamster_hamster/waflib/Tools/flex.py
#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy, 2006-2018 (ita) """ The **flex** program is a code generator which creates C or C++ files. The generated files are compiled into object files. """ import os, re from waflib import Task, TaskGen from waflib.Tools import ccroot def decide_ext(self, node): if 'cxx' in self.features: return ['.lex.cc'] return ['.lex.c'] def flexfun(tsk): env = tsk.env bld = tsk.generator.bld wd = bld.variant_dir def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = lst = [] lst.extend(to_list(env.FLEX)) lst.extend(to_list(env.FLEXFLAGS)) inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs] if env.FLEX_MSYS: inputs = [x.replace(os.sep, '/') for x in inputs] lst.extend(inputs) lst = [x for x in lst if x] txt = bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207 TaskGen.declare_chain( name = 'flex', rule = flexfun, # issue #854 ext_in = '.l', decider = decide_ext, ) # To support the following: # bld(features='c', flexflags='-P/foo') Task.classes['flex'].vars = ['FLEXFLAGS', 'FLEX'] ccroot.USELIB_VARS['c'].add('FLEXFLAGS') ccroot.USELIB_VARS['cxx'].add('FLEXFLAGS') def configure(conf): """ Detect the *flex* program """ conf.find_program('flex', var='FLEX') conf.env.FLEXFLAGS = ['-t'] if re.search (r"\\msys\\[0-9.]+\\bin\\flex.exe$", conf.env.FLEX[0]): # this is the flex shipped with MSYS conf.env.FLEX_MSYS = True
1,553
Python
.py
53
27.377358
82
0.682093
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,751
tex.py
projecthamster_hamster/waflib/Tools/tex.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ TeX/LaTeX/PDFLaTeX/XeLaTeX support Example:: def configure(conf): conf.load('tex') if not conf.env.LATEX: conf.fatal('The program LaTex is required') def build(bld): bld( features = 'tex', type = 'latex', # pdflatex or xelatex source = 'document.ltx', # mandatory, the source outs = 'ps', # 'pdf' or 'ps pdf' deps = 'crossreferencing.lst', # to give dependencies directly prompt = 1, # 0 for the batch mode ) Notes: - To configure with a special program, use:: $ PDFLATEX=luatex waf configure - This tool does not use the target attribute of the task generator (``bld(target=...)``); the target file name is built from the source base name and the output type(s) """ import os, re from waflib import Utils, Task, Errors, Logs, Node from waflib.TaskGen import feature, before_method re_bibunit = re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M) def bibunitscan(self): """ Parses TeX inputs and try to find the *bibunit* file dependencies :return: list of bibunit files :rtype: list of :py:class:`waflib.Node.Node` """ node = self.inputs[0] nodes = [] if not node: return nodes code = node.read() for match in re_bibunit.finditer(code): path = match.group('file') if path: found = None for k in ('', '.bib'): # add another loop for the tex include paths? 
Logs.debug('tex: trying %s%s', path, k) fi = node.parent.find_resource(path + k) if fi: found = True nodes.append(fi) # no break if not found: Logs.debug('tex: could not find %s', path) Logs.debug('tex: found the following bibunit files: %s', nodes) return nodes exts_deps_tex = ['', '.ltx', '.tex', '.bib', '.pdf', '.png', '.eps', '.ps', '.sty'] """List of typical file extensions included in latex files""" exts_tex = ['.ltx', '.tex'] """List of typical file extensions that contain latex""" re_tex = re.compile(r'\\(?P<type>usepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M) """Regexp for expressions that may include latex files""" g_bibtex_re = re.compile('bibdata', re.M) """Regexp for bibtex files""" g_glossaries_re = re.compile('\\@newglossary', re.M) """Regexp for expressions that create glossaries""" class tex(Task.Task): """ Compiles a tex/latex file. .. inheritance-diagram:: waflib.Tools.tex.latex waflib.Tools.tex.xelatex waflib.Tools.tex.pdflatex :top-classes: waflib.Tools.tex.tex """ bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False) bibtex_fun.__doc__ = """ Execute the program **bibtex** """ makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False) makeindex_fun.__doc__ = """ Execute the program **makeindex** """ makeglossaries_fun, _ = Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}', shell=False) makeglossaries_fun.__doc__ = """ Execute the program **makeglossaries** """ def exec_command(self, cmd, **kw): """ Executes TeX commands without buffering (latex may prompt for inputs) :return: the return code :rtype: int """ if self.env.PROMPT_LATEX: # capture the outputs in configuration tests kw['stdout'] = kw['stderr'] = None return super(tex, self).exec_command(cmd, **kw) def scan_aux(self, node): """ Recursive regex-based scanner that finds included auxiliary files. 
""" nodes = [node] re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M) def parse_node(node): code = node.read() for match in re_aux.finditer(code): path = match.group('file') found = node.parent.find_or_declare(path) if found and found not in nodes: Logs.debug('tex: found aux node %r', found) nodes.append(found) parse_node(found) parse_node(node) return nodes def scan(self): """ Recursive regex-based scanner that finds latex dependencies. It uses :py:attr:`waflib.Tools.tex.re_tex` Depending on your needs you might want: * to change re_tex:: from waflib.Tools import tex tex.re_tex = myregex * or to change the method scan from the latex tasks:: from waflib.Task import classes classes['latex'].scan = myscanfunction """ node = self.inputs[0] nodes = [] names = [] seen = [] if not node: return (nodes, names) def parse_node(node): if node in seen: return seen.append(node) code = node.read() for match in re_tex.finditer(code): multibib = match.group('type') if multibib and multibib.startswith('bibliography'): multibib = multibib[len('bibliography'):] if multibib.startswith('style'): continue else: multibib = None for path in match.group('file').split(','): if path: add_name = True found = None for k in exts_deps_tex: # issue 1067, scan in all texinputs folders for up in self.texinputs_nodes: Logs.debug('tex: trying %s%s', path, k) found = up.find_resource(path + k) if found: break for tsk in self.generator.tasks: if not found or found in tsk.outputs: break else: nodes.append(found) add_name = False for ext in exts_tex: if found.name.endswith(ext): parse_node(found) break # multibib stuff if found and multibib and found.name.endswith('.bib'): try: self.multibibs.append(found) except AttributeError: self.multibibs = [found] # no break, people are crazy if add_name: names.append(path) parse_node(node) for x in nodes: x.parent.get_bld().mkdir() Logs.debug("tex: found the following : %s and names %s", nodes, names) return (nodes, names) def check_status(self, msg, 
retcode): """ Checks an exit status and raise an error with a particular message :param msg: message to display if the code is non-zero :type msg: string :param retcode: condition :type retcode: boolean """ if retcode != 0: raise Errors.WafError('%r command exit status %r' % (msg, retcode)) def info(self, *k, **kw): try: info = self.generator.bld.conf.logger.info except AttributeError: info = Logs.info info(*k, **kw) def bibfile(self): """ Parses *.aux* files to find bibfiles to process. If present, execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun` """ for aux_node in self.aux_nodes: try: ct = aux_node.read() except EnvironmentError: Logs.error('Error reading %s: %r', aux_node.abspath()) continue if g_bibtex_re.findall(ct): self.info('calling bibtex') self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = aux_node.name[:-4] self.check_status('error when calling bibtex', self.bibtex_fun()) for node in getattr(self, 'multibibs', []): self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = node.name[:-4] self.check_status('error when calling bibtex', self.bibtex_fun()) def bibunits(self): """ Parses *.aux* file to find bibunit files. If there are bibunit files, runs :py:meth:`waflib.Tools.tex.tex.bibtex_fun`. """ try: bibunits = bibunitscan(self) except OSError: Logs.error('error bibunitscan') else: if bibunits: fn = ['bu' + str(i) for i in range(1, len(bibunits) + 1)] if fn: self.info('calling bibtex on bibunits') for f in fn: self.env.env = {'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()} self.env.SRCFILE = f self.check_status('error when calling bibtex', self.bibtex_fun()) def makeindex(self): """ Searches the filesystem for *.idx* files to process. 
If present, runs :py:meth:`waflib.Tools.tex.tex.makeindex_fun` """ self.idx_node = self.inputs[0].change_ext('.idx') try: idx_path = self.idx_node.abspath() os.stat(idx_path) except OSError: self.info('index file %s absent, not calling makeindex', idx_path) else: self.info('calling makeindex') self.env.SRCFILE = self.idx_node.name self.env.env = {} self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun()) def bibtopic(self): """ Lists additional .aux files from the bibtopic package """ p = self.inputs[0].parent.get_bld() if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')): self.aux_nodes += p.ant_glob('*[0-9].aux') def makeglossaries(self): """ Lists additional glossaries from .aux files. If present, runs the makeglossaries program. """ src_file = self.inputs[0].abspath() base_file = os.path.basename(src_file) base, _ = os.path.splitext(base_file) for aux_node in self.aux_nodes: try: ct = aux_node.read() except EnvironmentError: Logs.error('Error reading %s: %r', aux_node.abspath()) continue if g_glossaries_re.findall(ct): if not self.env.MAKEGLOSSARIES: raise Errors.WafError("The program 'makeglossaries' is missing!") Logs.warn('calling makeglossaries') self.env.SRCFILE = base self.check_status('error when calling makeglossaries %s' % base, self.makeglossaries_fun()) return def texinputs(self): """ Returns the list of texinput nodes as a string suitable for the TEXINPUTS environment variables :rtype: string """ return os.pathsep.join([k.abspath() for k in self.texinputs_nodes]) + os.pathsep def run(self): """ Runs the whole TeX build process Multiple passes are required depending on the usage of cross-references, bibliographies, glossaries, indexes and additional contents The appropriate TeX compiler is called until the *.aux* files stop changing. 
""" env = self.env if not env.PROMPT_LATEX: env.append_value('LATEXFLAGS', '-interaction=batchmode') env.append_value('PDFLATEXFLAGS', '-interaction=batchmode') env.append_value('XELATEXFLAGS', '-interaction=batchmode') # important, set the cwd for everybody self.cwd = self.inputs[0].parent.get_bld() self.info('first pass on %s', self.__class__.__name__) # Hash .aux files before even calling the LaTeX compiler cur_hash = self.hash_aux_nodes() self.call_latex() # Find the .aux files again since bibtex processing can require it self.hash_aux_nodes() self.bibtopic() self.bibfile() self.bibunits() self.makeindex() self.makeglossaries() for i in range(10): # There is no need to call latex again if the .aux hash value has not changed prev_hash = cur_hash cur_hash = self.hash_aux_nodes() if not cur_hash: Logs.error('No aux.h to process') if cur_hash and cur_hash == prev_hash: break # run the command self.info('calling %s', self.__class__.__name__) self.call_latex() def hash_aux_nodes(self): """ Returns a hash of the .aux file contents :rtype: string or bytes """ try: self.aux_nodes except AttributeError: try: self.aux_nodes = self.scan_aux(self.inputs[0].change_ext('.aux')) except IOError: return None return Utils.h_list([Utils.h_file(x.abspath()) for x in self.aux_nodes]) def call_latex(self): """ Runs the TeX compiler once """ self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'TEXINPUTS': self.texinputs()}) self.env.SRCFILE = self.inputs[0].abspath() self.check_status('error when calling latex', self.texfun()) class latex(tex): "Compiles LaTeX files" texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) class pdflatex(tex): "Compiles PdfLaTeX files" texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) class xelatex(tex): "XeLaTeX files" texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False) class dvips(Task.Task): "Converts dvi files to postscript" run_str 
= '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] class dvipdf(Task.Task): "Converts dvi files to pdf" run_str = '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] class pdf2ps(Task.Task): "Converts pdf files to postscript" run_str = '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] @feature('tex') @before_method('process_source') def apply_tex(self): """ Creates :py:class:`waflib.Tools.tex.tex` objects, and dvips/dvipdf/pdf2ps tasks if necessary (outs='ps', etc). """ if not getattr(self, 'type', None) in ('latex', 'pdflatex', 'xelatex'): self.type = 'pdflatex' outs = Utils.to_list(getattr(self, 'outs', [])) # prompt for incomplete files (else the batchmode is used) try: self.generator.bld.conf except AttributeError: default_prompt = False else: default_prompt = True self.env.PROMPT_LATEX = getattr(self, 'prompt', default_prompt) deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for dep in deps: if isinstance(dep, str): n = self.path.find_resource(dep) if not n: self.bld.fatal('Could not find %r for %r' % (dep, self)) if not n in deps_lst: deps_lst.append(n) elif isinstance(dep, Node.Node): deps_lst.append(dep) for node in self.to_nodes(self.source): if self.type == 'latex': task = self.create_task('latex', node, node.change_ext('.dvi')) elif self.type == 'pdflatex': task = self.create_task('pdflatex', node, node.change_ext('.pdf')) elif self.type == 'xelatex': task = self.create_task('xelatex', node, node.change_ext('.pdf')) task.env = self.env # add the manual dependencies if deps_lst: for n in deps_lst: if not n in task.dep_nodes: task.dep_nodes.append(n) # texinputs is a nasty beast if hasattr(self, 'texinputs_nodes'): task.texinputs_nodes = self.texinputs_nodes else: task.texinputs_nodes = [node.parent, node.parent.get_bld(), self.path, self.path.get_bld()] lst = os.environ.get('TEXINPUTS', 
'') if self.env.TEXINPUTS: lst += os.pathsep + self.env.TEXINPUTS if lst: lst = lst.split(os.pathsep) for x in lst: if x: if os.path.isabs(x): p = self.bld.root.find_node(x) if p: task.texinputs_nodes.append(p) else: Logs.error('Invalid TEXINPUTS folder %s', x) else: Logs.error('Cannot resolve relative paths in TEXINPUTS %s', x) if self.type == 'latex': if 'ps' in outs: tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps')) tsk.env.env = dict(os.environ) if 'pdf' in outs: tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf')) tsk.env.env = dict(os.environ) elif self.type == 'pdflatex': if 'ps' in outs: self.create_task('pdf2ps', task.outputs, node.change_ext('.ps')) self.source = [] def configure(self): """ Find the programs tex, latex and others without raising errors. """ v = self.env for p in 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps makeglossaries'.split(): try: self.find_program(p, var=p.upper()) except self.errors.ConfigurationError: pass v.DVIPSFLAGS = '-Ppdf'
15,360
Python
.py
451
30.228381
192
0.676836
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,752
ccroot.py
projecthamster_hamster/waflib/Tools/ccroot.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Classes and methods shared by tools providing support for C-like language such as C/C++/D/Assembly/Go (this support module is almost never used alone). """ import os, re from waflib import Task, Utils, Node, Errors, Logs from waflib.TaskGen import after_method, before_method, feature, taskgen_method, extension from waflib.Tools import c_aliases, c_preproc, c_config, c_osx, c_tests from waflib.Configure import conf SYSTEM_LIB_PATHS = ['/usr/lib64', '/usr/lib', '/usr/local/lib64', '/usr/local/lib'] USELIB_VARS = Utils.defaultdict(set) """ Mapping for features to :py:class:`waflib.ConfigSet.ConfigSet` variables. See :py:func:`waflib.Tools.ccroot.propagate_uselib_vars`. """ USELIB_VARS['c'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CCDEPS', 'CFLAGS', 'ARCH']) USELIB_VARS['cxx'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CXXDEPS', 'CXXFLAGS', 'ARCH']) USELIB_VARS['d'] = set(['INCLUDES', 'DFLAGS']) USELIB_VARS['includes'] = set(['INCLUDES', 'FRAMEWORKPATH', 'ARCH']) USELIB_VARS['cprogram'] = USELIB_VARS['cxxprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cshlib'] = USELIB_VARS['cxxshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cstlib'] = USELIB_VARS['cxxstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['dprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['asm'] = set(['ASFLAGS']) # ================================================================================================= @taskgen_method def create_compiled_task(self, name, node): """ Create 
the compilation task: c, cxx, asm, etc. The output node is created automatically (object file with a typical **.o** extension). The task is appended to the list *compiled_tasks* which is then used by :py:func:`waflib.Tools.ccroot.apply_link` :param name: name of the task class :type name: string :param node: the file to compile :type node: :py:class:`waflib.Node.Node` :return: The task created :rtype: :py:class:`waflib.Task.Task` """ out = '%s.%d.o' % (node.name, self.idx) task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @taskgen_method def to_incnodes(self, inlst): """ Task generator method provided to convert a list of string/nodes into a list of includes folders. The paths are assumed to be relative to the task generator path, except if they begin by **#** in which case they are searched from the top-level directory (``bld.srcnode``). The folders are simply assumed to be existing. The node objects in the list are returned in the output list. The strings are converted into node objects if possible. The node is searched from the source directory, and if a match is found, the equivalent build directory is created and added to the returned list too. When a folder cannot be found, it is ignored. 
:param inlst: list of folders :type inlst: space-delimited string or a list of string/nodes :rtype: list of :py:class:`waflib.Node.Node` :return: list of include folders as nodes """ lst = [] seen = set() for x in self.to_list(inlst): if x in seen or not x: continue seen.add(x) # with a real lot of targets, it is sometimes interesting to cache the results below if isinstance(x, Node.Node): lst.append(x) else: if os.path.isabs(x): lst.append(self.bld.root.make_node(x) or x) else: if x[0] == '#': p = self.bld.bldnode.make_node(x[1:]) v = self.bld.srcnode.make_node(x[1:]) else: p = self.path.get_bld().make_node(x) v = self.path.make_node(x) if p.is_child_of(self.bld.bldnode): p.mkdir() lst.append(p) lst.append(v) return lst @feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') @after_method('propagate_uselib_vars', 'process_source') def apply_incpaths(self): """ Task generator method that processes the attribute *includes*:: tg = bld(features='includes', includes='.') The folders only need to be relative to the current directory, the equivalent build directory is added automatically (for headers created in the build directory). This enables using a build directory or not (``top == out``). This method will add a list of nodes read by :py:func:`waflib.Tools.ccroot.to_incnodes` in ``tg.env.INCPATHS``, and the list of include paths in ``tg.env.INCLUDES``. """ lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env.INCLUDES) self.includes_nodes = lst cwd = self.get_cwd() self.env.INCPATHS = [x.path_from(cwd) for x in lst] class link_task(Task.Task): """ Base class for all link tasks. A task generator is supposed to have at most one link task bound in the attribute *link_task*. See :py:func:`waflib.Tools.ccroot.apply_link`. .. 
inheritance-diagram:: waflib.Tools.ccroot.stlink_task waflib.Tools.c.cprogram waflib.Tools.c.cshlib waflib.Tools.cxx.cxxstlib waflib.Tools.cxx.cxxprogram waflib.Tools.cxx.cxxshlib waflib.Tools.d.dprogram waflib.Tools.d.dshlib waflib.Tools.d.dstlib waflib.Tools.ccroot.fake_shlib waflib.Tools.ccroot.fake_stlib waflib.Tools.asm.asmprogram waflib.Tools.asm.asmshlib waflib.Tools.asm.asmstlib :top-classes: waflib.Tools.ccroot.link_task """ color = 'YELLOW' weight = 3 """Try to process link tasks as early as possible""" inst_to = None """Default installation path for the link task outputs, or None to disable""" chmod = Utils.O755 """Default installation mode for the link task outputs""" def add_target(self, target): """ Process the *target* attribute to add the platform-specific prefix/suffix such as *.so* or *.exe*. The settings are retrieved from ``env.clsname_PATTERN`` """ if isinstance(target, str): base = self.generator.path if target.startswith('#'): # for those who like flat structures target = target[1:] base = self.generator.bld.bldnode pattern = self.env[self.__class__.__name__ + '_PATTERN'] if not pattern: pattern = '%s' folder, name = os.path.split(target) if self.__class__.__name__.find('shlib') > 0 and getattr(self.generator, 'vnum', None): nums = self.generator.vnum.split('.') if self.env.DEST_BINFMT == 'pe': # include the version in the dll file name, # the import lib file name stays unversioned. name = name + '-' + nums[0] elif self.env.DEST_OS == 'openbsd': pattern = '%s.%s' % (pattern, nums[0]) if len(nums) >= 2: pattern += '.%s' % nums[1] if folder: tmp = folder + os.sep + pattern % name else: tmp = pattern % name target = base.find_or_declare(tmp) self.set_outputs(target) def exec_command(self, *k, **kw): ret = super(link_task, self).exec_command(*k, **kw) if not ret and self.env.DO_MANIFEST: ret = self.exec_mf() return ret def exec_mf(self): """ Create manifest files for VS-like compilers (msvc, ifort, ...) 
""" if not self.env.MT: return 0 manifest = None for out_node in self.outputs: if out_node.name.endswith('.manifest'): manifest = out_node.abspath() break else: # Should never get here. If we do, it means the manifest file was # never added to the outputs list, thus we don't have a manifest file # to embed, so we just return. return 0 # embedding mode. Different for EXE's and DLL's. # see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx mode = '' for x in Utils.to_list(self.generator.features): if x in ('cprogram', 'cxxprogram', 'fcprogram', 'fcprogram_test'): mode = 1 elif x in ('cshlib', 'cxxshlib', 'fcshlib'): mode = 2 Logs.debug('msvc: embedding manifest in mode %r', mode) lst = [] + self.env.MT lst.extend(Utils.to_list(self.env.MTFLAGS)) lst.extend(['-manifest', manifest]) lst.append('-outputresource:%s;%s' % (self.outputs[0].abspath(), mode)) return super(link_task, self).exec_command(lst) class stlink_task(link_task): """ Base for static link tasks, which use *ar* most of the time. The target is always removed before being written. """ run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}' chmod = Utils.O644 """Default installation mode for the static libraries""" def rm_tgt(cls): old = cls.run def wrap(self): try: os.remove(self.outputs[0].abspath()) except OSError: pass return old(self) setattr(cls, 'run', wrap) rm_tgt(stlink_task) @feature('skip_stlib_link_deps') @before_method('process_use') def apply_skip_stlib_link_deps(self): """ This enables an optimization in the :py:func:wafilb.Tools.ccroot.processes_use: method that skips dependency and link flag optimizations for targets that generate static libraries (via the :py:class:Tools.ccroot.stlink_task task). The actual behavior is implemented in :py:func:wafilb.Tools.ccroot.processes_use: method so this feature only tells waf to enable the new behavior. 
""" self.env.SKIP_STLIB_LINK_DEPS = True @feature('c', 'cxx', 'd', 'fc', 'asm') @after_method('process_source') def apply_link(self): """ Collect the tasks stored in ``compiled_tasks`` (created by :py:func:`waflib.Tools.ccroot.create_compiled_task`), and use the outputs for a new instance of :py:class:`waflib.Tools.ccroot.link_task`. The class to use is the first link task matching a name from the attribute *features*, for example:: def build(bld): tg = bld(features='cxx cxxprogram cprogram', source='main.c', target='app') will create the task ``tg.link_task`` as a new instance of :py:class:`waflib.Tools.cxx.cxxprogram` """ for x in self.features: if x == 'cprogram' and 'cxx' in self.features: # limited compat x = 'cxxprogram' elif x == 'cshlib' and 'cxx' in self.features: x = 'cxxshlib' if x in Task.classes: if issubclass(Task.classes[x], link_task): link = x break else: return objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])] self.link_task = self.create_task(link, objs) self.link_task.add_target(self.target) # remember that the install paths are given by the task generators try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: # install a copy of the node list we have at this moment (implib not added) self.install_task = self.add_install_files( install_to=inst_to, install_from=self.link_task.outputs[:], chmod=self.link_task.chmod, task=self.link_task) @taskgen_method def use_rec(self, name, **kw): """ Processes the ``use`` keyword recursively. 
This method is kind of private and only meant to be used from ``process_use`` """ if name in self.tmp_use_not or name in self.tmp_use_seen: return try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) self.tmp_use_not.add(name) return self.tmp_use_seen.append(name) y.post() # bind temporary attributes on the task generator y.tmp_use_objects = objects = kw.get('objects', True) y.tmp_use_stlib = stlib = kw.get('stlib', True) try: link_task = y.link_task except AttributeError: y.tmp_use_var = '' else: objects = False if not isinstance(link_task, stlink_task): stlib = False y.tmp_use_var = 'LIB' else: y.tmp_use_var = 'STLIB' p = self.tmp_use_prec for x in self.to_list(getattr(y, 'use', [])): if self.env["STLIB_" + x]: continue try: p[x].append(name) except KeyError: p[x] = [name] self.use_rec(x, objects=objects, stlib=stlib) @feature('c', 'cxx', 'd', 'use', 'fc') @before_method('apply_incpaths', 'propagate_uselib_vars') @after_method('apply_link', 'process_source') def process_use(self): """ Process the ``use`` attribute which contains a list of task generator names:: def build(bld): bld.shlib(source='a.c', target='lib1') bld.program(source='main.c', target='app', use='lib1') See :py:func:`waflib.Tools.ccroot.use_rec`. 
""" use_not = self.tmp_use_not = set() self.tmp_use_seen = [] # we would like an ordered set use_prec = self.tmp_use_prec = {} self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'use', [])) for x in names: self.use_rec(x) for x in use_not: if x in use_prec: del use_prec[x] # topological sort out = self.tmp_use_sorted = [] tmp = [] for x in self.tmp_use_seen: for k in use_prec.values(): if x in k: break else: tmp.append(x) while tmp: e = tmp.pop() out.append(e) try: nlst = use_prec[e] except KeyError: pass else: del use_prec[e] for x in nlst: for y in use_prec: if x in use_prec[y]: break else: tmp.append(x) if use_prec: raise Errors.WafError('Cycle detected in the use processing %r' % use_prec) out.reverse() link_task = getattr(self, 'link_task', None) for x in out: y = self.bld.get_tgen_by_name(x) var = y.tmp_use_var if var and link_task: if self.env.SKIP_STLIB_LINK_DEPS and isinstance(link_task, stlink_task): # If the skip_stlib_link_deps feature is enabled then we should # avoid adding lib deps to the stlink_task instance. 
pass elif var == 'LIB' or y.tmp_use_stlib or x in names: self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]]) self.link_task.dep_nodes.extend(y.link_task.outputs) tmp_path = y.link_task.outputs[0].parent.path_from(self.get_cwd()) self.env.append_unique(var + 'PATH', [tmp_path]) else: if y.tmp_use_objects: self.add_objects_from_tgen(y) if getattr(y, 'export_includes', None): # self.includes may come from a global variable #2035 self.includes = self.includes + y.to_incnodes(y.export_includes) if getattr(y, 'export_defines', None): self.env.append_value('DEFINES', self.to_list(y.export_defines)) # and finally, add the use variables (no recursion needed) for x in names: try: y = self.bld.get_tgen_by_name(x) except Errors.WafError: if not self.env['STLIB_' + x] and not x in self.uselib: self.uselib.append(x) else: for k in self.to_list(getattr(y, 'use', [])): if not self.env['STLIB_' + k] and not k in self.uselib: self.uselib.append(k) @taskgen_method def accept_node_to_link(self, node): """ PRIVATE INTERNAL USE ONLY """ return not node.name.endswith('.pdb') @taskgen_method def add_objects_from_tgen(self, tg): """ Add the objects from the depending compiled tasks as link task inputs. 
Some objects are filtered: for instance, .pdb files are added to the compiled tasks but not to the link tasks (to avoid errors) PRIVATE INTERNAL USE ONLY """ try: link_task = self.link_task except AttributeError: pass else: for tsk in getattr(tg, 'compiled_tasks', []): for x in tsk.outputs: if self.accept_node_to_link(x): link_task.inputs.append(x) @taskgen_method def get_uselib_vars(self): """ :return: the *uselib* variables associated to the *features* attribute (see :py:attr:`waflib.Tools.ccroot.USELIB_VARS`) :rtype: list of string """ _vars = set() for x in self.features: if x in USELIB_VARS: _vars |= USELIB_VARS[x] return _vars @feature('c', 'cxx', 'd', 'fc', 'javac', 'cs', 'uselib', 'asm') @after_method('process_use') def propagate_uselib_vars(self): """ Process uselib variables for adding flags. For example, the following target:: def build(bld): bld.env.AFLAGS_aaa = ['bar'] from waflib.Tools.ccroot import USELIB_VARS USELIB_VARS['aaa'] = ['AFLAGS'] tg = bld(features='aaa', aflags='test') The *aflags* attribute will be processed and this method will set:: tg.env.AFLAGS = ['bar', 'test'] """ _vars = self.get_uselib_vars() env = self.env app = env.append_value feature_uselib = self.features + self.to_list(getattr(self, 'uselib', [])) for var in _vars: y = var.lower() val = getattr(self, y, []) if val: app(var, self.to_list(val)) for x in feature_uselib: val = env['%s_%s' % (var, x)] if val: app(var, val) # ============ the code above must not know anything about import libs ========== @feature('cshlib', 'cxxshlib', 'fcshlib') @after_method('apply_link') def apply_implib(self): """ Handle dlls and their import libs on Windows-like systems. A ``.dll.a`` file called *import library* is generated. It must be installed as it is required for linking the library. 
""" if not self.env.DEST_BINFMT == 'pe': return dll = self.link_task.outputs[0] if isinstance(self.target, Node.Node): name = self.target.name else: name = os.path.split(self.target)[1] implib = self.env.implib_PATTERN % name implib = dll.parent.find_or_declare(implib) self.env.append_value('LINKFLAGS', self.env.IMPLIB_ST % implib.bldpath()) self.link_task.outputs.append(implib) if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe': node = self.path.find_resource(self.defs) if not node: raise Errors.WafError('invalid def file %r' % self.defs) if self.env.def_PATTERN: self.env.append_value('LINKFLAGS', self.env.def_PATTERN % node.path_from(self.get_cwd())) self.link_task.dep_nodes.append(node) else: # gcc for windows takes *.def file as input without any special flag self.link_task.inputs.append(node) # where to put the import library if getattr(self, 'install_task', None): try: # user has given a specific installation path for the import library inst_to = self.install_path_implib except AttributeError: try: # user has given an installation path for the main library, put the import library in it inst_to = self.install_path except AttributeError: # else, put the library in BINDIR and the import library in LIBDIR inst_to = '${IMPLIBDIR}' self.install_task.install_to = '${BINDIR}' if not self.env.IMPLIBDIR: self.env.IMPLIBDIR = self.env.LIBDIR self.implib_install_task = self.add_install_files(install_to=inst_to, install_from=implib, chmod=self.link_task.chmod, task=self.link_task) # ============ the code above must not know anything about vnum processing on unix platforms ========= re_vnum = re.compile('^([1-9]\\d*|0)([.]([1-9]\\d*|0)){0,2}?$') @feature('cshlib', 'cxxshlib', 'dshlib', 'fcshlib', 'vnum') @after_method('apply_link', 'propagate_uselib_vars') def apply_vnum(self): """ Enforce version numbering on shared libraries. 
The valid version numbers must have either zero or two dots:: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16') In this example on Linux platform, ``libfoo.so`` is installed as ``libfoo.so.14.15.16``, and the following symbolic links are created: * ``libfoo.so → libfoo.so.14.15.16`` * ``libfoo.so.14 → libfoo.so.14.15.16`` By default, the library will be assigned SONAME ``libfoo.so.14``, effectively declaring ABI compatibility between all minor and patch releases for the major version of the library. When necessary, the compatibility can be explicitly defined using `cnum` parameter: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16', cnum='14.15') In this case, the assigned SONAME will be ``libfoo.so.14.15`` with ABI compatibility only between path releases for a specific major and minor version of the library. On OS X platform, install-name parameter will follow the above logic for SONAME with exception that it also specifies an absolute path (based on install_path) of the library. """ if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'): return link = self.link_task if not re_vnum.match(self.vnum): raise Errors.WafError('Invalid vnum %r for target %r' % (self.vnum, getattr(self, 'name', self))) nums = self.vnum.split('.') node = link.outputs[0] cnum = getattr(self, 'cnum', str(nums[0])) cnums = cnum.split('.') if len(cnums)>len(nums) or nums[0:len(cnums)] != cnums: raise Errors.WafError('invalid compatibility version %s' % cnum) libname = node.name if libname.endswith('.dylib'): name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum) name2 = libname.replace('.dylib', '.%s.dylib' % cnum) else: name3 = libname + '.' + self.vnum name2 = libname + '.' 
+ cnum # add the so name for the ld linker - to disable, just unset env.SONAME_ST if self.env.SONAME_ST: v = self.env.SONAME_ST % name2 self.env.append_value('LINKFLAGS', v.split()) # the following task is just to enable execution from the build dir :-/ if self.env.DEST_OS != 'openbsd': outs = [node.parent.make_node(name3)] if name2 != name3: outs.append(node.parent.make_node(name2)) self.create_task('vnum', node, outs) if getattr(self, 'install_task', None): self.install_task.hasrun = Task.SKIPPED self.install_task.no_errcheck_out = True path = self.install_task.install_to if self.env.DEST_OS == 'openbsd': libname = self.link_task.outputs[0].name t1 = self.add_install_as(install_to='%s/%s' % (path, libname), install_from=node, chmod=self.link_task.chmod) self.vnum_install_task = (t1,) else: t1 = self.add_install_as(install_to=path + os.sep + name3, install_from=node, chmod=self.link_task.chmod) t3 = self.add_symlink_as(install_to=path + os.sep + libname, install_from=name3) if name2 != name3: t2 = self.add_symlink_as(install_to=path + os.sep + name2, install_from=name3) self.vnum_install_task = (t1, t2, t3) else: self.vnum_install_task = (t1, t3) if '-dynamiclib' in self.env.LINKFLAGS: # this requires after(propagate_uselib_vars) try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: p = Utils.subst_vars(inst_to, self.env) path = os.path.join(p, name2) self.env.append_value('LINKFLAGS', ['-install_name', path]) self.env.append_value('LINKFLAGS', '-Wl,-compatibility_version,%s' % cnum) self.env.append_value('LINKFLAGS', '-Wl,-current_version,%s' % self.vnum) class vnum(Task.Task): """ Create the symbolic links for a versioned shared library. 
Instances are created by :py:func:`waflib.Tools.ccroot.apply_vnum` """ color = 'CYAN' ext_in = ['.bin'] def keyword(self): return 'Symlinking' def run(self): for x in self.outputs: path = x.abspath() try: os.remove(path) except OSError: pass try: os.symlink(self.inputs[0].name, path) except OSError: return 1 class fake_shlib(link_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME class fake_stlib(stlink_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME @conf def read_shlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes:: def build(bld): bld.read_shlib('m') bld.program(source='main.c', use='m') """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines) @conf def read_stlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system static library, enabling a use as a local library. Will trigger a rebuild if the file changes. """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='stlib', export_includes=export_includes, export_defines=export_defines) lib_patterns = { 'shlib' : ['lib%s.so', '%s.so', 'lib%s.dylib', 'lib%s.dll', '%s.dll'], 'stlib' : ['lib%s.a', '%s.a', 'lib%s.dll', '%s.dll', 'lib%s.lib', '%s.lib'], } @feature('fake_lib') def process_lib(self): """ Find the location of a foreign library. Used by :py:class:`waflib.Tools.ccroot.read_shlib` and :py:class:`waflib.Tools.ccroot.read_stlib`. 
""" node = None names = [x % self.name for x in lib_patterns[self.lib_type]] for x in self.lib_paths + [self.path] + SYSTEM_LIB_PATHS: if not isinstance(x, Node.Node): x = self.bld.root.find_node(x) or self.path.find_node(x) if not x: continue for y in names: node = x.find_node(y) if node: try: Utils.h_file(node.abspath()) except EnvironmentError: raise ValueError('Could not read %r' % y) break else: continue break else: raise Errors.WafError('could not find library %r' % self.name) self.link_task = self.create_task('fake_%s' % self.lib_type, [], [node]) self.target = self.name class fake_o(Task.Task): def runnable_status(self): return Task.SKIP_ME @extension('.o', '.obj') def add_those_o_files(self, node): tsk = self.create_task('fake_o', [], node) try: self.compiled_tasks.append(tsk) except AttributeError: self.compiled_tasks = [tsk] @feature('fake_obj') @before_method('process_source') def process_objs(self): """ Puts object files in the task generator outputs """ for node in self.to_nodes(self.source): self.add_those_o_files(node) self.source = [] @conf def read_object(self, obj): """ Read an object file, enabling injection in libs/programs. Will trigger a rebuild if the file changes. :param obj: object file path, as string or Node """ if not isinstance(obj, self.path.__class__): obj = self.path.find_resource(obj) return self(features='fake_obj', source=obj, name=obj.name) @feature('cxxprogram', 'cprogram') @after_method('apply_link', 'process_use') def set_full_paths_hpux(self): """ On hp-ux, extend the libpaths and static library paths to absolute paths """ if self.env.DEST_OS != 'hp-ux': return base = self.bld.bldnode.abspath() for var in ['LIBPATH', 'STLIBPATH']: lst = [] for x in self.env[var]: if x.startswith('/'): lst.append(x) else: lst.append(os.path.normpath(os.path.join(base, x))) self.env[var] = lst
26,307
Python
.py
674
36.081602
394
0.696758
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,753
fc.py
projecthamster_hamster/waflib/Tools/fc.py
#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) """ Fortran support """ from waflib import Utils, Task, Errors from waflib.Tools import ccroot, fc_config, fc_scan from waflib.TaskGen import extension from waflib.Configure import conf ccroot.USELIB_VARS['fc'] = set(['FCFLAGS', 'DEFINES', 'INCLUDES', 'FCPPFLAGS']) ccroot.USELIB_VARS['fcprogram_test'] = ccroot.USELIB_VARS['fcprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'LDFLAGS']) ccroot.USELIB_VARS['fcshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'LDFLAGS']) ccroot.USELIB_VARS['fcstlib'] = set(['ARFLAGS', 'LINKDEPS']) @extension('.f','.F','.f90','.F90','.for','.FOR','.f95','.F95','.f03','.F03','.f08','.F08') def fc_hook(self, node): "Binds the Fortran file extensions create :py:class:`waflib.Tools.fc.fc` instances" return self.create_compiled_task('fc', node) @conf def modfile(conf, name): """ Turns a module name into the right module file name. Defaults to all lower case. """ if name.find(':') >= 0: # Depending on a submodule! separator = conf.env.FC_SUBMOD_SEPARATOR or '@' # Ancestors of the submodule will be prefixed to the # submodule name, separated by a colon. modpath = name.split(':') # Only the ancestor (actual) module and the submodule name # will be used for the filename. modname = modpath[0] + separator + modpath[-1] suffix = conf.env.FC_SUBMOD_SUFFIX or '.smod' else: modname = name suffix = '.mod' return {'lower' :modname.lower() + suffix.lower(), 'lower.MOD' :modname.lower() + suffix.upper(), 'UPPER.mod' :modname.upper() + suffix.lower(), 'UPPER' :modname.upper() + suffix.upper()}[conf.env.FC_MOD_CAPITALIZATION or 'lower'] def get_fortran_tasks(tsk): """ Obtains all fortran tasks from the same build group. 
Those tasks must not have the attribute 'nomod' or 'mod_fortran_done' :return: a list of :py:class:`waflib.Tools.fc.fc` instances """ bld = tsk.generator.bld tasks = bld.get_tasks_group(bld.get_group_idx(tsk.generator)) return [x for x in tasks if isinstance(x, fc) and not getattr(x, 'nomod', None) and not getattr(x, 'mod_fortran_done', None)] class fc(Task.Task): """ Fortran tasks can only run when all fortran tasks in a current task group are ready to be executed This may cause a deadlock if some fortran task is waiting for something that cannot happen (circular dependency) Should this ever happen, set the 'nomod=True' on those tasks instances to break the loop """ color = 'GREEN' run_str = '${FC} ${FCFLAGS} ${FCINCPATH_ST:INCPATHS} ${FCDEFINES_ST:DEFINES} ${_FCMODOUTFLAGS} ${FC_TGT_F}${TGT[0].abspath()} ${FC_SRC_F}${SRC[0].abspath()} ${FCPPFLAGS}' vars = ["FORTRANMODPATHFLAG"] def scan(self): """Fortran dependency scanner""" tmp = fc_scan.fortran_parser(self.generator.includes_nodes) tmp.task = self tmp.start(self.inputs[0]) return (tmp.nodes, tmp.names) def runnable_status(self): """ Sets the mod file outputs and the dependencies on the mod files over all Fortran tasks executed by the main thread so there are no concurrency issues """ if getattr(self, 'mod_fortran_done', None): return super(fc, self).runnable_status() # now, if we reach this part it is because this fortran task is the first in the list bld = self.generator.bld # obtain the fortran tasks lst = get_fortran_tasks(self) # disable this method for other tasks for tsk in lst: tsk.mod_fortran_done = True # wait for all the .f tasks to be ready for execution # and ensure that the scanners are called at least once for tsk in lst: ret = tsk.runnable_status() if ret == Task.ASK_LATER: # we have to wait for one of the other fortran tasks to be ready # this may deadlock if there are dependencies between fortran tasks # but this should not happen (we are setting them here!) 
for x in lst: x.mod_fortran_done = None return Task.ASK_LATER ins = Utils.defaultdict(set) outs = Utils.defaultdict(set) # the .mod files to create for tsk in lst: key = tsk.uid() for x in bld.raw_deps[key]: if x.startswith('MOD@'): name = bld.modfile(x.replace('MOD@', '')) node = bld.srcnode.find_or_declare(name) tsk.set_outputs(node) outs[node].add(tsk) # the .mod files to use for tsk in lst: key = tsk.uid() for x in bld.raw_deps[key]: if x.startswith('USE@'): name = bld.modfile(x.replace('USE@', '')) node = bld.srcnode.find_resource(name) if node and node not in tsk.outputs: if not node in bld.node_deps[key]: bld.node_deps[key].append(node) ins[node].add(tsk) # if the intersection matches, set the order for k in ins.keys(): for a in ins[k]: a.run_after.update(outs[k]) for x in outs[k]: self.generator.bld.producer.revdeps[x].add(a) # the scanner cannot output nodes, so we have to set them # ourselves as task.dep_nodes (additional input nodes) tmp = [] for t in outs[k]: tmp.extend(t.outputs) a.dep_nodes.extend(tmp) a.dep_nodes.sort(key=lambda x: x.abspath()) # the task objects have changed: clear the signature cache for tsk in lst: try: delattr(tsk, 'cache_sig') except AttributeError: pass return super(fc, self).runnable_status() class fcprogram(ccroot.link_task): """Links Fortran programs""" color = 'YELLOW' run_str = '${FC} ${LINKFLAGS} ${FCLNK_SRC_F}${SRC} ${FCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FCSTLIB_MARKER} ${FCSTLIBPATH_ST:STLIBPATH} ${FCSTLIB_ST:STLIB} ${FCSHLIB_MARKER} ${FCLIBPATH_ST:LIBPATH} ${FCLIB_ST:LIB} ${LDFLAGS}' inst_to = '${BINDIR}' class fcshlib(fcprogram): """Links Fortran libraries""" inst_to = '${LIBDIR}' class fcstlib(ccroot.stlink_task): """Links Fortran static libraries (uses ar by default)""" pass # do not remove the pass statement class fcprogram_test(fcprogram): """Custom link task to obtain compiler outputs for Fortran configuration tests""" def runnable_status(self): """This task is always executed""" ret = 
super(fcprogram_test, self).runnable_status() if ret == Task.SKIP_ME: ret = Task.RUN_ME return ret def exec_command(self, cmd, **kw): """Stores the compiler std our/err onto the build context, to bld.out + bld.err""" bld = self.generator.bld kw['shell'] = isinstance(cmd, str) kw['stdout'] = kw['stderr'] = Utils.subprocess.PIPE kw['cwd'] = self.get_cwd() bld.out = bld.err = '' bld.to_log('command: %s\n' % cmd) kw['output'] = 0 try: (bld.out, bld.err) = bld.cmd_and_log(cmd, **kw) except Errors.WafError: return -1 if bld.out: bld.to_log('out: %s\n' % bld.out) if bld.err: bld.to_log('err: %s\n' % bld.err)
6,763
Python
.py
169
36.786982
239
0.691768
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,754
test_stuff.py
projecthamster_hamster/tests/test_stuff.py
import sys, os.path # a convoluted line to add hamster module to absolute path sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), "../src"))) import datetime as pdt import unittest import re from hamster.lib import datetime as dt from hamster.lib.dbus import ( to_dbus_fact, to_dbus_fact_json, to_dbus_range, from_dbus_fact, from_dbus_fact_json, from_dbus_range, ) from hamster.lib.fact import Fact from hamster.lib.parsing import get_tags_from_description class TestFact(unittest.TestCase): def test_range(self): t1 = dt.datetime(2020, 1, 15, 13, 30) t2 = dt.datetime(2020, 1, 15, 15, 30) range = dt.Range(t1, t2) fact = Fact(range=range) self.assertEqual(fact.range.start, t1) self.assertEqual(fact.range.end, t2) fact = Fact(start=t1, end=t2) self.assertEqual(fact.range.start, t1) self.assertEqual(fact.range.end, t2) # backward compatibility (before v3.0) fact = Fact(start_time=t1, end_time=t2) self.assertEqual(fact.range.start, t1) self.assertEqual(fact.range.end, t2) class TestFactParsing(unittest.TestCase): def test_plain_name(self): # plain activity name activity = Fact.parse("just a simple case with ütf-8") self.assertEqual(activity.activity, "just a simple case with ütf-8") assert activity.start_time is None assert activity.end_time is None assert not activity.category assert not activity.description def test_only_range(self): fact = Fact.parse("-20") assert not fact.activity fact = Fact.parse("-20 -10") assert not fact.activity def test_with_start_time(self): # with time activity = Fact.parse("12:35 with start time") self.assertEqual(activity.activity, "with start time") self.assertEqual(activity.start_time.strftime("%H:%M"), "12:35") #rest must be empty assert not activity.category assert activity.end_time is None assert not activity.description def test_with_start_and_end_time(self): # with time activity = Fact.parse("12:35-14:25 with start-end time") self.assertEqual(activity.activity, "with start-end time") 
self.assertEqual(activity.start_time.strftime("%H:%M"), "12:35") self.assertEqual(activity.end_time.strftime("%H:%M"), "14:25") #rest must be empty assert not activity.category assert not activity.description def test_category(self): # plain activity name activity = Fact.parse("just a simple case@hämster") self.assertEqual(activity.activity, "just a simple case") self.assertEqual(activity.category, "hämster") assert activity.start_time is None assert activity.end_time is None assert not activity.description def test_description(self): # plain activity name activity = Fact.parse("case, with added descriptiön") self.assertEqual(activity.activity, "case") self.assertEqual(activity.description, "with added descriptiön") assert not activity.category assert activity.start_time is None assert activity.end_time is None assert not activity.category def test_description_with_commas(self): activity = Fact.parse("case, meet with a, b and c, #holiday") self.assertEqual(activity.description, "meet with a, b and c") def test_tags(self): # plain activity name activity = Fact.parse("#case, description with #hash, #and #some #tägs") self.assertEqual(activity.activity, "#case") self.assertEqual(activity.description, "description with #hash") self.assertEqual(set(activity.tags), set(["and", "hash", "some", "tägs"])) assert not activity.category assert activity.start_time is None assert activity.end_time is None def test_multiple_tags_separated_with_commas(self): activity = Fact.parse("devel, fun times, #bugs, #pr, #hamster") self.assertEqual(set(activity.tags), set(["bugs", "pr", "hamster"])) def test_tag_in_description_ignores_tag_starting_with_a_number(self): activity = Fact.parse("case, fix bug #123, #tag1") self.assertEqual(activity.description, "fix bug #123") self.assertEqual(set(activity.tags), set(["tag1"])) def test_serialization_does_not_duplicate_tag_from_description(self): fact = Fact(activity="activity", description="review #pr", tags=["pr", "hamster"]) 
self.assertEqual(fact.serialized(), "activity, review #pr, #hamster") fact = Fact(activity="activity", description="review #pr in #hamster", tags=["pr", "hamster"]) self.assertEqual(fact.serialized(), "activity, review #pr in #hamster") def test_tags_without_description(self): activity = Fact.parse("case, #tag1 #tag2") self.assertEqual(activity.activity, "case") self.assertEqual(activity.description, "") self.assertEqual(set(activity.tags), set(["tag1", "tag2"])) def test_tags_with_spaces(self): activity = Fact.parse("case, #tag with space #tag2") self.assertEqual(activity.activity, "case") self.assertEqual(activity.description, "") self.assertEqual(set(activity.tags), set(["tag with space", "tag2"])) def test_full(self): # plain activity name activity = Fact.parse( "1225-1325 case@cat, description #hash non-tag, #tag #bäg") self.assertEqual(activity.start_time.strftime("%H:%M"), "12:25") self.assertEqual(activity.end_time.strftime("%H:%M"), "13:25") self.assertEqual(activity.activity, "case") self.assertEqual(activity.category, "cat") self.assertEqual(activity.description, "description #hash non-tag") self.assertEqual(set(activity.tags), set(["hash", "bäg", "tag"])) def test_copy(self): fact1 = Fact.parse("12:25-13:25 case@cat, description #tag #bäg") fact2 = fact1.copy() self.assertEqual(fact1.start_time, fact2.start_time) self.assertEqual(fact1.end_time, fact2.end_time) self.assertEqual(fact1.activity, fact2.activity) self.assertEqual(fact1.category, fact2.category) self.assertEqual(fact1.description, fact2.description) self.assertEqual(fact1.tags, fact2.tags) fact3 = fact1.copy(activity="changed") self.assertEqual(fact3.activity, "changed") fact3 = fact1.copy(category="changed") self.assertEqual(fact3.category, "changed") fact3 = fact1.copy(description="changed") self.assertEqual(fact3.description, "changed") fact3 = fact1.copy(tags=["changed"]) self.assertEqual(fact3.tags, ["changed"]) def test_comparison(self): fact1 = Fact.parse("12:25-13:25 case@cat, 
description, #tag #bäg") fact2 = fact1.copy() self.assertEqual(fact1, fact2) fact2 = fact1.copy() fact2.activity = "abcd" self.assertNotEqual(fact1, fact2) fact2 = fact1.copy() fact2.category = "abcd" self.assertNotEqual(fact1, fact2) fact2 = fact1.copy() fact2.description = "abcd" self.assertNotEqual(fact1, fact2) fact2 = fact1.copy() fact2.range.start = fact1.range.start + dt.timedelta(minutes=1) self.assertNotEqual(fact1, fact2) fact2 = fact1.copy() fact2.range.end = fact1.range.end + dt.timedelta(minutes=1) self.assertNotEqual(fact1, fact2) # wrong order fact2 = fact1.copy() fact2.tags = ["bäg", "tag"] self.assertNotEqual(fact1, fact2) # correct order fact2 = fact1.copy() fact2.tags = ["tag", "bäg"] self.assertEqual(fact1, fact2) def test_decimal_in_activity(self): # cf. issue #270 fact = Fact.parse("12:25-13:25 10.0@ABC, Two Words #tag #bäg") self.assertEqual(fact.activity, "10.0") self.assertEqual(fact.category, "ABC") # should not pick up a time here fact = Fact.parse("10.00@ABC, Two Words #tag #bäg") self.assertEqual(fact.activity, "10.00") self.assertEqual(fact.category, "ABC") def test_activity_with_spaces(self): fact = Fact.parse("11:00 12:00 BPC-261 - Task title@Project") self.assertEqual(fact.activity, "BPC-261 - Task title") self.assertEqual(fact.category, "Project") self.assertEqual(fact.description, "") self.assertEqual(fact.tags, []) def test_activity_and_category_with_hash_and_space(self): fact = Fact.parse("11:00 12:00 Activity #1@Category #2") self.assertEqual(fact.activity, "Activity #1") self.assertEqual(fact.category, "Category #2") self.assertEqual(fact.description, "") def test_serialization_of_an_empty_fact(self): fact = Fact() self.assertEqual(fact.serialized(), "") def test_commas(self): fact = Fact.parse("11:00 12:00 activity@category, description, with comma") self.assertEqual(fact.activity, "activity") self.assertEqual(fact.category, "category") self.assertEqual(fact.description, "description, with comma") self.assertEqual(fact.tags, 
[]) fact = Fact.parse("11:00 12:00 activity@category, description, with comma, #tag1 #tag2") self.assertEqual(fact.activity, "activity") self.assertEqual(fact.category, "category") self.assertEqual(fact.description, "description, with comma") self.assertEqual(fact.tags, ["tag1", "tag2"]) fact = Fact.parse("11:00 12:00 activity@category, description, with comma and #hash, #tag1 #tag2") self.assertEqual(fact.activity, "activity") self.assertEqual(fact.category, "category") self.assertEqual(fact.description, "description, with comma and #hash") self.assertEqual(fact.tags, ["hash", "tag1", "tag2"]) def test_backwards_compat_double_comma(self): fact = Fact.parse("act@cat,, My description,, #tag1 #tag2") self.assertEqual(fact.description, "My description") self.assertEqual(fact.tags, ["tag1", "tag2"]) fact = Fact.parse("act@cat,, My description, with comma,, #tag1 #tag2") self.assertEqual(fact.description, "My description, with comma") self.assertEqual(fact.tags, ["tag1", "tag2"]) # ugly. Really need pytest def test_roundtrips(self): for start_time in ( None, dt.time(12, 33), ): for end_time in ( None, dt.time(13, 34), ): for activity in ( "activity", "#123 with two #hash", "17.00 tea", ): for category in ( "", "category", ): for description in ( "", "description", "with, comma", "with @at", "multiline\ndescription", ): for tags in ( [], ["single"], ["with space"], ["two", "tags"], ["with @at"], ): start = dt.datetime.from_day_time(dt.hday.today(), start_time ) if start_time else None end = dt.datetime.from_day_time(dt.hday.today(), end_time ) if end_time else None if end and not start: # end without start is not parseable continue fact = Fact(start_time=start, end_time=end, activity=activity, category=category, description=description, tags=tags) for range_pos in ("head", "tail"): fact_str = fact.serialized(range_pos=range_pos) parsed = Fact.parse(fact_str, range_pos=range_pos) self.assertEqual(parsed.range.start, fact.range.start) self.assertEqual(parsed.range.end, 
fact.range.end) self.assertEqual(parsed.activity, fact.activity) self.assertEqual(parsed.category, fact.category) self.assertEqual(parsed.description, fact.description) self.assertEqual(parsed.tags, fact.tags) self.assertEqual(fact, parsed) class TestDatetime(unittest.TestCase): def test_datetime_from_day_time(self): day = dt.date(2018, 8, 13) time = dt.time(23, 10) expected = dt.datetime(2018, 8, 13, 23, 10) # 2018-08-13 23:10 self.assertEqual(dt.datetime.from_day_time(day, time), expected) day = dt.date(2018, 8, 13) time = dt.time(0, 10) expected = dt.datetime(2018, 8, 14, 0, 10) # 2018-08-14 00:10 self.assertEqual(dt.datetime.from_day_time(day, time), expected) def test_format_timedelta(self): delta = dt.timedelta(minutes=10) self.assertEqual(delta.format("human"), "10min") delta = dt.timedelta(hours=5, minutes=0) self.assertEqual(delta.format("human"), "5h") delta = dt.timedelta(hours=5, minutes=10) self.assertEqual(delta.format("human"), "5h 10min") delta = dt.timedelta(hours=5, minutes=10) self.assertEqual(delta.format("HH:MM"), "05:10") def test_datetime_hday(self): date_time = dt.datetime(2018, 8, 13, 23, 10) # 2018-08-13 23:10 expected = dt.date(2018, 8, 13) self.assertEqual(date_time.hday(), expected) date_time = dt.datetime(2018, 8, 14, 0, 10) # 2018-08-14 0:10 expected = dt.date(2018, 8, 13) self.assertEqual(date_time.hday(), expected) today = dt.hday.today() self.assertEqual(type(today), dt.hday) def test_parse_date(self): date = dt.date.parse("2020-01-05") self.assertEqual(date, pdt.date(2020, 1, 5)) def test_parse_time(self): self.assertEqual(dt.time.parse("9:01"), pdt.time(9, 1)) self.assertEqual(dt.time.parse("9.01"), pdt.time(9, 1)) self.assertEqual(dt.time.parse("12:01"), pdt.time(12, 1)) self.assertEqual(dt.time.parse("12.01"), pdt.time(12, 1)) self.assertEqual(dt.time.parse("1201"), pdt.time(12, 1)) def test_parse_datetime(self): self.assertEqual(dt.datetime.parse("2020-01-05 9:01"), pdt.datetime(2020, 1, 5, 9, 1)) def 
test_datetime_patterns(self): p = dt.datetime.pattern(1) s = "12:03" m = re.fullmatch(p, s, re.VERBOSE) time = dt.datetime._extract_datetime(m, d="date1", h="hour1", m="minute1", r="relative1", default_day=dt.hday.today()) self.assertEqual(time.strftime("%H:%M"), "12:03") s = "2019-12-01 12:36" m = re.fullmatch(p, s, re.VERBOSE) time = dt.datetime._extract_datetime(m, d="date1", h="hour1", m="minute1", r="relative1") self.assertEqual(time.strftime("%Y-%m-%d %H:%M"), "2019-12-01 12:36") s = "-25" m = re.fullmatch(p, s, re.VERBOSE) relative = dt.datetime._extract_datetime(m, d="date1", h="hour1", m="minute1", r="relative1", default_day=dt.hday.today()) self.assertEqual(relative, dt.timedelta(minutes=-25)) s = "2019-12-05" m = re.search(p, s, re.VERBOSE) self.assertEqual(m, None) def test_parse_datetime_range(self): # only match clean s = "10.00@cat" (start, end), rest = dt.Range.parse(s, position="head") self.assertEqual(start, None) self.assertEqual(end, None) s = "12:02" (start, end), rest = dt.Range.parse(s) self.assertEqual(start.strftime("%H:%M"), "12:02") self.assertEqual(end, None) s = "12:03 13:04" (start, end), rest = dt.Range.parse(s) self.assertEqual(start.strftime("%H:%M"), "12:03") self.assertEqual(end.strftime("%H:%M"), "13:04") s = "12:35 activity" (start, end), rest = dt.Range.parse(s, position="head") self.assertEqual(start.strftime("%H:%M"), "12:35") self.assertEqual(end, None) s = "2019-12-01 12:33 activity" (start, end), rest = dt.Range.parse(s, position="head") self.assertEqual(start.strftime("%Y-%m-%d %H:%M"), "2019-12-01 12:33") self.assertEqual(end, None) ref = dt.datetime(2019, 11, 29, 13, 55) # 2019-11-29 13:55 s = "-25 activity" (start, end), rest = dt.Range.parse(s, position="head", ref=ref) self.assertEqual(start.strftime("%Y-%m-%d %H:%M"), "2019-11-29 13:30") self.assertEqual(end, None) s = "+25 activity" (start, end), rest = dt.Range.parse(s, position="head", ref=ref) self.assertEqual(start.strftime("%Y-%m-%d %H:%M"), "2019-11-29 
14:20") self.assertEqual(end, None) s = "-55 -25 activity" (start, end), rest = dt.Range.parse(s, position="head", ref=ref) self.assertEqual(start.strftime("%Y-%m-%d %H:%M"), "2019-11-29 13:00") self.assertEqual(end.strftime("%Y-%m-%d %H:%M"), "2019-11-29 13:30") s = "+25 +55 activity" (start, end), rest = dt.Range.parse(s, position="head", ref=ref) self.assertEqual(start.strftime("%Y-%m-%d %H:%M"), "2019-11-29 14:20") self.assertEqual(end.strftime("%Y-%m-%d %H:%M"), "2019-11-29 14:50") s = "-55 -120 activity" (start, end), rest = dt.Range.parse(s, position="head", ref=ref) self.assertEqual(start.strftime("%Y-%m-%d %H:%M"), "2019-11-29 13:00") self.assertEqual(end.strftime("%Y-%m-%d %H:%M"), "2019-11-29 11:55") s = "-50 20 activity" (start, end), rest = dt.Range.parse(s, position="head", ref=ref) self.assertEqual(start.strftime("%Y-%m-%d %H:%M"), "2019-11-29 13:05") self.assertEqual(end.strftime("%Y-%m-%d %H:%M"), "2019-11-29 13:25") s = "2019-12-05" # single hamster day (start, end), rest = dt.Range.parse(s, ref=ref) just_before = start - dt.timedelta(seconds=1) just_after = end + dt.timedelta(seconds=1) self.assertEqual(just_before.hday(), pdt.date(2019, 12, 4)) self.assertEqual(just_after.hday(), pdt.date(2019, 12, 6)) s = "2019-12-05 2019-12-07" # hamster days range (start, end), rest = dt.Range.parse(s, ref=ref) just_before = start - dt.timedelta(seconds=1) just_after = end + dt.timedelta(seconds=1) self.assertEqual(just_before.hday(), dt.date(2019, 12, 4)) self.assertEqual(just_after.hday(), dt.date(2019, 12, 8)) s = "14:30 - --" (start, end), rest = dt.Range.parse(s, ref=ref) self.assertEqual(start.strftime("%H:%M"), "14:30") self.assertEqual(end, None) def test_range(self): day = dt.hday(2020, 2, 2) time = dt.time(21, 20) base = dt.datetime.from_day_time(day, time) range = dt.Range(base, base + dt.timedelta(minutes=30)) range_str = range.format(default_day=day) self.assertEqual(range_str, "21:20 - 21:50") range = dt.Range(None, base) range_str = 
range.format(default_day=day) self.assertEqual(range_str, "-- - 21:20") # issue #576 start = dt.datetime(2020, 3, 8, 17, 7) end = dt.datetime(2020, 3, 8, 18, 6) range = dt.Range.from_start_end(start, end) self.assertEqual(range.start, start) self.assertEqual(range.end, end) # check passthrough range2 = dt.Range.from_start_end(range) self.assertEqual(range2.start, range.start) self.assertEqual(range2.end, range.end) def test_rounding(self): dt1 = dt.datetime(2019, 12, 31, hour=13, minute=14, second=10, microsecond=11) self.assertEqual(dt1.second, 0) self.assertEqual(dt1.microsecond, 0) self.assertEqual(str(dt1), "2019-12-31 13:14") def test_type_stability(self): dt1 = dt.datetime(2020, 1, 10, hour=13, minute=30) dt2 = dt.datetime(2020, 1, 10, hour=13, minute=40) delta = dt2 - dt1 self.assertEqual(type(delta), dt.timedelta) _sum = dt1 + delta self.assertEqual(_sum, dt.datetime(2020, 1, 10, hour=13, minute=40)) self.assertEqual(type(_sum), dt.datetime) _sub = dt1 - delta self.assertEqual(_sub, dt.datetime(2020, 1, 10, hour=13, minute=20)) self.assertEqual(type(_sub), dt.datetime) opposite = - delta self.assertEqual(opposite, dt.timedelta(minutes=-10)) self.assertEqual(type(opposite), dt.timedelta) _sum = delta + delta self.assertEqual(_sum, dt.timedelta(minutes=20)) self.assertEqual(type(_sum), dt.timedelta) _sub = delta - delta self.assertEqual(_sub, dt.timedelta()) self.assertEqual(type(_sub), dt.timedelta) def test_timedelta(self): delta = dt.timedelta(seconds=90) self.assertEqual(delta.total_minutes(), 1.5) class TestDBus(unittest.TestCase): def test_round_trip(self): fact = Fact.parse("11:00 12:00 activity@category, description, with comma #and #tags") dbus_fact = to_dbus_fact_json(fact) return_fact = from_dbus_fact_json(dbus_fact) self.assertEqual(return_fact, fact) dbus_fact = to_dbus_fact(fact) return_fact = from_dbus_fact(dbus_fact) self.assertEqual(return_fact, fact) fact = Fact.parse("11:00 activity") dbus_fact = to_dbus_fact_json(fact) return_fact = 
from_dbus_fact_json(dbus_fact) self.assertEqual(return_fact, fact) dbus_fact = to_dbus_fact(fact) return_fact = from_dbus_fact(dbus_fact) self.assertEqual(return_fact, fact) range, __ = dt.Range.parse("2020-01-19 11:00 - 2020-01-19 12:00") dbus_range = to_dbus_range(range) return_range = from_dbus_range(dbus_range) self.assertEqual(return_range, range) if __name__ == '__main__': unittest.main()
23,239
Python
.py
468
38.08547
106
0.58406
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,755
fixpy2.py
projecthamster_hamster/waflib/fixpy2.py
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010-2018 (ita) from __future__ import with_statement import os all_modifs = {} def fixdir(dir): """Call all substitution functions on Waf folders""" for k in all_modifs: for v in all_modifs[k]: modif(os.path.join(dir, 'waflib'), k, v) def modif(dir, name, fun): """Call a substitution function""" if name == '*': lst = [] for y in '. Tools extras'.split(): for x in os.listdir(os.path.join(dir, y)): if x.endswith('.py'): lst.append(y + os.sep + x) for x in lst: modif(dir, x, fun) return filename = os.path.join(dir, name) with open(filename, 'r') as f: txt = f.read() txt = fun(txt) with open(filename, 'w') as f: f.write(txt) def subst(*k): """register a substitution function""" def do_subst(fun): for x in k: try: all_modifs[x].append(fun) except KeyError: all_modifs[x] = [fun] return fun return do_subst @subst('*') def r1(code): "utf-8 fixes for python < 2.6" code = code.replace('as e:', ',e:') code = code.replace(".decode(sys.stdout.encoding or'latin-1',errors='replace')", '') return code.replace('.encode()', '') @subst('Runner.py') def r4(code): "generator syntax" return code.replace('next(self.biter)', 'self.biter.next()').replace('self.daemon = True', 'self.setDaemon(1)') @subst('Context.py') def r5(code): return code.replace("('Execution failure: %s'%str(e),ex=e)", "('Execution failure: %s'%str(e),ex=e),None,sys.exc_info()[2]")
1,488
Python
.xpy
51
26.588235
125
0.656601
projecthamster/hamster
1,069
250
128
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,756
comictagger.py
evilhero_mylar/comictagger.py
#!/usr/bin/env python from lib.comictaggerlib.main import ctmain if __name__ == '__main__': ctmain()
106
Python
.py
4
24.25
42
0.673267
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,757
Mylar.py
evilhero_mylar/Mylar.py
#!/usr/bin/env python # This file is part of Mylar. # # Mylar is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Mylar is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Mylar. If not, see <http://www.gnu.org/licenses/>. import os, sys, locale import errno import shutil import time import threading import signal sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'lib')) import mylar from mylar import webstart, logger, filechecker, versioncheck, maintenance import argparse if ( sys.platform == 'win32' and sys.executable.split( '\\' )[-1] == 'pythonw.exe'): sys.stdout = open(os.devnull, "w") sys.stderr = open(os.devnull, "w") def handler_sigterm(signum, frame): mylar.SIGNAL = 'shutdown' def main(): # Fixed paths to mylar if hasattr(sys, 'frozen'): mylar.FULL_PATH = os.path.abspath(sys.executable) else: mylar.FULL_PATH = os.path.abspath(__file__) mylar.PROG_DIR = os.path.dirname(mylar.FULL_PATH) mylar.ARGS = sys.argv[1:] # From sickbeard mylar.SYS_ENCODING = None try: locale.setlocale(locale.LC_ALL, "") mylar.SYS_ENCODING = locale.getpreferredencoding() except (locale.Error, IOError): pass # for OSes that are poorly configured I'll just force UTF-8 if not mylar.SYS_ENCODING or mylar.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): mylar.SYS_ENCODING = 'UTF-8' if not logger.LOG_LANG.startswith('en'): print 'language detected as non-English (%s). 
Forcing specific logging module - errors WILL NOT be captured in the logs' % logger.LOG_LANG else: print 'log language set to %s' % logger.LOG_LANG # Set up and gather command line arguments parser = argparse.ArgumentParser(description='Automated Comic Book Downloader') subparsers = parser.add_subparsers(title='Subcommands', dest='maintenance') parser_maintenance = subparsers.add_parser('maintenance', help='Enter maintenance mode (no GUI). Additional commands are available (maintenance --help)') #main parser parser.add_argument('-v', '--verbose', action='store_true', help='Increase console logging verbosity') parser.add_argument('-q', '--quiet', action='store_true', help='Turn off console logging') parser.add_argument('-d', '--daemon', action='store_true', help='Run as a daemon') parser.add_argument('-p', '--port', type=int, help='Force mylar to run on a specified port') parser.add_argument('-b', '--backup', action='store_true', help='Will automatically backup & keep the last 2 copies of the .db & ini files prior to startup') parser.add_argument('-w', '--noweekly', action='store_true', help='Turn off weekly pull list check on startup (quicker boot sequence)') parser.add_argument('--datadir', help='Specify a directory where to store your data files') parser.add_argument('--config', help='Specify a config file to use') parser.add_argument('--nolaunch', action='store_true', help='Prevent browser from launching on startup') parser.add_argument('--pidfile', help='Create a pid file (only relevant when running as a daemon)') parser.add_argument('--safe', action='store_true', help='redirect the startup page to point to the Manage Comics screen on startup') parser_maintenance.add_argument('-xj', '--exportjson', action='store', help='Export existing mylar.db to json file') parser_maintenance.add_argument('-id', '--importdatabase', action='store', help='Import a mylar.db into current db') parser_maintenance.add_argument('-ij', '--importjson', action='store', help='Import a 
specified json file containing just {"ComicID": "XXXXX"} into current db') parser_maintenance.add_argument('-st', '--importstatus', action='store_true', help='Provide current maintenance status') parser_maintenance.add_argument('-u', '--update', action='store_true', help='force mylar to perform an update as if in GUI') parser_maintenance.add_argument('-fs', '--fixslashes', action='store_true', help='remove double-slashes from within paths in db') #parser_maintenance.add_argument('-it', '--importtext', action='store', help='Import a specified text file into current db') args = parser.parse_args() if args.maintenance: if all([args.exportjson is None, args.importdatabase is None, args.importjson is None, args.importstatus is False, args.update is False, args.fixslashes is False]): print 'Expecting subcommand with the maintenance positional argumeent' sys.exit() mylar.MAINTENANCE = True else: mylar.MAINTENANCE = False if args.verbose: print 'Verbose/Debugging mode enabled...' mylar.LOG_LEVEL = 2 elif args.quiet: mylar.QUIET = True print 'Quiet logging mode enabled...' mylar.LOG_LEVEL = 0 else: mylar.LOG_LEVEL = 1 if args.daemon: if sys.platform == 'win32': print "Daemonize not supported under Windows, starting normally" else: mylar.DAEMON = True if args.pidfile: mylar.PIDFILE = str(args.pidfile) # If the pidfile already exists, mylar may still be running, so exit if os.path.exists(mylar.PIDFILE): sys.exit("PID file '" + mylar.PIDFILE + "' already exists. Exiting.") # The pidfile is only useful in daemon mode, make sure we can write the file properly if mylar.DAEMON: mylar.CREATEPID = True try: file(mylar.PIDFILE, 'w').write("pid\n") except IOError, e: raise SystemExit("Unable to write PID file: %s [%d]" % (e.strerror, e.errno)) else: print("Not running in daemon mode. 
PID file creation disabled.") if args.datadir: mylar.DATA_DIR = args.datadir else: mylar.DATA_DIR = mylar.PROG_DIR if args.config: mylar.CONFIG_FILE = args.config else: mylar.CONFIG_FILE = os.path.join(mylar.DATA_DIR, 'config.ini') if args.safe: mylar.SAFESTART = True else: mylar.SAFESTART = False if args.noweekly: mylar.NOWEEKLY = True else: mylar.NOWEEKLY = False # Put the database in the DATA_DIR mylar.DB_FILE = os.path.join(mylar.DATA_DIR, 'mylar.db') # Read config and start logging if mylar.MAINTENANCE is False: print('Initializing startup sequence....') #try: mylar.initialize(mylar.CONFIG_FILE) #except Exception as e: # print e # raise SystemExit('FATAL ERROR') if mylar.MAINTENANCE is False: filechecker.validateAndCreateDirectory(mylar.DATA_DIR, True) # Make sure the DATA_DIR is writeable if not os.access(mylar.DATA_DIR, os.W_OK): raise SystemExit('Cannot write to the data directory: ' + mylar.DATA_DIR + '. Exiting...') # backup the db and configs before they load. if args.backup: print '[AUTO-BACKUP] Backing up .db and config.ini files for safety.' backupdir = os.path.join(mylar.DATA_DIR, 'backup') try: os.makedirs(backupdir) print '[AUTO-BACKUP] Directory does not exist for backup - creating : ' + backupdir except OSError as exception: if exception.errno != errno.EEXIST: print '[AUTO-BACKUP] Directory already exists.' raise i = 0 while (i < 2): if i == 0: ogfile = mylar.DB_FILE back = os.path.join(backupdir, 'mylar.db') back_1 = os.path.join(backupdir, 'mylar.db.1') else: ogfile = mylar.CONFIG_FILE back = os.path.join(backupdir, 'config.ini') back_1 = os.path.join(backupdir, 'config.ini.1') try: print '[AUTO-BACKUP] Now Backing up mylar.db file' if os.path.isfile(back_1): print '[AUTO-BACKUP] ' + back_1 + ' exists. Deleting and keeping new.' 
os.remove(back_1) if os.path.isfile(back): print '[AUTO-BACKUP] Now renaming ' + back + ' to ' + back_1 shutil.move(back, back_1) print '[AUTO-BACKUP] Now copying db file to ' + back shutil.copy(ogfile, back) except OSError as exception: if exception.errno != errno.EXIST: raise i += 1 # Rename the main thread threading.currentThread().name = "MAIN" if mylar.DAEMON: mylar.daemonize() if mylar.MAINTENANCE is True and any([args.exportjson, args.importjson, args.update is True, args.importstatus is True, args.fixslashes is True]): loggermode = '[MAINTENANCE-MODE]' if args.importstatus: #mylar.MAINTENANCE is True: cs = maintenance.Maintenance('status') cstat = cs.check_status() else: logger.info('%s Initializing maintenance mode' % loggermode) if args.update is True: logger.info('%s Attempting to update Mylar so things can work again...' % loggermode) try: mylar.shutdown(restart=True, update=True, maintenance=True) except Exception as e: sys.exit('%s Mylar failed to update: %s' % (loggermode, e)) elif args.importdatabase: #for attempted db import. 
maintenance_path = args.importdatabase logger.info('%s db path accepted as %s' % (loggermode, maintenance_path)) di = maintenance.Maintenance('database-import', file=maintenance_path) d = di.database_import() elif args.importjson: #for attempted file re-import (json format) maintenance_path = args.importjson logger.info('%s file indicated as being in json format - path accepted as %s' % (loggermode, maintenance_path)) ij = maintenance.Maintenance('json-import', file=maintenance_path) j = ij.json_import() #elif args.importtext: # #for attempted file re-import (list format) # maintenance_path = args.importtext # logger.info('%s file indicated as being in list format - path accepted as %s' % (loggermode, maintenance_path)) # it = maintenance.Maintenance('list-import', file=maintenance_path) # t = it.list_import() elif args.exportjson: #for export of db comicid's in json format maintenance_path = args.exportjson logger.info('%s file indicated as being written to json format - destination accepted as %s' % (loggermode, maintenance_path)) ej = maintenance.Maintenance('json-export', output=maintenance_path) j = ej.json_export() elif args.fixslashes: #for running the fix slashes on the db manually logger.info('%s method indicated as fix slashes' % loggermode) fs = maintenance.Maintenance('fixslashes') j = fs.fix_slashes() else: logger.info('%s Not a valid command: %s' % (loggermode, maintenance_info)) sys.exit() logger.info('%s Exiting Maintenance mode' % (loggermode)) #possible option to restart automatically after maintenance has completed... sys.exit() # Force the http port if neccessary if args.port: http_port = args.port logger.info('Starting Mylar on forced port: %i' % http_port) else: http_port = int(mylar.CONFIG.HTTP_PORT) # Check if pyOpenSSL is installed. It is required for certificate generation # and for CherryPy. if mylar.CONFIG.ENABLE_HTTPS: try: import OpenSSL except ImportError: logger.warn("The pyOpenSSL module is missing. 
Install this " \ "module to enable HTTPS. HTTPS will be disabled.") mylar.CONFIG.ENABLE_HTTPS = False # Try to start the server. Will exit here is address is already in use. web_config = { 'http_port': http_port, 'http_host': mylar.CONFIG.HTTP_HOST, 'http_root': mylar.CONFIG.HTTP_ROOT, 'enable_https': mylar.CONFIG.ENABLE_HTTPS, 'https_cert': mylar.CONFIG.HTTPS_CERT, 'https_key': mylar.CONFIG.HTTPS_KEY, 'https_chain': mylar.CONFIG.HTTPS_CHAIN, 'http_username': mylar.CONFIG.HTTP_USERNAME, 'http_password': mylar.CONFIG.HTTP_PASSWORD, 'authentication': mylar.CONFIG.AUTHENTICATION, 'login_timeout': mylar.CONFIG.LOGIN_TIMEOUT, 'opds_enable': mylar.CONFIG.OPDS_ENABLE, 'opds_authentication': mylar.CONFIG.OPDS_AUTHENTICATION, 'opds_username': mylar.CONFIG.OPDS_USERNAME, 'opds_password': mylar.CONFIG.OPDS_PASSWORD, 'opds_pagesize': mylar.CONFIG.OPDS_PAGESIZE, } # Try to start the server. webstart.initialize(web_config) #check for version here after web server initialized so it doesn't try to repeatidly hit github #for version info if it's already running versioncheck.versionload() if mylar.CONFIG.LAUNCH_BROWSER and not args.nolaunch: mylar.launch_browser(mylar.CONFIG.HTTP_HOST, http_port, mylar.CONFIG.HTTP_ROOT) # Start the background threads mylar.start() signal.signal(signal.SIGTERM, handler_sigterm) while True: if not mylar.SIGNAL: try: time.sleep(1) except KeyboardInterrupt: mylar.SIGNAL = 'shutdown' else: logger.info('Received signal: ' + mylar.SIGNAL) if mylar.SIGNAL == 'shutdown': mylar.shutdown() elif mylar.SIGNAL == 'restart': mylar.shutdown(restart=True) else: mylar.shutdown(restart=True, update=True) mylar.SIGNAL = None return if __name__ == "__main__": main()
14,559
Python
.py
291
40.735395
172
0.638001
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,758
get_image_size.py
evilhero_mylar/lib/get_image_size.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function """ get_image_size.py ==================== :Name: get_image_size :Purpose: extract image dimensions given a file path :Author: Paulo Scardine (based on code from Emmanuel VA�SSE) :Created: 26/09/2013 :Copyright: (c) Paulo Scardine 2013 :Licence: MIT """ import collections import json import os import struct FILE_UNKNOWN = "Sorry, don't know how to get size for this file." class UnknownImageFormat(Exception): pass types = collections.OrderedDict() BMP = types['BMP'] = 'BMP' GIF = types['GIF'] = 'GIF' ICO = types['ICO'] = 'ICO' JPEG = types['JPEG'] = 'JPEG' PNG = types['PNG'] = 'PNG' TIFF = types['TIFF'] = 'TIFF' image_fields = ['path', 'type', 'file_size', 'width', 'height'] class Image(collections.namedtuple('Image', image_fields)): def to_str_row(self): return ("%d\t%d\t%d\t%s\t%s" % ( self.width, self.height, self.file_size, self.type, self.path.replace('\t', '\\t'), )) def to_str_row_verbose(self): return ("%d\t%d\t%d\t%s\t%s\t##%s" % ( self.width, self.height, self.file_size, self.type, self.path.replace('\t', '\\t'), self)) def to_str_json(self, indent=None): return json.dumps(self._asdict(), indent=indent) def get_image_size(file_path): """ Return (width, height) for a given img file content - no external dependencies except the os and struct builtin modules """ img = get_image_metadata(file_path) return (img.width, img.height) def get_image_metadata(file_path): """ Return an `Image` object for a given img file content - no external dependencies except the os and struct builtin modules Args: file_path (str): path to an image file Returns: Image: (path, type, file_size, width, height) """ size = os.path.getsize(file_path) # be explicit with open arguments - we need binary mode with open(file_path, "rb") as input: height = -1 width = -1 data = input.read(26) msg = " raised while trying to decode as JPEG." 
if (size >= 10) and data[:6] in (b'GIF87a', b'GIF89a'): # GIFs imgtype = GIF w, h = struct.unpack("<HH", data[6:10]) width = int(w) height = int(h) elif ((size >= 24) and data.startswith(b'\211PNG\r\n\032\n') and (data[12:16] == b'IHDR')): # PNGs imgtype = PNG w, h = struct.unpack(">LL", data[16:24]) width = int(w) height = int(h) elif (size >= 16) and data.startswith(b'\211PNG\r\n\032\n'): # older PNGs imgtype = PNG w, h = struct.unpack(">LL", data[8:16]) width = int(w) height = int(h) elif (size >= 2) and data.startswith(b'\377\330'): # JPEG imgtype = JPEG input.seek(0) input.read(2) b = input.read(1) try: while (b and ord(b) != 0xDA): while (ord(b) != 0xFF): b = input.read(1) while (ord(b) == 0xFF): b = input.read(1) if (ord(b) >= 0xC0 and ord(b) <= 0xC3): input.read(3) h, w = struct.unpack(">HH", input.read(4)) break else: input.read( int(struct.unpack(">H", input.read(2))[0]) - 2) b = input.read(1) width = int(w) height = int(h) except struct.error: raise UnknownImageFormat("StructError" + msg) except ValueError: raise UnknownImageFormat("ValueError" + msg) except Exception as e: raise UnknownImageFormat(e.__class__.__name__ + msg) elif (size >= 26) and data.startswith(b'BM'): # BMP imgtype = 'BMP' headersize = struct.unpack("<I", data[14:18])[0] if headersize == 12: w, h = struct.unpack("<HH", data[18:22]) width = int(w) height = int(h) elif headersize >= 40: w, h = struct.unpack("<ii", data[18:26]) width = int(w) # as h is negative when stored upside down height = abs(int(h)) else: raise UnknownImageFormat( "Unkown DIB header size:" + str(headersize)) elif (size >= 8) and data[:4] in (b"II\052\000", b"MM\000\052"): # Standard TIFF, big- or little-endian # BigTIFF and other different but TIFF-like formats are not # supported currently imgtype = TIFF byteOrder = data[:2] boChar = ">" if byteOrder == "MM" else "<" # maps TIFF type id to size (in bytes) # and python format char for struct tiffTypes = { 1: (1, boChar + "B"), # BYTE 2: (1, boChar + "c"), # ASCII 
3: (2, boChar + "H"), # SHORT 4: (4, boChar + "L"), # LONG 5: (8, boChar + "LL"), # RATIONAL 6: (1, boChar + "b"), # SBYTE 7: (1, boChar + "c"), # UNDEFINED 8: (2, boChar + "h"), # SSHORT 9: (4, boChar + "l"), # SLONG 10: (8, boChar + "ll"), # SRATIONAL 11: (4, boChar + "f"), # FLOAT 12: (8, boChar + "d") # DOUBLE } ifdOffset = struct.unpack(boChar + "L", data[4:8])[0] try: countSize = 2 input.seek(ifdOffset) ec = input.read(countSize) ifdEntryCount = struct.unpack(boChar + "H", ec)[0] # 2 bytes: TagId + 2 bytes: type + 4 bytes: count of values + 4 # bytes: value offset ifdEntrySize = 12 for i in range(ifdEntryCount): entryOffset = ifdOffset + countSize + i * ifdEntrySize input.seek(entryOffset) tag = input.read(2) tag = struct.unpack(boChar + "H", tag)[0] if(tag == 256 or tag == 257): # if type indicates that value fits into 4 bytes, value # offset is not an offset but value itself type = input.read(2) type = struct.unpack(boChar + "H", type)[0] if type not in tiffTypes: raise UnknownImageFormat( "Unkown TIFF field type:" + str(type)) typeSize = tiffTypes[type][0] typeChar = tiffTypes[type][1] input.seek(entryOffset + 8) value = input.read(typeSize) value = int(struct.unpack(typeChar, value)[0]) if tag == 256: width = value else: height = value if width > -1 and height > -1: break except Exception as e: raise UnknownImageFormat(str(e)) elif size >= 2: # see http://en.wikipedia.org/wiki/ICO_(file_format) imgtype = 'ICO' input.seek(0) reserved = input.read(2) if 0 != struct.unpack("<H", reserved)[0]: raise UnknownImageFormat(FILE_UNKNOWN) format = input.read(2) assert 1 == struct.unpack("<H", format)[0] num = input.read(2) num = struct.unpack("<H", num)[0] if num > 1: import warnings warnings.warn("ICO File contains more than one image") # http://msdn.microsoft.com/en-us/library/ms997538.aspx w = input.read(1) h = input.read(1) width = ord(w) height = ord(h) else: raise UnknownImageFormat(FILE_UNKNOWN) return Image(path=file_path, type=imgtype, file_size=size, 
width=width, height=height) import unittest class Test_get_image_size(unittest.TestCase): data = [{ 'path': 'lookmanodeps.png', 'width': 251, 'height': 208, 'file_size': 22228, 'type': 'PNG'}] def setUp(self): pass def test_get_image_metadata(self): img = self.data[0] output = get_image_metadata(img['path']) self.assertTrue(output) self.assertEqual(output.path, img['path']) self.assertEqual(output.width, img['width']) self.assertEqual(output.height, img['height']) self.assertEqual(output.type, img['type']) self.assertEqual(output.file_size, img['file_size']) for field in image_fields: self.assertEqual(getattr(output, field), img[field]) def test_get_image_metadata__ENOENT_OSError(self): with self.assertRaises(OSError): get_image_metadata('THIS_DOES_NOT_EXIST') def test_get_image_metadata__not_an_image_UnknownImageFormat(self): with self.assertRaises(UnknownImageFormat): get_image_metadata('README.rst') def test_get_image_size(self): img = self.data[0] output = get_image_size(img['path']) self.assertTrue(output) self.assertEqual(output, (img['width'], img['height'])) def tearDown(self): pass def main(argv=None): """ Print image metadata fields for the given file path. Keyword Arguments: argv (list): commandline arguments (e.g. 
sys.argv[1:]) Returns: int: zero for OK """ import logging import optparse import sys prs = optparse.OptionParser( usage="%prog [-v|--verbose] [--json|--json-indent] <path0> [<pathN>]", description="Print metadata for the given image paths " "(without image library bindings).") prs.add_option('--json', dest='json', action='store_true') prs.add_option('--json-indent', dest='json_indent', action='store_true') prs.add_option('-v', '--verbose', dest='verbose', action='store_true',) prs.add_option('-q', '--quiet', dest='quiet', action='store_true',) prs.add_option('-t', '--test', dest='run_tests', action='store_true',) argv = list(argv) if argv is not None else sys.argv[1:] (opts, args) = prs.parse_args(args=argv) loglevel = logging.INFO if opts.verbose: loglevel = logging.DEBUG elif opts.quiet: loglevel = logging.ERROR logging.basicConfig(level=loglevel) log = logging.getLogger() log.debug('argv: %r', argv) log.debug('opts: %r', opts) log.debug('args: %r', args) if opts.run_tests: import sys sys.argv = [sys.argv[0]] + args import unittest return unittest.main() output_func = Image.to_str_row if opts.json_indent: import functools output_func = functools.partial(Image.to_str_json, indent=2) elif opts.json: output_func = Image.to_str_json elif opts.verbose: output_func = Image.to_str_row_verbose EX_OK = 0 EX_NOT_OK = 2 if len(args) < 1: prs.print_help() print('') prs.error("You must specify one or more paths to image files") errors = [] for path_arg in args: try: img = get_image_metadata(path_arg) print(output_func(img)) except KeyboardInterrupt: raise except OSError as e: log.error((path_arg, e)) errors.append((path_arg, e)) except Exception as e: log.exception(e) errors.append((path_arg, e)) pass if len(errors): import pprint print("ERRORS", file=sys.stderr) print("======", file=sys.stderr) print(pprint.pformat(errors, indent=2), file=sys.stderr) return EX_NOT_OK return EX_OK if __name__ == "__main__": import sys sys.exit(main(argv=sys.argv[1:]))
12,859
Python
.py
340
25.997059
79
0.505453
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,759
argparse.py
evilhero_mylar/lib/argparse.py
# Author: Steven J. Bethard <steven.bethard@gmail.com>. """Command-line parsing library This module is an optparse-inspired command-line parsing library that: - handles both optional and positional arguments - produces highly informative usage messages - supports parsers that dispatch to sub-parsers The following is a simple usage example that sums integers from the command-line and writes the result to a file:: parser = argparse.ArgumentParser( description='sum the integers at the command line') parser.add_argument( 'integers', metavar='int', nargs='+', type=int, help='an integer to be summed') parser.add_argument( '--log', default=sys.stdout, type=argparse.FileType('w'), help='the file where the sum should be written') args = parser.parse_args() args.log.write('%s' % sum(args.integers)) args.log.close() The module contains the following public classes: - ArgumentParser -- The main entry point for command-line parsing. As the example above shows, the add_argument() method is used to populate the parser with actions for optional and positional arguments. Then the parse_args() method is invoked to convert the args at the command-line into an object with attributes. - ArgumentError -- The exception raised by ArgumentParser objects when there are errors with the parser's actions. Errors raised while parsing the command-line are caught by ArgumentParser and emitted as command-line messages. - FileType -- A factory for defining types of files to be created. As the example above shows, instances of FileType are typically passed as the type= argument of add_argument() calls. - Action -- The base class for parser actions. Typically actions are selected by passing strings like 'store_true' or 'append_const' to the action= argument of add_argument(). However, for greater customization of ArgumentParser actions, subclasses of Action may be defined and passed as the action= argument. 
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, ArgumentDefaultsHelpFormatter -- Formatter classes which may be passed as the formatter_class= argument to the ArgumentParser constructor. HelpFormatter is the default, RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser not to change the formatting for help text, and ArgumentDefaultsHelpFormatter adds information about argument defaults to the help. All other classes in this module are considered implementation details. (Also note that HelpFormatter and RawDescriptionHelpFormatter are only considered public as object names -- the API of the formatter objects is still considered an implementation detail.) """ __version__ = '1.1' __all__ = [ 'ArgumentParser', 'ArgumentError', 'ArgumentTypeError', 'FileType', 'HelpFormatter', 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', 'MetavarTypeHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', 'OPTIONAL', 'PARSER', 'REMAINDER', 'SUPPRESS', 'ZERO_OR_MORE', ] import collections as _collections import copy as _copy import os as _os import re as _re import sys as _sys import textwrap as _textwrap from gettext import gettext as _, ngettext def _callable(obj): return hasattr(obj, '__call__') or hasattr(obj, '__bases__') SUPPRESS = '==SUPPRESS==' OPTIONAL = '?' ZERO_OR_MORE = '*' ONE_OR_MORE = '+' PARSER = 'A...' REMAINDER = '...' _UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' # ============================= # Utility functions and classes # ============================= class _AttributeHolder(object): """Abstract base class that provides __repr__. The __repr__ method returns a string in the format:: ClassName(attr=name, attr=name, ...) The attributes are determined either by a class-level attribute, '_kwarg_names', or by inspecting the instance __dict__. 
""" def __repr__(self): type_name = type(self).__name__ arg_strings = [] for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): arg_strings.append('%s=%r' % (name, value)) return '%s(%s)' % (type_name, ', '.join(arg_strings)) def _get_kwargs(self): return sorted(self.__dict__.items()) def _get_args(self): return [] def _ensure_value(namespace, name, value): if getattr(namespace, name, None) is None: setattr(namespace, name, value) return getattr(namespace, name) # =============== # Formatting Help # =============== class HelpFormatter(object): """Formatter for generating usage messages and argument help strings. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def __init__(self, prog, indent_increment=2, max_help_position=24, width=None): # default setting for width if width is None: try: width = int(_os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position self._width = width self._current_indent = 0 self._level = 0 self._action_max_length = 0 self._root_section = self._Section(self, None) self._current_section = self._root_section self._whitespace_matcher = _re.compile(r'\s+') self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== # Section and indentation methods # =============================== def _indent(self): self._current_indent += self._indent_increment self._level += 1 def _dedent(self): self._current_indent -= self._indent_increment assert self._current_indent >= 0, 'Indent decreased below 0.' 
self._level -= 1 class _Section(object): def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent self.heading = heading self.items = [] def format_help(self): # format the indented section if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts for func, args in self.items: func(*args) item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() # return nothing if the section was empty if not item_help: return '' # add the heading if the section was non-empty if self.heading is not SUPPRESS and self.heading is not None: current_indent = self.formatter._current_indent heading = '%*s%s:\n' % (current_indent, '', self.heading) else: heading = '' # join the section-initial newline, the heading and the help return join(['\n', heading, item_help, '\n']) def _add_item(self, func, args): self._current_section.items.append((func, args)) # ======================== # Message building methods # ======================== def start_section(self, heading): self._indent() section = self._Section(self, self._current_section, heading) self._add_item(section.format_help, []) self._current_section = section def end_section(self): self._current_section = self._current_section.parent self._dedent() def add_text(self, text): if text is not SUPPRESS and text is not None: self._add_item(self._format_text, [text]) def add_usage(self, usage, actions, groups, prefix=None): if usage is not SUPPRESS: args = usage, actions, groups, prefix self._add_item(self._format_usage, args) def add_argument(self, action): if action.help is not SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] for subaction in self._iter_indented_subactions(action): invocations.append(get_invocation(subaction)) # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = 
invocation_length + self._current_indent self._action_max_length = max(self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action]) def add_arguments(self, actions): for action in actions: self.add_argument(action) # ======================= # Help-formatting methods # ======================= def format_help(self): help = self._root_section.format_help() if help: help = self._long_break_matcher.sub('\n\n', help) help = help.strip('\n') + '\n' return help def _join_parts(self, part_strings): return ''.join([part for part in part_strings if part and part is not SUPPRESS]) def _format_usage(self, usage, actions, groups, prefix): if prefix is None: prefix = _('usage: ') # if usage is specified, use that if usage is not None: usage = usage % dict(prog=self._prog) # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: usage = '%(prog)s' % dict(prog=self._prog) # if optionals and positionals are available, calculate usage elif usage is None: prog = '%(prog)s' % dict(prog=self._prog) # split optionals from positionals optionals = [] positionals = [] for action in actions: if action.option_strings: optionals.append(action) else: positionals.append(action) # build full usage string format = self._format_actions_usage action_usage = format(optionals + positionals, groups) usage = ' '.join([s for s in [prog, action_usage] if s]) # wrap the usage parts if it's too long text_width = self._width - self._current_indent if len(prefix) + len(usage) > text_width: # break usage into wrappable parts part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' opt_usage = format(optionals, groups) pos_usage = format(positionals, groups) opt_parts = _re.findall(part_regexp, opt_usage) pos_parts = _re.findall(part_regexp, pos_usage) assert ' '.join(opt_parts) == opt_usage assert ' '.join(pos_parts) == pos_usage # helper for wrapping lines def get_lines(parts, indent, prefix=None): lines = [] line = [] if prefix 
is not None: line_len = len(prefix) - 1 else: line_len = len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 line.append(part) line_len += len(part) + 1 if line: lines.append(indent + ' '.join(line)) if prefix is not None: lines[0] = lines[0][len(indent):] return lines # if prog is short, follow it with optionals or positionals if len(prefix) + len(prog) <= 0.75 * text_width: indent = ' ' * (len(prefix) + len(prog) + 1) if opt_parts: lines = get_lines([prog] + opt_parts, indent, prefix) lines.extend(get_lines(pos_parts, indent)) elif pos_parts: lines = get_lines([prog] + pos_parts, indent, prefix) else: lines = [prog] # if prog is long, put it on its own line else: indent = ' ' * len(prefix) parts = opt_parts + pos_parts lines = get_lines(parts, indent) if len(lines) > 1: lines = [] lines.extend(get_lines(opt_parts, indent)) lines.extend(get_lines(pos_parts, indent)) lines = [prog] + lines # join lines into usage usage = '\n'.join(lines) # prefix with 'usage:' return '%s%s\n\n' % (prefix, usage) def _format_actions_usage(self, actions, groups): # find group indices and identify actions in groups group_actions = set() inserts = {} for group in groups: try: start = actions.index(group._group_actions[0]) except ValueError: continue else: end = start + len(group._group_actions) if actions[start:end] == group._group_actions: for action in group._group_actions: group_actions.add(action) if not group.required: if start in inserts: inserts[start] += ' [' else: inserts[start] = '[' inserts[end] = ']' else: if start in inserts: inserts[start] += ' (' else: inserts[start] = '(' inserts[end] = ')' for i in range(start + 1, end): inserts[i] = '|' # collect all actions format strings parts = [] for i, action in enumerate(actions): # suppressed arguments are marked with None # remove | separators for suppressed arguments if action.help is SUPPRESS: parts.append(None) if 
inserts.get(i) == '|': inserts.pop(i) elif inserts.get(i + 1) == '|': inserts.pop(i + 1) # produce all arg strings elif not action.option_strings: default = self._get_default_metavar_for_positional(action) part = self._format_args(action, default) # if it's in a group, strip the outer [] if action in group_actions: if part[0] == '[' and part[-1] == ']': part = part[1:-1] # add the action string to the list parts.append(part) # produce the first way to invoke the option in brackets else: option_string = action.option_strings[0] # if the Optional doesn't take a value, format is: # -s or --long if action.nargs == 0: part = '%s' % option_string # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) part = '%s %s' % (option_string, args_string) # make it look optional if it's not required or in a group if not action.required and action not in group_actions: part = '[%s]' % part # add the action string to the list parts.append(part) # insert things at the necessary indices for i in sorted(inserts, reverse=True): parts[i:i] = [inserts[i]] # join all the action items with spaces text = ' '.join([item for item in parts if item is not None]) # clean up separators for mutually exclusive groups open = r'[\[(]' close = r'[\])]' text = _re.sub(r'(%s) ' % open, r'\1', text) text = _re.sub(r' (%s)' % close, r'\1', text) text = _re.sub(r'%s *%s' % (open, close), r'', text) text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() # return the text return text def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) text_width = self._width - self._current_indent indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' def _format_action(self, action): # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) help_width 
= self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) # ho nelp; start on same line and add a final newline if not action.help: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup # short action name; start on the same line and pad two spaces elif len(action_header) <= action_width: tup = self._current_indent, '', action_width, action_header action_header = '%*s%-*s ' % tup indent_first = 0 # long action name; start on the next line else: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup indent_first = help_position # collect the pieces of the action help parts = [action_header] # if there was help for the action, add lines of help text if action.help: help_text = self._expand_help(action) help_lines = self._split_lines(help_text, help_width) parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) for line in help_lines[1:]: parts.append('%*s%s\n' % (help_position, '', line)) # or add a newline if the description doesn't end with one elif not action_header.endswith('\n'): parts.append('\n') # if there are any sub-actions, add their help as well for subaction in self._iter_indented_subactions(action): parts.append(self._format_action(subaction)) # return a single string return self._join_parts(parts) def _format_action_invocation(self, action): if not action.option_strings: default = self._get_default_metavar_for_positional(action) metavar, = self._metavar_formatter(action, default)(1) return metavar else: parts = [] # if the Optional doesn't take a value, format is: # -s, --long if action.nargs == 0: parts.extend(action.option_strings) # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append('%s %s' % (option_string, args_string)) return ', 
'.join(parts) def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar elif action.choices is not None: choice_strs = [str(choice) for choice in action.choices] result = '{%s}' % ','.join(choice_strs) else: result = default_metavar def format(tuple_size): if isinstance(result, tuple): return result else: return (result, ) * tuple_size return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: result = '%s' % get_metavar(1) elif action.nargs == OPTIONAL: result = '[%s]' % get_metavar(1) elif action.nargs == ZERO_OR_MORE: result = '[%s [%s ...]]' % get_metavar(2) elif action.nargs == ONE_OR_MORE: result = '%s [%s ...]' % get_metavar(2) elif action.nargs == REMAINDER: result = '...' elif action.nargs == PARSER: result = '%s ...' % get_metavar(1) else: formats = ['%s' for _ in range(action.nargs)] result = ' '.join(formats) % get_metavar(action.nargs) return result def _expand_help(self, action): params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str return self._get_help_string(action) % params def _iter_indented_subactions(self, action): try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() for subaction in get_subactions(): yield subaction self._dedent() def _split_lines(self, text, width): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.wrap(text, width) def _fill_text(self, text, width, indent): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.fill(text, width, initial_indent=indent, subsequent_indent=indent) def _get_help_string(self, action): 
return action.help def _get_default_metavar_for_optional(self, action): return action.dest.upper() def _get_default_metavar_for_positional(self, action): return action.dest class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _fill_text(self, text, width, indent): return ''.join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): """Help message formatter which retains formatting of all help text. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _split_lines(self, text, width): return text.splitlines() class ArgumentDefaultsHelpFormatter(HelpFormatter): """Help message formatter which adds default values to argument help. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _get_help_string(self, action): help = action.help if '%(default)' not in action.help: if action.default is not SUPPRESS: defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: help += ' (default: %(default)s)' return help class MetavarTypeHelpFormatter(HelpFormatter): """Help message formatter which uses the argument 'type' as the default metavar value (instead of the argument 'dest') Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. 
""" def _get_default_metavar_for_optional(self, action): return action.type.__name__ def _get_default_metavar_for_positional(self, action): return action.type.__name__ # ===================== # Options and Arguments # ===================== def _get_action_name(argument): if argument is None: return None elif argument.option_strings: return '/'.join(argument.option_strings) elif argument.metavar not in (None, SUPPRESS): return argument.metavar elif argument.dest not in (None, SUPPRESS): return argument.dest else: return None class ArgumentError(Exception): """An error from creating or using an argument (optional or positional). The string value of this exception is the message, augmented with information about the argument that caused it. """ def __init__(self, argument, message): self.argument_name = _get_action_name(argument) self.message = message def __str__(self): if self.argument_name is None: format = '%(message)s' else: format = 'argument %(argument_name)s: %(message)s' return format % dict(message=self.message, argument_name=self.argument_name) class ArgumentTypeError(Exception): """An error from trying to convert a command line string to a type.""" pass # ============== # Action classes # ============== class Action(_AttributeHolder): """Information about how to convert command line strings to Python objects. Action objects are used by an ArgumentParser to represent the information needed to parse a single argument from one or more strings from the command line. The keyword arguments to the Action constructor are also all attributes of Action instances. Keyword Arguments: - option_strings -- A list of command-line option strings which should be associated with this action. - dest -- The name of the attribute to hold the created object(s) - nargs -- The number of command-line arguments that should be consumed. By default, one argument will be consumed and a single value will be produced. 
Other values include: - N (an integer) consumes N arguments (and produces a list) - '?' consumes zero or one arguments - '*' consumes zero or more arguments (and produces a list) - '+' consumes one or more arguments (and produces a list) Note that the difference between the default and nargs=1 is that with the default, a single value will be produced, while with nargs=1, a list containing a single value will be produced. - const -- The value to be produced if the option is specified and the option uses an action that takes no values. - default -- The value to be produced if the option is not specified. - type -- The type which the command-line arguments should be converted to, should be one of 'string', 'int', 'float', 'complex' or a callable object that accepts a single string argument. If None, 'string' is assumed. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate type, an exception will be raised if it is not a member of this collection. - required -- True if the action must always be specified at the command line. This is only meaningful for optional command-line arguments. - help -- The help string describing the argument. - metavar -- The name to be used for the option's argument with the help string. If None, the 'dest' value will be used as the name. 
""" def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): self.option_strings = option_strings self.dest = dest self.nargs = nargs self.const = const self.default = default self.type = type self.choices = choices self.required = required self.help = help self.metavar = metavar def _get_kwargs(self): names = [ 'option_strings', 'dest', 'nargs', 'const', 'default', 'type', 'choices', 'help', 'metavar', ] return [(name, getattr(self, name)) for name in names] def __call__(self, parser, namespace, values, option_string=None): raise NotImplementedError(_('.__call__() not defined')) class _StoreAction(Action): def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): if nargs == 0: raise ValueError('nargs for store actions must be > 0; if you ' 'have nothing to store, actions such as store ' 'true or store const may be more appropriate') if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super(_StoreAction, self).__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) class _StoreConstAction(Action): def __init__(self, option_strings, dest, const, default=None, required=False, help=None, metavar=None): super(_StoreConstAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.const) class _StoreTrueAction(_StoreConstAction): def __init__(self, option_strings, dest, default=False, required=False, help=None): super(_StoreTrueAction, self).__init__( 
option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help) class _StoreFalseAction(_StoreConstAction): def __init__(self, option_strings, dest, default=True, required=False, help=None): super(_StoreFalseAction, self).__init__( option_strings=option_strings, dest=dest, const=False, default=default, required=required, help=help) class _AppendAction(Action): def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None): if nargs == 0: raise ValueError('nargs for append actions must be > 0; if arg ' 'strings are not supplying the value to append, ' 'the append const action may be more appropriate') if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super(_AppendAction, self).__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(values) setattr(namespace, self.dest, items) class _AppendConstAction(Action): def __init__(self, option_strings, dest, const, default=None, required=False, help=None, metavar=None): super(_AppendConstAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help, metavar=metavar) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(self.const) setattr(namespace, self.dest, items) class _CountAction(Action): def __init__(self, option_strings, dest, default=None, required=False, help=None): super(_CountAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, help=help) def __call__(self, parser, namespace, 
values, option_string=None): new_count = _ensure_value(namespace, self.dest, 0) + 1 setattr(namespace, self.dest, new_count) class _HelpAction(Action): def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None): super(_HelpAction, self).__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) def __call__(self, parser, namespace, values, option_string=None): parser.print_help() parser.exit() class _VersionAction(Action): def __init__(self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, help="show program's version number and exit"): super(_VersionAction, self).__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) self.version = version def __call__(self, parser, namespace, values, option_string=None): version = self.version if version is None: version = parser.version formatter = parser._get_formatter() formatter.add_text(version) parser.exit(message=formatter.format_help()) class _SubParsersAction(Action): class _ChoicesPseudoAction(Action): def __init__(self, name, aliases, help): metavar = dest = name if aliases: metavar += ' (%s)' % ', '.join(aliases) sup = super(_SubParsersAction._ChoicesPseudoAction, self) sup.__init__(option_strings=[], dest=dest, help=help, metavar=metavar) def __init__(self, option_strings, prog, parser_class, dest=SUPPRESS, help=None, metavar=None): self._prog_prefix = prog self._parser_class = parser_class self._name_parser_map = _collections.OrderedDict() self._choices_actions = [] super(_SubParsersAction, self).__init__( option_strings=option_strings, dest=dest, nargs=PARSER, choices=self._name_parser_map, help=help, metavar=metavar) def add_parser(self, name, **kwargs): # set prog from the existing prefix if kwargs.get('prog') is None: kwargs['prog'] = '%s %s' % (self._prog_prefix, name) aliases = kwargs.pop('aliases', ()) # create a pseudo-action to hold the choice help if 'help' in kwargs: help = kwargs.pop('help') choice_action = 
self._ChoicesPseudoAction(name, aliases, help) self._choices_actions.append(choice_action) # create the parser and add it to the map parser = self._parser_class(**kwargs) self._name_parser_map[name] = parser # make parser available under aliases also for alias in aliases: self._name_parser_map[alias] = parser return parser def _get_subactions(self): return self._choices_actions def __call__(self, parser, namespace, values, option_string=None): parser_name = values[0] arg_strings = values[1:] # set the parser name if requested if self.dest is not SUPPRESS: setattr(namespace, self.dest, parser_name) # select the parser try: parser = self._name_parser_map[parser_name] except KeyError: args = {'parser_name': parser_name, 'choices': ', '.join(self._name_parser_map)} msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args raise ArgumentError(self, msg) # parse all the remaining options into the namespace # store any unrecognized options on the object, so that the top # level parser can decide what to do with them namespace, arg_strings = parser.parse_known_args(arg_strings, namespace) if arg_strings: vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) # ============== # Type classes # ============== class FileType(object): """Factory for creating file object types Instances of FileType are typically passed as type= arguments to the ArgumentParser add_argument() method. Keyword Arguments: - mode -- A string indicating how the file is to be opened. Accepts the same values as the builtin open() function. - bufsize -- The file's desired buffer size. Accepts the same values as the builtin open() function. 
""" def __init__(self, mode='r', bufsize=-1): self._mode = mode self._bufsize = bufsize def __call__(self, string): # the special argument "-" means sys.std{in,out} if string == '-': if 'r' in self._mode: return _sys.stdin elif 'w' in self._mode: return _sys.stdout else: msg = _('argument "-" with mode %r') % self._mode raise ValueError(msg) # all other arguments are used as file names try: return open(string, self._mode, self._bufsize) except IOError as e: message = _("can't open '%s': %s") raise ArgumentTypeError(message % (string, e)) def __repr__(self): args = self._mode, self._bufsize args_str = ', '.join(repr(arg) for arg in args if arg != -1) return '%s(%s)' % (type(self).__name__, args_str) # =========================== # Optional and Positional Parsing # =========================== class Namespace(_AttributeHolder): """Simple object for storing attributes. Implements equality by attribute names and values, and provides a simple string representation. """ def __init__(self, **kwargs): for name in kwargs: setattr(self, name, kwargs[name]) def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not (self == other) def __contains__(self, key): return key in self.__dict__ class _ActionsContainer(object): def __init__(self, description, prefix_chars, argument_default, conflict_handler): super(_ActionsContainer, self).__init__() self.description = description self.argument_default = argument_default self.prefix_chars = prefix_chars self.conflict_handler = conflict_handler # set up registries self._registries = {} # register actions self.register('action', None, _StoreAction) self.register('action', 'store', _StoreAction) self.register('action', 'store_const', _StoreConstAction) self.register('action', 'store_true', _StoreTrueAction) self.register('action', 'store_false', _StoreFalseAction) self.register('action', 'append', _AppendAction) self.register('action', 'append_const', _AppendConstAction) self.register('action', 'count', 
_CountAction) self.register('action', 'help', _HelpAction) self.register('action', 'version', _VersionAction) self.register('action', 'parsers', _SubParsersAction) # raise an exception if the conflict handler is invalid self._get_handler() # action storage self._actions = [] self._option_string_actions = {} # groups self._action_groups = [] self._mutually_exclusive_groups = [] # defaults storage self._defaults = {} # determines whether an "option" looks like a negative number self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') # whether or not there are any optionals that look like negative # numbers -- uses a list so it can be shared and edited self._has_negative_number_optionals = [] # ==================== # Registration methods # ==================== def register(self, registry_name, value, object): registry = self._registries.setdefault(registry_name, {}) registry[value] = object def _registry_get(self, registry_name, value, default=None): return self._registries[registry_name].get(value, default) # ================================== # Namespace default accessor methods # ================================== def set_defaults(self, **kwargs): self._defaults.update(kwargs) # if these defaults match any existing arguments, replace # the previous default on the object with the new one for action in self._actions: if action.dest in kwargs: action.default = kwargs[action.dest] def get_default(self, dest): for action in self._actions: if action.dest == dest and action.default is not None: return action.default return self._defaults.get(dest, None) # ======================= # Adding argument actions # ======================= def add_argument(self, *args, **kwargs): """ add_argument(dest, ..., name=value, ...) add_argument(option_string, option_string, ..., name=value, ...) 
""" # if no positional args are supplied or only one is supplied and # it doesn't look like an option string, parse a positional # argument chars = self.prefix_chars if not args or len(args) == 1 and args[0][0] not in chars: if args and 'dest' in kwargs: raise ValueError('dest supplied twice for positional argument') kwargs = self._get_positional_kwargs(*args, **kwargs) # otherwise, we're adding an optional argument else: kwargs = self._get_optional_kwargs(*args, **kwargs) # if no default was supplied, use the parser-level default if 'default' not in kwargs: dest = kwargs['dest'] if dest in self._defaults: kwargs['default'] = self._defaults[dest] elif self.argument_default is not None: kwargs['default'] = self.argument_default # create the action object, and add it to the parser action_class = self._pop_action_class(kwargs) if not _callable(action_class): raise ValueError('unknown action "%s"' % (action_class,)) action = action_class(**kwargs) # raise an error if the action type is not callable type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): raise ValueError('%r is not callable' % (type_func,)) # raise an error if the metavar does not match the type if hasattr(self, "_get_formatter"): try: self._get_formatter()._format_args(action, None) except TypeError: raise ValueError("length of metavar tuple does not match nargs") return self._add_action(action) def add_argument_group(self, *args, **kwargs): group = _ArgumentGroup(self, *args, **kwargs) self._action_groups.append(group) return group def add_mutually_exclusive_group(self, **kwargs): group = _MutuallyExclusiveGroup(self, **kwargs) self._mutually_exclusive_groups.append(group) return group def _add_action(self, action): # resolve any conflicts self._check_conflict(action) # add to actions list self._actions.append(action) action.container = self # index the action by any option strings it has for option_string in action.option_strings: 
self._option_string_actions[option_string] = action # set the flag if any option strings look like negative numbers for option_string in action.option_strings: if self._negative_number_matcher.match(option_string): if not self._has_negative_number_optionals: self._has_negative_number_optionals.append(True) # return the created action return action def _remove_action(self, action): self._actions.remove(action) def _add_container_actions(self, container): # collect groups by titles title_group_map = {} for group in self._action_groups: if group.title in title_group_map: msg = _('cannot merge actions - two groups are named %r') raise ValueError(msg % (group.title)) title_group_map[group.title] = group # map each action to its group group_map = {} for group in container._action_groups: # if a group with the title exists, use that, otherwise # create a new group matching the container's group if group.title not in title_group_map: title_group_map[group.title] = self.add_argument_group( title=group.title, description=group.description, conflict_handler=group.conflict_handler) # map the actions to their new group for action in group._group_actions: group_map[action] = title_group_map[group.title] # add container's mutually exclusive groups # NOTE: if add_mutually_exclusive_group ever gains title= and # description= then this code will need to be expanded as above for group in container._mutually_exclusive_groups: mutex_group = self.add_mutually_exclusive_group( required=group.required) # map the actions to their new mutex group for action in group._group_actions: group_map[action] = mutex_group # add all actions to this container or their group for action in container._actions: group_map.get(action, self)._add_action(action) def _get_positional_kwargs(self, dest, **kwargs): # make sure required is not specified if 'required' in kwargs: msg = _("'required' is an invalid argument for positionals") raise TypeError(msg) # mark positional arguments as required if at least one 
        # mark positional arguments as required unless nargs allows zero
        # values; a bare ZERO_OR_MORE positional with no default is
        # always required
        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
            kwargs['required'] = True
        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
            kwargs['required'] = True

        # return the keyword arguments with no option strings
        return dict(kwargs, dest=dest, option_strings=[])

    def _get_optional_kwargs(self, *args, **kwargs):
        # determine short and long option strings
        option_strings = []
        long_option_strings = []
        for option_string in args:
            # error on strings that don't start with an appropriate prefix
            if not option_string[0] in self.prefix_chars:
                args = {'option': option_string,
                        'prefix_chars': self.prefix_chars}
                msg = _('invalid option string %(option)r: '
                        'must start with a character %(prefix_chars)r')
                raise ValueError(msg % args)

            # strings starting with two prefix characters are long options
            option_strings.append(option_string)
            if option_string[0] in self.prefix_chars:
                if len(option_string) > 1:
                    if option_string[1] in self.prefix_chars:
                        long_option_strings.append(option_string)

        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
        dest = kwargs.pop('dest', None)
        if dest is None:
            if long_option_strings:
                dest_option_string = long_option_strings[0]
            else:
                dest_option_string = option_strings[0]
            dest = dest_option_string.lstrip(self.prefix_chars)
            if not dest:
                msg = _('dest= is required for options like %r')
                raise ValueError(msg % option_string)
            dest = dest.replace('-', '_')

        # return the updated keyword arguments
        return dict(kwargs, dest=dest, option_strings=option_strings)

    def _pop_action_class(self, kwargs, default=None):
        # Resolve the 'action' kwarg (a name or a class) via the registry.
        action = kwargs.pop('action', default)
        return self._registry_get('action', action, action)

    def _get_handler(self):
        # determine function from conflict handler string
        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
        try:
            return getattr(self, handler_func_name)
        except AttributeError:
            msg = _('invalid conflict_resolution value: %r')
            raise ValueError(msg % self.conflict_handler)

    def _check_conflict(self, action):
        # find all options that conflict with this option
        confl_optionals = []
        for option_string in action.option_strings:
            if option_string in self._option_string_actions:
                confl_optional = self._option_string_actions[option_string]
                confl_optionals.append((option_string, confl_optional))

        # resolve any conflicts
        if confl_optionals:
            conflict_handler = self._get_handler()
            conflict_handler(action, confl_optionals)

    def _handle_conflict_error(self, action, conflicting_actions):
        # 'error' policy: refuse to redefine an existing option string.
        message = ngettext('conflicting option string: %s',
                           'conflicting option strings: %s',
                           len(conflicting_actions))
        conflict_string = ', '.join([option_string
                                     for option_string, action
                                     in conflicting_actions])
        raise ArgumentError(action, message % conflict_string)

    def _handle_conflict_resolve(self, action, conflicting_actions):
        # 'resolve' policy: the new action wins; strip the colliding option
        # strings from the older actions.

        # remove all conflicting options
        for option_string, action in conflicting_actions:

            # remove the conflicting option
            action.option_strings.remove(option_string)
            self._option_string_actions.pop(option_string, None)

            # if the option now has no option string, remove it from the
            # container holding it
            if not action.option_strings:
                action.container._remove_action(action)


class _ArgumentGroup(_ActionsContainer):
    """A named group of actions that shares its parent container's state
    but is rendered as its own section in help output."""

    def __init__(self, container, title=None, description=None, **kwargs):
        # add any missing keyword arguments by checking the container
        update = kwargs.setdefault
        update('conflict_handler', container.conflict_handler)
        update('prefix_chars', container.prefix_chars)
        update('argument_default', container.argument_default)
        super_init = super(_ArgumentGroup, self).__init__
        super_init(description=description, **kwargs)

        # group attributes
        self.title = title
        self._group_actions = []

        # share most attributes with the container
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals
        self._mutually_exclusive_groups = container._mutually_exclusive_groups

    def _add_action(self, action):
        # Register with the shared container, then track group membership.
        action = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)


class _MutuallyExclusiveGroup(_ArgumentGroup):
    """Group whose member options may not appear together on the command
    line; membership is checked during parsing."""

    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container

    def _add_action(self, action):
        # A required action could never coexist with its siblings, so it
        # cannot be a member of a mutually exclusive group.
        if action.required:
            msg = _('mutually exclusive arguments must be optional')
            raise ValueError(msg)
        action = self._container._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)


class ArgumentParser(_AttributeHolder, _ActionsContainer):
    """Object for parsing command line strings into Python objects.

    Keyword Arguments:
        - prog -- The name of the program (default: sys.argv[0])
        - usage -- A usage message (default: auto-generated from arguments)
        - description -- A description of what the program does
        - epilog -- Text following the argument descriptions
        - parents -- Parsers whose arguments should be copied into this one
        - formatter_class -- HelpFormatter class for printing help messages
        - prefix_chars -- Characters that prefix optional arguments
        - fromfile_prefix_chars -- Characters that prefix files containing
            additional arguments
        - argument_default -- The default value for all arguments
        - conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/--help option
    """

    # NOTE: parents=[] is a mutable default, but it is only iterated below,
    # never mutated, so the shared list is harmless here.
    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=[],
                 formatter_class=HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True):

        if version is not None:
            import warnings
            warnings.warn(
                """The "version" argument to ArgumentParser is deprecated. """
                """Please use """
                """"add_argument(..., action='version', version="N", ...)" """
                """instead""", DeprecationWarning)

        superinit = super(ArgumentParser, self).__init__
        superinit(description=description,
                  prefix_chars=prefix_chars,
                  argument_default=argument_default,
                  conflict_handler=conflict_handler)

        # default setting for prog
        if prog is None:
            prog = _os.path.basename(_sys.argv[0])

        self.prog = prog
        self.usage = usage
        self.epilog = epilog
        self.version = version
        self.formatter_class = formatter_class
        self.fromfile_prefix_chars = fromfile_prefix_chars
        self.add_help = add_help

        add_group = self.add_argument_group
        self._positionals = add_group(_('positional arguments'))
        self._optionals = add_group(_('optional arguments'))
        self._subparsers = None

        # register types
        def identity(string):
            return string
        self.register('type', None, identity)

        # add help and version arguments if necessary
        # (using explicit default to override global argument_default)
        default_prefix = '-' if '-' in prefix_chars else prefix_chars[0]
        if self.add_help:
            self.add_argument(
                default_prefix+'h', default_prefix*2+'help',
                action='help', default=SUPPRESS,
                help=_('show this help message and exit'))
        if self.version:
            self.add_argument(
                default_prefix+'v', default_prefix*2+'version',
                action='version', default=SUPPRESS,
                version=self.version,
                help=_("show program's version number and exit"))

        # add parent arguments and defaults
        for parent in parents:
            self._add_container_actions(parent)
            try:
                defaults = parent._defaults
            except AttributeError:
                pass
            else:
                self._defaults.update(defaults)

    # =======================
    # Pretty __repr__ methods
    # =======================
    def _get_kwargs(self):
        names = [
            'prog',
            'usage',
            'description',
            'version',
            'formatter_class',
            'conflict_handler',
            'add_help',
        ]
        return [(name, getattr(self, name)) for name in names]

    # ==================================
    # Optional/Positional adding methods
    # ==================================
    def add_subparsers(self, **kwargs):
        if self._subparsers is not None:
            self.error(_('cannot have multiple subparser arguments'))

        # add the parser class to the arguments if it's not present
        kwargs.setdefault('parser_class', type(self))

        if 'title' in kwargs or 'description' in kwargs:
            title = _(kwargs.pop('title', 'subcommands'))
            description = _(kwargs.pop('description', None))
            self._subparsers = self.add_argument_group(title, description)
        else:
            self._subparsers = self._positionals

        # prog defaults to the usage message of this parser, skipping
        # optional arguments and with no "usage:" prefix
        if kwargs.get('prog') is None:
            formatter = self._get_formatter()
            positionals = self._get_positional_actions()
            groups = self._mutually_exclusive_groups
            formatter.add_usage(self.usage, positionals, groups, '')
            kwargs['prog'] = formatter.format_help().strip()

        # create the parsers action and add it to the positionals list
        parsers_class = self._pop_action_class(kwargs, 'parsers')
        action = parsers_class(option_strings=[], **kwargs)
        self._subparsers._add_action(action)

        # return the created parsers action
        return action

    def _add_action(self, action):
        # Route new actions into the appropriate default group.
        if action.option_strings:
            self._optionals._add_action(action)
        else:
            self._positionals._add_action(action)
        return action

    def _get_optional_actions(self):
        return [action
                for action in self._actions
                if action.option_strings]

    def _get_positional_actions(self):
        return [action
                for action in self._actions
                if not action.option_strings]

    # =====================================
    # Command line argument parsing methods
    # =====================================
    def parse_args(self, args=None, namespace=None):
        # Like parse_known_args, but any leftover argument is an error.
        args, argv = self.parse_known_args(args, namespace)
        if argv:
            msg = _('unrecognized arguments: %s')
            self.error(msg % ' '.join(argv))
        return args

    def parse_known_args(self, args=None, namespace=None):
        # args default to the system args
        if args is None:
            args = _sys.argv[1:]

        # default Namespace built from parser defaults
        if namespace is None:
            namespace = Namespace()

        # add any action
        # add any action defaults that aren't present
        for action in self._actions:
            if action.dest is not SUPPRESS:
                if not hasattr(namespace, action.dest):
                    if action.default is not SUPPRESS:
                        default = action.default
                        # string defaults go through the type converter
                        if isinstance(action.default, str):
                            default = self._get_value(action, default)
                        setattr(namespace, action.dest, default)

        # add any parser defaults that aren't present
        for dest in self._defaults:
            if not hasattr(namespace, dest):
                setattr(namespace, dest, self._defaults[dest])

        # parse the arguments and exit if there are any errors
        try:
            namespace, args = self._parse_known_args(args, namespace)
            if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
                args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
                delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
            return namespace, args
        except ArgumentError:
            # py2/py3-compatible way to get the active exception
            err = _sys.exc_info()[1]
            self.error(str(err))

    def _parse_known_args(self, arg_strings, namespace):
        # replace arg strings that are file references
        if self.fromfile_prefix_chars is not None:
            arg_strings = self._read_args_from_files(arg_strings)

        # map all mutually exclusive arguments to the other arguments
        # they can't occur with
        action_conflicts = {}
        for mutex_group in self._mutually_exclusive_groups:
            group_actions = mutex_group._group_actions
            for i, mutex_action in enumerate(mutex_group._group_actions):
                conflicts = action_conflicts.setdefault(mutex_action, [])
                conflicts.extend(group_actions[:i])
                conflicts.extend(group_actions[i + 1:])

        # find all option indices, and determine the arg_string_pattern
        # which has an 'O' if there is an option at an index,
        # an 'A' if there is an argument, or a '-' if there is a '--'
        option_string_indices = {}
        arg_string_pattern_parts = []
        arg_strings_iter = iter(arg_strings)
        for i, arg_string in enumerate(arg_strings_iter):

            # all args after -- are non-options
            if arg_string == '--':
                arg_string_pattern_parts.append('-')
                for arg_string in arg_strings_iter:
                    arg_string_pattern_parts.append('A')

            # otherwise, add the arg to the arg strings
            # and note the index if it was an option
            else:
                option_tuple = self._parse_optional(arg_string)
                if option_tuple is None:
                    pattern = 'A'
                else:
                    option_string_indices[i] = option_tuple
                    pattern = 'O'
                arg_string_pattern_parts.append(pattern)

        # join the pieces together to form the pattern
        arg_strings_pattern = ''.join(arg_string_pattern_parts)

        # converts arg strings to the appropriate and then takes the action
        seen_actions = set()
        seen_non_default_actions = set()

        def take_action(action, argument_strings, option_string=None):
            seen_actions.add(action)
            argument_values = self._get_values(action, argument_strings)

            # error if this argument is not allowed with other previously
            # seen arguments, assuming that actions that use the default
            # value don't really count as "present"
            if argument_values is not action.default:
                seen_non_default_actions.add(action)
                for conflict_action in action_conflicts.get(action, []):
                    if conflict_action in seen_non_default_actions:
                        msg = _('not allowed with argument %s')
                        action_name = _get_action_name(conflict_action)
                        raise ArgumentError(action, msg % action_name)

            # take the action if we didn't receive a SUPPRESS value
            # (e.g. from a default)
            if argument_values is not SUPPRESS:
                action(self, namespace, argument_values, option_string)

        # function to convert arg_strings into an optional action
        def consume_optional(start_index):

            # get the optional identified at this index
            option_tuple = option_string_indices[start_index]
            action, option_string, explicit_arg = option_tuple

            # identify additional optionals in the same arg string
            # (e.g. -xyz is the same as -x -y -z if no args are required)
            match_argument = self._match_argument
            action_tuples = []
            while True:

                # if we found no optional action, skip it
                if action is None:
                    extras.append(arg_strings[start_index])
                    return start_index + 1

                # if there is an explicit argument, try to match the
                # optional's string arguments to only this
                if explicit_arg is not None:
                    arg_count = match_argument(action, 'A')

                    # if the action is a single-dash option and takes no
                    # arguments, try to parse more single-dash options out
                    # of the tail of the option string
                    chars = self.prefix_chars
                    if arg_count == 0 and option_string[1] not in chars:
                        action_tuples.append((action, [], option_string))
                        char = option_string[0]
                        option_string = char + explicit_arg[0]
                        new_explicit_arg = explicit_arg[1:] or None
                        optionals_map = self._option_string_actions
                        if option_string in optionals_map:
                            action = optionals_map[option_string]
                            explicit_arg = new_explicit_arg
                        else:
                            msg = _('ignored explicit argument %r')
                            raise ArgumentError(action, msg % explicit_arg)

                    # if the action expect exactly one argument, we've
                    # successfully matched the option; exit the loop
                    elif arg_count == 1:
                        stop = start_index + 1
                        args = [explicit_arg]
                        action_tuples.append((action, args, option_string))
                        break

                    # error if a double-dash option did not use the
                    # explicit argument
                    else:
                        msg = _('ignored explicit argument %r')
                        raise ArgumentError(action, msg % explicit_arg)

                # if there is no explicit argument, try to match the
                # optional's string arguments with the following strings
                # if successful, exit the loop
                else:
                    start = start_index + 1
                    selected_patterns = arg_strings_pattern[start:]
                    arg_count = match_argument(action, selected_patterns)
                    stop = start + arg_count
                    args = arg_strings[start:stop]
                    action_tuples.append((action, args, option_string))
                    break

            # add the Optional to the list and return the index at which
            # the Optional's string args stopped
            assert action_tuples
            for action, args, option_string in action_tuples:
                take_action(action, args, option_string)
            return stop

        # the list of Positionals left to be parsed; this is modified
        # by consume_positionals()
        positionals = self._get_positional_actions()

        # function to convert arg_strings into positional actions
        def consume_positionals(start_index):
            # match as many Positionals as possible
            match_partial = self._match_arguments_partial
            selected_pattern = arg_strings_pattern[start_index:]
            arg_counts = match_partial(positionals, selected_pattern)

            # slice off the appropriate arg strings for each Positional
            # and add the Positional and its args to the list
            for action, arg_count in zip(positionals, arg_counts):
                args = arg_strings[start_index: start_index + arg_count]
                start_index += arg_count
                take_action(action, args)

            # slice off the Positionals that we just parsed and return the
            # index at which the Positionals' string args stopped
            positionals[:] = positionals[len(arg_counts):]
            return start_index

        # consume Positionals and Optionals alternately, until we have
        # passed the last option string
        extras = []
        start_index = 0
        if option_string_indices:
            max_option_string_index = max(option_string_indices)
        else:
            max_option_string_index = -1
        while start_index <= max_option_string_index:

            # consume any Positionals preceding the next option
            next_option_string_index = min([
                index
                for index in option_string_indices
                if index >= start_index])
            if start_index != next_option_string_index:
                positionals_end_index = consume_positionals(start_index)

                # only try to parse the next optional if we didn't consume
                # the option string during the positionals parsing
                if positionals_end_index > start_index:
                    start_index = positionals_end_index
                    continue
                else:
                    start_index = positionals_end_index

            # if we consumed all the positionals we could and we're not
            # at the index of an option string, there were extra arguments
            if start_index not in option_string_indices:
                strings = arg_strings[start_index:next_option_string_index]
                extras.extend(strings)
                start_index = next_option_string_index

            # consume the next
            # consume the next optional and any arguments for it
            start_index = consume_optional(start_index)

        # consume any positionals following the last Optional
        stop_index = consume_positionals(start_index)

        # if we didn't consume all the argument strings, there were extras
        extras.extend(arg_strings[stop_index:])

        # make sure all required actions were present
        required_actions = [_get_action_name(action) for action in self._actions
                            if action.required and action not in seen_actions]
        if required_actions:
            self.error(_('the following arguments are required: %s') %
                       ', '.join(required_actions))

        # make sure all required groups had one option present
        for group in self._mutually_exclusive_groups:
            if group.required:
                for action in group._group_actions:
                    if action in seen_non_default_actions:
                        break

                # if no actions were used, report the error
                else:
                    names = [_get_action_name(action)
                             for action in group._group_actions
                             if action.help is not SUPPRESS]
                    msg = _('one of the arguments %s is required')
                    self.error(msg % ' '.join(names))

        # return the updated namespace and the extra arguments
        return namespace, extras

    def _read_args_from_files(self, arg_strings):
        # expand arguments referencing files (recursively, so an @file may
        # itself contain further @file references)
        new_arg_strings = []
        for arg_string in arg_strings:

            # for regular arguments, just add them back into the list
            if arg_string[0] not in self.fromfile_prefix_chars:
                new_arg_strings.append(arg_string)

            # replace arguments referencing files with the file content
            else:
                try:
                    args_file = open(arg_string[1:])
                    try:
                        arg_strings = []
                        for arg_line in args_file.read().splitlines():
                            for arg in self.convert_arg_line_to_args(arg_line):
                                arg_strings.append(arg)
                        arg_strings = self._read_args_from_files(arg_strings)
                        new_arg_strings.extend(arg_strings)
                    finally:
                        args_file.close()
                except IOError:
                    err = _sys.exc_info()[1]
                    self.error(str(err))

        # return the modified argument list
        return new_arg_strings

    def convert_arg_line_to_args(self, arg_line):
        # Hook for subclasses: how one line of an @args-file becomes args.
        return [arg_line]

    def _match_argument(self, action, arg_strings_pattern):
        # match the pattern for this action to the arg strings
        nargs_pattern = self._get_nargs_pattern(action)
        match = _re.match(nargs_pattern, arg_strings_pattern)

        # raise an exception if we weren't able to find a match
        if match is None:
            nargs_errors = {
                None: _('expected one argument'),
                OPTIONAL: _('expected at most one argument'),
                ONE_OR_MORE: _('expected at least one argument'),
            }
            default = ngettext('expected %s argument',
                               'expected %s arguments',
                               action.nargs) % action.nargs
            msg = nargs_errors.get(action.nargs, default)
            raise ArgumentError(action, msg)

        # return the number of arguments matched
        return len(match.group(1))

    def _match_arguments_partial(self, actions, arg_strings_pattern):
        # progressively shorten the actions list by slicing off the
        # final actions until we find a match
        result = []
        for i in range(len(actions), 0, -1):
            actions_slice = actions[:i]
            pattern = ''.join([self._get_nargs_pattern(action)
                               for action in actions_slice])
            match = _re.match(pattern, arg_strings_pattern)
            if match is not None:
                result.extend([len(string) for string in match.groups()])
                break

        # return the list of arg string counts
        return result

    def _parse_optional(self, arg_string):
        # Returns (action, option_string, explicit_arg) for an option-like
        # string, or None when the string should be treated as positional.

        # if it's an empty string, it was meant to be a positional
        if not arg_string:
            return None

        # if it doesn't start with a prefix, it was meant to be positional
        if not arg_string[0] in self.prefix_chars:
            return None

        # if the option string is present in the parser, return the action
        if arg_string in self._option_string_actions:
            action = self._option_string_actions[arg_string]
            return action, arg_string, None

        # if it's just a single character, it was meant to be positional
        if len(arg_string) == 1:
            return None

        # if the option string before the "=" is present, return the action
        if '=' in arg_string:
            option_string, explicit_arg = arg_string.split('=', 1)
            if option_string in self._option_string_actions:
                action = self._option_string_actions[option_string]
                return action, option_string, explicit_arg

        # search through all possible prefixes of the option string
        # and
all actions in the parser for possible interpretations option_tuples = self._get_option_tuples(arg_string) # if multiple actions match, the option string was ambiguous if len(option_tuples) > 1: options = ', '.join([option_string for action, option_string, explicit_arg in option_tuples]) args = {'option': arg_string, 'matches': options} msg = _('ambiguous option: %(option)s could match %(matches)s') self.error(msg % args) # if exactly one action matched, this segmentation is good, # so return the parsed action elif len(option_tuples) == 1: option_tuple, = option_tuples return option_tuple # if it was not found as an option, but it looks like a negative # number, it was meant to be positional # unless there are negative-number-like options if self._negative_number_matcher.match(arg_string): if not self._has_negative_number_optionals: return None # if it contains a space, it was meant to be a positional if ' ' in arg_string: return None # it was meant to be an optional but there is no such option # in this parser (though it might be a valid option in a subparser) return None, arg_string, None def _get_option_tuples(self, option_string): result = [] # option strings starting with two prefix characters are only # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: if '=' in option_string: option_prefix, explicit_arg = option_string.split('=', 1) else: option_prefix = option_string explicit_arg = None for option_string in self._option_string_actions: if option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # single character options can be concatenated with their arguments # but multiple character options always have to have their argument # separate elif option_string[0] in chars and option_string[1] not in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] short_explicit_arg 
= option_string[2:] for option_string in self._option_string_actions: if option_string == short_option_prefix: action = self._option_string_actions[option_string] tup = action, option_string, short_explicit_arg result.append(tup) elif option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) # return the collected option tuples return result def _get_nargs_pattern(self, action): # in all examples below, we have to allow for '--' args # which are represented as '-' in the pattern nargs = action.nargs # the default (None) is assumed to be a single argument if nargs is None: nargs_pattern = '(-*A-*)' # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' # allow any number of options or arguments elif nargs == REMAINDER: nargs_pattern = '([-AO]*)' # allow one argument followed by any number of options or arguments elif nargs == PARSER: nargs_pattern = '(-*A[-AO]*)' # all others should be integers else: nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) # if this is an optional action, -- is not allowed if action.option_strings: nargs_pattern = nargs_pattern.replace('-*', '') nargs_pattern = nargs_pattern.replace('-', '') # return the pattern return nargs_pattern # ======================== # Value conversion methods # ======================== def _get_values(self, action, arg_strings): # for everything but PARSER args, strip out '--' if action.nargs not in [PARSER, REMAINDER]: arg_strings = [s for s in arg_strings if s != '--'] # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: if action.option_strings: value = action.const else: 
value = action.default if isinstance(value, str): value = self._get_value(action, value) self._check_value(action, value) # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None elif (not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings): if action.default is not None: value = action.default else: value = arg_strings self._check_value(action, value) # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) # REMAINDER arguments convert all values, checking none elif action.nargs == REMAINDER: value = [self._get_value(action, v) for v in arg_strings] # PARSER arguments convert all values, but check only the first elif action.nargs == PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: self._check_value(action, v) # return the converted value return value def _get_value(self, action, arg_string): type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): msg = _('%r is not callable') raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type try: result = type_func(arg_string) # ArgumentTypeErrors indicate errors except ArgumentTypeError: name = getattr(action.type, '__name__', repr(action.type)) msg = str(_sys.exc_info()[1]) raise ArgumentError(action, msg) # TypeErrors or ValueErrors also indicate errors except (TypeError, ValueError): name = getattr(action.type, '__name__', repr(action.type)) args = {'type': name, 'value': arg_string} msg = _('invalid %(type)s value: %(value)r') raise ArgumentError(action, msg % args) # return the converted value return result def 
_check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: args = {'value': value, 'choices': ', '.join(map(repr, action.choices))} msg = _('invalid choice: %(value)r (choose from %(choices)s)') raise ArgumentError(action, msg % args) # ======================= # Help-formatting methods # ======================= def format_usage(self): formatter = self._get_formatter() formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) return formatter.format_help() def format_help(self): formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals and user-defined groups for action_group in self._action_groups: formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def format_version(self): import warnings warnings.warn( 'The format_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning) formatter = self._get_formatter() formatter.add_text(self.version) return formatter.format_help() def _get_formatter(self): return self.formatter_class(prog=self.prog) # ===================== # Help-printing methods # ===================== def print_usage(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_usage(), file) def print_help(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_help(), file) def print_version(self, file=None): import warnings warnings.warn( 'The print_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', 
DeprecationWarning) self._print_message(self.format_version(), file) def _print_message(self, message, file=None): if message: if file is None: file = _sys.stderr file.write(message) # =============== # Exiting methods # =============== def exit(self, status=0, message=None): if message: self._print_message(message, _sys.stderr) _sys.exit(status) def error(self, message): """error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. """ self.print_usage(_sys.stderr) args = {'prog': self.prog, 'message': message} self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
89,553
Python
.py
1,959
33.760082
83
0.575114
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,760
MultipartPostHandler.py
evilhero_mylar/lib/MultipartPostHandler.py
#!/usr/bin/python #### # 06/2010 Nic Wolfe <nic@wolfeden.ca> # 02/2006 Will Holcomb <wholcomb@gmail.com> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # import urllib import urllib2 import mimetools, mimetypes import os, sys # Controls how sequences are uncoded. If true, elements may be given multiple values by # assigning a sequence. doseq = 1 class MultipartPostHandler(urllib2.BaseHandler): handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first def http_request(self, request): data = request.get_data() if data is not None and type(data) != str: v_files = [] v_vars = [] try: for(key, value) in data.items(): if type(value) in (file, list, tuple): v_files.append((key, value)) else: v_vars.append((key, value)) except TypeError: systype, value, traceback = sys.exc_info() raise TypeError, "not a valid non-string sequence or mapping object", traceback if len(v_files) == 0: data = urllib.urlencode(v_vars, doseq) else: boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files) contenttype = 'multipart/form-data; boundary=%s' % boundary if(request.has_header('Content-Type') and request.get_header('Content-Type').find('multipart/form-data') != 0): print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data') request.add_unredirected_header('Content-Type', contenttype) request.add_data(data) return request @staticmethod def multipart_encode(vars, files, boundary = None, buffer = None): if boundary is None: boundary = mimetools.choose_boundary() if buffer is 
None: buffer = '' for(key, value) in vars: buffer += '--%s\r\n' % boundary buffer += 'Content-Disposition: form-data; name="%s"' % key buffer += '\r\n\r\n' + value + '\r\n' for(key, fd) in files: # allow them to pass in a file or a tuple with name & data if type(fd) == file: name_in = fd.name fd.seek(0) data_in = fd.read() elif type(fd) in (tuple, list): name_in, data_in = fd filename = os.path.basename(name_in) contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' buffer += '--%s\r\n' % boundary buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename) buffer += 'Content-Type: %s\r\n' % contenttype # buffer += 'Content-Length: %s\r\n' % file_size buffer += '\r\n' + data_in + '\r\n' buffer += '--%s--\r\n\r\n' % boundary return boundary, buffer https_request = http_request
3,642
Python
.py
77
35.883117
111
0.581342
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,761
configobj.py
evilhero_mylar/lib/configobj.py
# configobj.py # A config file reader/writer that supports nested sections in config files. # Copyright (C) 2005-2010 Michael Foord, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # nico AT tekNico DOT net # ConfigObj 4 # http://www.voidspace.org.uk/python/configobj.html # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. from __future__ import generators import os import re import sys from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE # imported lazily to avoid startup performance hit if it isn't used compiler = None # A dictionary mapping BOM to # the encoding to decode with, and what to set the # encoding attribute to. BOMS = { BOM_UTF8: ('utf_8', None), BOM_UTF16_BE: ('utf16_be', 'utf_16'), BOM_UTF16_LE: ('utf16_le', 'utf_16'), BOM_UTF16: ('utf_16', 'utf_16'), } # All legal variants of the BOM codecs. # TODO: the list of aliases is not meant to be exhaustive, is there a # better way ? BOM_LIST = { 'utf_16': 'utf_16', 'u16': 'utf_16', 'utf16': 'utf_16', 'utf-16': 'utf_16', 'utf16_be': 'utf16_be', 'utf_16_be': 'utf16_be', 'utf-16be': 'utf16_be', 'utf16_le': 'utf16_le', 'utf_16_le': 'utf16_le', 'utf-16le': 'utf16_le', 'utf_8': 'utf_8', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf-8': 'utf_8', } # Map of encodings to the BOM to write. 
BOM_SET = { 'utf_8': BOM_UTF8, 'utf_16': BOM_UTF16, 'utf16_be': BOM_UTF16_BE, 'utf16_le': BOM_UTF16_LE, None: BOM_UTF8 } def match_utf8(encoding): return BOM_LIST.get(encoding.lower()) == 'utf_8' # Quote strings used for writing values squot = "'%s'" dquot = '"%s"' noquot = "%s" wspace_plus = ' \r\n\v\t\'"' tsquot = '"""%s"""' tdquot = "'''%s'''" # Sentinel for use in getattr calls to replace hasattr MISSING = object() __version__ = '4.7.2' try: any except NameError: def any(iterable): for entry in iterable: if entry: return True return False __all__ = ( '__version__', 'DEFAULT_INDENT_TYPE', 'DEFAULT_INTERPOLATION', 'ConfigObjError', 'NestingError', 'ParseError', 'DuplicateError', 'ConfigspecError', 'ConfigObj', 'SimpleVal', 'InterpolationError', 'InterpolationLoopError', 'MissingInterpolationOption', 'RepeatSectionError', 'ReloadError', 'UnreprError', 'UnknownType', 'flatten_errors', 'get_extra_values' ) DEFAULT_INTERPOLATION = 'configparser' DEFAULT_INDENT_TYPE = ' ' MAX_INTERPOL_DEPTH = 10 OPTION_DEFAULTS = { 'interpolation': True, 'raise_errors': False, 'list_values': True, 'create_empty': False, 'file_error': False, 'configspec': None, 'stringify': True, # option may be set to one of ('', ' ', '\t') 'indent_type': None, 'encoding': None, 'default_encoding': None, 'unrepr': False, 'write_empty_values': False, } def getObj(s): global compiler if compiler is None: import compiler s = "a=" + s p = compiler.parse(s) return p.getChildren()[1].getChildren()[0].getChildren()[1] class UnknownType(Exception): pass class Builder(object): def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def 
build_Name(self, o): if o.name == 'None': return None if o.name == 'True': return True if o.name == 'False': return False # An undefined Name raise UnknownType('Undefined Name') def build_Add(self, o): real, imag = map(self.build_Const, o.getChildren()) try: real = float(real) except TypeError: raise UnknownType('Add') if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real+imag def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_UnarySub(self, o): return -self.build_Const(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) _builder = Builder() def unrepr(s): if not s: return s return _builder.build(getObj(s)) class ConfigObjError(SyntaxError): """ This is the base class for all errors that ConfigObj raises. It is a subclass of SyntaxError. """ def __init__(self, message='', line_number=None, line=''): self.line = line self.line_number = line_number SyntaxError.__init__(self, message) class NestingError(ConfigObjError): """ This error indicates a level of nesting that doesn't match. """ class ParseError(ConfigObjError): """ This error indicates that a line is badly written. It is neither a valid ``key = value`` line, nor a valid section marker line. """ class ReloadError(IOError): """ A 'reload' operation failed. This exception is a subclass of ``IOError``. """ def __init__(self): IOError.__init__(self, 'reload failed, filename is not set.') class DuplicateError(ConfigObjError): """ The keyword or section specified already exists. """ class ConfigspecError(ConfigObjError): """ An error occured whilst parsing a configspec. """ class InterpolationError(ConfigObjError): """Base class for the two interpolation errors.""" class InterpolationLoopError(InterpolationError): """Maximum interpolation depth exceeded in string interpolation.""" def __init__(self, option): InterpolationError.__init__( self, 'interpolation loop detected in value "%s".' 
% option) class RepeatSectionError(ConfigObjError): """ This error indicates additional sections in a section with a ``__many__`` (repeated) section. """ class MissingInterpolationOption(InterpolationError): """A value specified for interpolation was missing.""" def __init__(self, option): msg = 'missing option "%s" in interpolation.' % option InterpolationError.__init__(self, msg) class UnreprError(ConfigObjError): """An error parsing in unrepr mode.""" class InterpolationEngine(object): """ A helper class to help perform string interpolation. This class is an abstract base class; its descendants perform the actual work. """ # compiled regexp to use in self.interpolate() _KEYCRE = re.compile(r"%\(([^)]*)\)s") _cookie = '%' def __init__(self, section): # the Section instance that "owns" this engine self.section = section def interpolate(self, key, value): # short-cut if not self._cookie in value: return value def recursive_interpolate(key, value, section, backtrail): """The function that does the actual work. ``value``: the string we're trying to interpolate. ``section``: the section in which that string was found ``backtrail``: a dict to keep track of where we've been, to detect and prevent infinite recursion loops This is similar to a depth-first-search algorithm. """ # Have we been here already? 
if (key, section.name) in backtrail: # Yes - infinite loop detected raise InterpolationLoopError(key) # Place a marker on our backtrail so we won't come back here again backtrail[(key, section.name)] = 1 # Now start the actual work match = self._KEYCRE.search(value) while match: # The actual parsing of the match is implementation-dependent, # so delegate to our helper function k, v, s = self._parse_match(match) if k is None: # That's the signal that no further interpolation is needed replacement = v else: # Further interpolation may be needed to obtain final value replacement = recursive_interpolate(k, v, s, backtrail) # Replace the matched string with its final value start, end = match.span() value = ''.join((value[:start], replacement, value[end:])) new_search_start = start + len(replacement) # Pick up the next interpolation key, if any, for next time # through the while loop match = self._KEYCRE.search(value, new_search_start) # Now safe to come back here again; remove marker from backtrail del backtrail[(key, section.name)] return value # Back in interpolate(), all we have to do is kick off the recursive # function with appropriate starting values value = recursive_interpolate(key, value, self.section, {}) return value def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! 
save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None and not isinstance(val, Section): break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None and not isinstance(val, Section): break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section def _parse_match(self, match): """Implementation-dependent helper function. Will be passed a match object corresponding to the interpolation key we just found (e.g., "%(foo)s" or "$foo"). Should look up that key in the appropriate config file section (using the ``_fetch()`` helper function) and return a 3-tuple: (key, value, section) ``key`` is the name of the key we're looking for ``value`` is the value found for that key ``section`` is a reference to the section where it was found ``key`` and ``section`` should be None if no further interpolation should be performed on the resulting value (e.g., if we interpolated "$$" and returned "$"). 
""" raise NotImplementedError() class ConfigParserInterpolation(InterpolationEngine): """Behaves like ConfigParser.""" _cookie = '%' _KEYCRE = re.compile(r"%\(([^)]*)\)s") def _parse_match(self, match): key = match.group(1) value, section = self._fetch(key) return key, value, section class TemplateInterpolation(InterpolationEngine): """Behaves like string.Template.""" _cookie = '$' _delimiter = '$' _KEYCRE = re.compile(r""" \$(?: (?P<escaped>\$) | # Two $ signs (?P<named>[_a-z][_a-z0-9]*) | # $name format {(?P<braced>[^}]*)} # ${name} format ) """, re.IGNORECASE | re.VERBOSE) def _parse_match(self, match): # Valid name (in or out of braces): fetch value from section key = match.group('named') or match.group('braced') if key is not None: value, section = self._fetch(key) return key, value, section # Escaped delimiter (e.g., $$): return single delimiter if match.group('escaped') is not None: # Return None for key and section to indicate it's time to stop return None, self._delimiter, None # Anything else: ignore completely, just return it unchanged return None, match.group(), None interpolation_engines = { 'configparser': ConfigParserInterpolation, 'template': TemplateInterpolation, } def __newobj__(cls, *args): # Hack for pickle return cls.__new__(cls, *args) class Section(dict): """ A dictionary-like object that represents a section in a config file. It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. Iteration follows the order: scalars, then sections. 
""" def __setstate__(self, state): dict.update(self, state[0]) self.__dict__.update(state[1]) def __reduce__(self): state = (dict(self), self.__dict__) return (__newobj__, (self.__class__,), state) def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above * depth is the depth level of this section * main is the main ConfigObj * indict is a dictionary to initialise the section with """ if indict is None: indict = {} dict.__init__(self) # used for nesting level *and* interpolation self.parent = parent # used for the interpolation attribute self.main = main # level of nesting depth of this Section self.depth = depth # purely for information self.name = name # self._initialise() # we do this explicitly so that __setitem__ is used properly # (rather than just passing to ``dict.__init__``) for entry, value in indict.iteritems(): self[entry] = value def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] # the sequence of sections in this Section self.sections = [] # for comments :-) self.comments = {} self.inline_comments = {} # the configspec self.configspec = None # for defaults self.defaults = [] self.default_values = {} self.extra_values = [] self._created = False def _interpolate(self, key, value): try: # do we already have an interpolation engine? engine = self._interpolation_engine except AttributeError: # not yet: first time running _interpolate(), so pick the engine name = self.main.interpolation if name == True: # note that "if name:" would be incorrect here # backwards-compatibility: interpolation=True means use default name = DEFAULT_INTERPOLATION name = name.lower() # so that "Template", "template", etc. 
all work class_ = interpolation_engines.get(name, None) if class_ is None: # invalid value for self.main.interpolation self.main.interpolation = False return value else: # save reference to engine so we don't have to do this again engine = self._interpolation_engine = class_(self) # let the engine do the actual work return engine.interpolate(key, value) def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) if self.main.interpolation: if isinstance(val, basestring): return self._interpolate(key, val) if isinstance(val, list): def _check(entry): if isinstance(entry, basestring): return self._interpolate(key, entry) return entry new = [_check(entry) for entry in val] if new != val: return new return val def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. ``unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, basestring): raise ValueError('The key "%s" is not a string.' 
% key) # add the comment if key not in self.comments: self.comments[key] = [] self.inline_comments[key] = '' # remove the entry from defaults if key in self.defaults: self.defaults.remove(key) # if isinstance(value, Section): if key not in self: self.sections.append(key) dict.__setitem__(self, key, value) elif isinstance(value, dict) and not unrepr: # First create the new depth level, # then create the section if key not in self: self.sections.append(key) new_depth = self.depth + 1 dict.__setitem__( self, key, Section( self, new_depth, self.main, indict=value, name=key)) else: if key not in self: self.scalars.append(key) if not self.main.stringify: if isinstance(value, basestring): pass elif isinstance(value, (list, tuple)): for entry in value: if not isinstance(entry, basestring): raise TypeError('Value is not a string "%s".' % entry) else: raise TypeError('Value is not a string "%s".' % value) dict.__setitem__(self, key, value) def __delitem__(self, key): """Remove items from the sequence when deleting.""" dict. __delitem__(self, key) if key in self.scalars: self.scalars.remove(key) else: self.sections.remove(key) del self.comments[key] del self.inline_comments[key] def get(self, key, default=None): """A version of ``get`` that doesn't bypass string interpolation.""" try: return self[key] except KeyError: return default def update(self, indict): """ A version of update that uses our ``__setitem__``. """ for entry in indict: self[entry] = indict[entry] def pop(self, key, default=MISSING): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. 
If key is not found, d is returned if given, otherwise KeyError is raised' """ try: val = self[key] except KeyError: if default is MISSING: raise val = default else: del self[key] return val def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. Leaves other attributes alone : depth/main/parent are not affected """ dict.clear(self) self.scalars = [] self.sections = [] self.comments = {} self.inline_comments = {} self.configspec = None self.defaults = [] self.extra_values = [] def setdefault(self, key, default=None): """A version of setdefault that sets sequence if appropriate.""" try: return self[key] except KeyError: self[key] = default return self[key] def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples""" return zip((self.scalars + self.sections), self.values()) def keys(self): """D.keys() -> list of D's keys""" return (self.scalars + self.sections) def values(self): """D.values() -> list of D's values""" return [self[key] for key in (self.scalars + self.sections)] def iteritems(self): """D.iteritems() -> an iterator over the (key, value) items of D""" return iter(self.items()) def iterkeys(self): """D.iterkeys() -> an iterator over the keys of D""" return iter((self.scalars + self.sections)) __iter__ = iterkeys def itervalues(self): """D.itervalues() -> an iterator over the values of D""" return iter(self.values()) def __repr__(self): """x.__repr__() <==> repr(x)""" def _getval(key): try: return self[key] except MissingInterpolationOption: return dict.__getitem__(self, key) return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key)))) for key in (self.scalars + self.sections)]) __str__ = __repr__ __str__.__doc__ = "x.__str__() <==> str(x)" # Extra 
    # methods - not in a normal dictionary


    def dict(self):
        """
        Return a deepcopy of self as a dictionary.

        All members that are ``Section`` instances are recursively turned to
        ordinary dictionaries - by calling their ``dict`` method.

        >>> n = a.dict()
        >>> n == a
        1
        >>> n is a
        0
        """
        newdict = {}
        for entry in self:
            this_entry = self[entry]
            if isinstance(this_entry, Section):
                this_entry = this_entry.dict()
            elif isinstance(this_entry, list):
                # create a copy rather than a reference
                this_entry = list(this_entry)
            elif isinstance(this_entry, tuple):
                # create a copy rather than a reference
                this_entry = tuple(this_entry)
            newdict[entry] = this_entry
        return newdict


    def merge(self, indict):
        """
        A recursive update - useful for merging config files.

        >>> a = '''[section1]
        ...     option1 = True
        ...     [[subsection]]
        ...     more_options = False
        ...     # end of file'''.splitlines()
        >>> b = '''# File is user.ini
        ...     [section1]
        ...     option1 = False
        ...     # end of file'''.splitlines()
        >>> c1 = ConfigObj(b)
        >>> c2 = ConfigObj(a)
        >>> c2.merge(c1)
        >>> c2
        ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
        """
        for key, val in indict.items():
            if (key in self and isinstance(self[key], dict) and
                                isinstance(val, dict)):
                # both sides are sections - recurse rather than overwrite
                self[key].merge(val)
            else:
                self[key] = val


    def rename(self, oldkey, newkey):
        """
        Change a keyname to another, without changing position in sequence.

        Implemented so that transformations can be made on keys,
        as well as on values. (used by encode and decode)

        Also renames comments.
        """
        if oldkey in self.scalars:
            the_list = self.scalars
        elif oldkey in self.sections:
            the_list = self.sections
        else:
            raise KeyError('Key "%s" not found.' % oldkey)
        pos = the_list.index(oldkey)
        #
        val = self[oldkey]
        # bypass our __delitem__/__setitem__ so comments and ordering
        # can be transplanted manually below
        dict.__delitem__(self, oldkey)
        dict.__setitem__(self, newkey, val)
        the_list.remove(oldkey)
        # re-insert at the old position so sequence order is preserved
        the_list.insert(pos, newkey)
        comm = self.comments[oldkey]
        inline_comment = self.inline_comments[oldkey]
        del self.comments[oldkey]
        del self.inline_comments[oldkey]
        self.comments[newkey] = comm
        self.inline_comments[newkey] = inline_comment


    def walk(self, function, raise_errors=True,
            call_on_sections=False, **keywargs):
        """
        Walk every member and call a function on the keyword and value.

        Return a dictionary of the return values

        If the function raises an exception, raise the errror
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.

        Any unrecognised keyword arguments you pass to walk, will be pased on
        to the function you pass in.

        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then recurses into it's members. This means your function must be
        able to handle strings, dictionaries and lists. This allows you
        to change the key of subsections as well as for ordinary members. The
        return value when called on the whole subsection has to be discarded.

        See  the encode and decode methods for examples, including functions.

        .. admonition:: caution

            You can use ``walk`` to transform the names of members of a section
            but you mustn't add or delete members.

        >>> config = '''[XXXXsection]
        ... XXXXkey = XXXXvalue'''.splitlines()
        >>> cfg = ConfigObj(config)
        >>> cfg
        ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
        >>> def transform(section, key):
        ...     val = section[key]
        ...     newkey = key.replace('XXXX', 'CLIENT1')
        ...     section.rename(key, newkey)
        ...     if isinstance(val, (tuple, list, dict)):
        ...         pass
        ...     else:
        ...         val = val.replace('XXXX', 'CLIENT1')
        ...
        ...         section[newkey] = val
        >>> cfg.walk(transform, call_on_sections=True)
        {'CLIENT1section': {'CLIENT1key': None}}
        >>> cfg
        ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
        """
        out = {}
        # scalars first
        for i in range(len(self.scalars)):
            entry = self.scalars[i]
            try:
                val = function(self, entry, **keywargs)
                # bound again in case name has changed
                entry = self.scalars[i]
                out[entry] = val
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.scalars[i]
                    out[entry] = False
        # then sections
        for i in range(len(self.sections)):
            entry = self.sections[i]
            if call_on_sections:
                try:
                    function(self, entry, **keywargs)
                except Exception:
                    if raise_errors:
                        raise
                    else:
                        entry = self.sections[i]
                        out[entry] = False
                # bound again in case name has changed
                entry = self.sections[i]
            # previous result is discarded
            out[entry] = self[entry].walk(
                function,
                raise_errors=raise_errors,
                call_on_sections=call_on_sections,
                **keywargs)
        return out


    def as_bool(self, key):
        """
        Accepts a key as input. The corresponding value must be a string or
        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
        retain compatibility with Python 2.2.

        If the string is one of  ``True``, ``On``, ``Yes``, or ``1`` it returns
        ``True``.

        If the string is one of  ``False``, ``Off``, ``No``, or ``0`` it returns
        ``False``.

        ``as_bool`` is not case sensitive.

        Any other input will raise a ``ValueError``.

        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_bool('a')
        Traceback (most recent call last):
        ValueError: Value "fish" is neither True nor False
        >>> a['b'] = 'True'
        >>> a.as_bool('b')
        1
        >>> a['b'] = 'off'
        >>> a.as_bool('b')
        0
        """
        val = self[key]
        # ``==`` (not ``is``) deliberately lets 1/0 match True/False
        if val == True:
            return True
        elif val == False:
            return False
        else:
            try:
                if not isinstance(val, basestring):
                    # TODO: Why do we raise a KeyError here?
                    raise KeyError()
                else:
                    # map 'true'/'yes'/'on'/'1' (any case) via the lookup table
                    return self.main._bools[val.lower()]
            except KeyError:
                raise ValueError('Value "%s" is neither True nor False' % val)


    def as_int(self, key):
        """
        A convenience method which coerces the specified value to an integer.

        If the value is an invalid literal for ``int``, a ``ValueError`` will
        be raised.

        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_int('a')
        Traceback (most recent call last):
        ValueError: invalid literal for int() with base 10: 'fish'
        >>> a['b'] = '1'
        >>> a.as_int('b')
        1
        >>> a['b'] = '3.2'
        >>> a.as_int('b')
        Traceback (most recent call last):
        ValueError: invalid literal for int() with base 10: '3.2'
        """
        return int(self[key])


    def as_float(self, key):
        """
        A convenience method which coerces the specified value to a float.

        If the value is an invalid literal for ``float``, a ``ValueError`` will
        be raised.

        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_float('a')
        Traceback (most recent call last):
        ValueError: invalid literal for float(): fish
        >>> a['b'] = '1'
        >>> a.as_float('b')
        1.0
        >>> a['b'] = '3.2'
        >>> a.as_float('b')
        3.2000000000000002
        """
        return float(self[key])


    def as_list(self, key):
        """
        A convenience method which fetches the specified value, guaranteeing
        that it is a list.

        >>> a = ConfigObj()
        >>> a['a'] = 1
        >>> a.as_list('a')
        [1]
        >>> a['a'] = (1,)
        >>> a.as_list('a')
        [1]
        >>> a['a'] = [1]
        >>> a.as_list('a')
        [1]
        """
        result = self[key]
        if isinstance(result, (tuple, list)):
            # normalise tuples to lists; copy so callers can't mutate us
            return list(result)
        return [result]


    def restore_default(self, key):
        """
        Restore (and return) default value for the specified key.

        This method will only work for a ConfigObj that was created
        with a configspec and has been validated.

        If there is no default value for this key, ``KeyError`` is raised.
        """
        default = self.default_values[key]
        # bypass __setitem__ so the entry is not re-registered as a scalar
        dict.__setitem__(self, key, default)
        if key not in self.defaults:
            self.defaults.append(key)
        return default


    def restore_defaults(self):
        """
        Recursively restore default values to all members
        that have them.
        This method will only work for a ConfigObj that was created
        with a configspec and has been validated.

        It doesn't delete or modify entries without default values.
        """
        for key in self.default_values:
            self.restore_default(key)

        for section in self.sections:
            self[section].restore_defaults()


class ConfigObj(Section):
    """An object to read, create, and write config files."""

    # matches a ``key = value`` line; groups: indentation, key, raw value
    _keyword = re.compile(r'''^ # line start
        (\s*)                   # indentation
        (                       # keyword
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"=].*?)       # no quotes
        )
        \s*=\s*                 # divider
        (.*)                    # value (including list values and comments)
        $   # line end
        ''',
        re.VERBOSE)

    _sectionmarker = re.compile(r'''^
        (\s*)                     # 1: indentation
        ((?:\[\s*)+)              # 2: section marker open
        (                         # 3: section name open
            (?:"\s*\S.*?\s*")|    # at least one non-space with double quotes
            (?:'\s*\S.*?\s*')|    # at least one non-space with single quotes
            (?:[^'"\s].*?)        # at least one non-space unquoted
        )                         # section name close
        ((?:\s*\])+)              # 4: section marker close
        \s*(\#.*)?                # 5: optional comment
        $''',
        re.VERBOSE)

    # this regexp pulls list values out as a single string
    # or single values and comments
    # FIXME: this regex adds a '' to the end of comma terminated lists
    #   workaround in ``_handle_value``
    _valueexp = re.compile(r'''^
        (?:
            (?:
                (
                    (?:
                        (?:
                            (?:".*?")|              # double quotes
                            (?:'.*?')|              # single quotes
                            (?:[^'",\#][^,\#]*?)    # unquoted
                        )
                        \s*,\s*                     # comma
                    )*      # match all list items ending in a comma (if any)
                )
                (
                    (?:".*?")|                      # double quotes
                    (?:'.*?')|                      # single quotes
                    (?:[^'",\#\s][^,]*?)|           # unquoted
                    (?:(?<!,))                      # Empty value
                )?          # last item in a list - or string value
            )|
            (,)             # alternatively a single comma - empty list
        )
        \s*(\#.*)?          # optional comment
        $''',
        re.VERBOSE)

    # use findall to get the members of a list value
    _listvalueexp = re.compile(r'''
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'",\#]?.*?)    # unquoted
        )
        \s*,\s*                 # comma
        ''',
        re.VERBOSE)

    # this regexp is used for the value
    # when lists are switched off
    _nolistvalue = re.compile(r'''^
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"\#].*?)|     # unquoted
            (?:)                # Empty value
        )
        \s*(\#.*)?              # optional comment
        $''',
        re.VERBOSE)

    # regexes for finding triple quoted values on one line
    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')

    # maps the opening quote style to (single-line, multi-line) matchers
    _triple_quote = {
        "'''": (_single_line_single, _multi_line_single),
        '"""': (_single_line_double, _multi_line_double),
    }

    # Used by the ``istrue`` Section method
    _bools = {
        'yes': True, 'no': False,
        'on': True, 'off': False,
        '1': True, '0': False,
        'true': True, 'false': False,
        }


    def __init__(self, infile=None, options=None, configspec=None,
                 encoding=None, interpolation=True, raise_errors=False,
                 list_values=True, create_empty=False, file_error=False,
                 stringify=True, indent_type=None, default_encoding=None,
                 unrepr=False, write_empty_values=False, _inspec=False):
        """
        Parse a config file or create a config file object.
        ``ConfigObj(infile=None, configspec=None, encoding=None,
                    interpolation=True, raise_errors=False, list_values=True,
                    create_empty=False, file_error=False, stringify=True,
                    indent_type=None, default_encoding=None, unrepr=False,
                    write_empty_values=False, _inspec=False)``
        """
        self._inspec = _inspec
        # init the superclass
        Section.__init__(self, self, 0, self)

        infile = infile or []

        # keyword arguments collected so they can be compared against any
        # (deprecated) explicit options dictionary below
        _options = {'configspec': configspec,
                    'encoding': encoding, 'interpolation': interpolation,
                    'raise_errors': raise_errors, 'list_values': list_values,
                    'create_empty': create_empty, 'file_error': file_error,
                    'stringify': stringify, 'indent_type': indent_type,
                    'default_encoding': default_encoding, 'unrepr': unrepr,
                    'write_empty_values': write_empty_values}

        if options is None:
            options = _options
        else:
            import warnings
            warnings.warn('Passing in an options dictionary to ConfigObj() is '
                          'deprecated. Use **options instead.',
                          DeprecationWarning, stacklevel=2)

            # TODO: check the values too.
            for entry in options:
                if entry not in OPTION_DEFAULTS:
                    raise TypeError('Unrecognised option "%s".' % entry)
            for entry, value in OPTION_DEFAULTS.items():
                if entry not in options:
                    options[entry] = value
                # explicit keyword arguments win over the options dict
                keyword_value = _options[entry]
                if value != keyword_value:
                    options[entry] = keyword_value

        # XXXX this ignores an explicit list_values = True in combination
        # with _inspec. The user should *never* do that anyway, but still...
        if _inspec:
            options['list_values'] = False

        self._initialise(options)
        configspec = options['configspec']
        self._original_configspec = configspec
        self._load(infile, configspec)


    def _load(self, infile, configspec):
        # ``infile`` may be a filename, a list of lines, a dict/ConfigObj,
        # or a file-like object; each is normalised to a list of lines.
        if isinstance(infile, basestring):
            self.filename = infile
            if os.path.isfile(infile):
                h = open(infile, 'rb')
                infile = h.read() or []
                h.close()
            elif self.file_error:
                # raise an error if the file doesn't exist
                raise IOError('Config file not found: "%s".' % self.filename)
            else:
                # file doesn't already exist
                if self.create_empty:
                    # this is a good test that the filename specified
                    # isn't impossible - like on a non-existent device
                    h = open(infile, 'w')
                    h.write('')
                    h.close()
                infile = []

        elif isinstance(infile, (list, tuple)):
            infile = list(infile)

        elif isinstance(infile, dict):
            # initialise self
            # the Section class handles creating subsections
            if isinstance(infile, ConfigObj):
                # get a copy of our ConfigObj
                def set_section(in_section, this_section):
                    for entry in in_section.scalars:
                        this_section[entry] = in_section[entry]
                    for section in in_section.sections:
                        this_section[section] = {}
                        set_section(in_section[section], this_section[section])
                set_section(infile, self)
            else:
                for entry in infile:
                    self[entry] = infile[entry]
            del self._errors

            if configspec is not None:
                self._handle_configspec(configspec)
            else:
                self.configspec = None
            return

        elif getattr(infile, 'read', MISSING) is not MISSING:
            # This supports file like objects
            infile = infile.read() or []
            # needs splitting into lines - but needs doing *after* decoding
            # in case it's not an 8 bit encoding
        else:
            raise TypeError('infile must be a filename, file like object, or list of lines.')

        if infile:
            # don't do it for the empty ConfigObj
            infile = self._handle_bom(infile)
            # infile is now *always* a list
            #
            # Set the newlines attribute (first line ending it finds)
            # and strip trailing '\n' or '\r' from lines
            for line in infile:
                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
                    continue
                for end in ('\r\n', '\n', '\r'):
                    if line.endswith(end):
                        self.newlines = end
                        break
                break

            infile = [line.rstrip('\r\n') for line in infile]

        self._parse(infile)
        # if we had any errors, now is the time to raise them
        if self._errors:
            info = "at line %s."
                   % self._errors[0].line_number
            if len(self._errors) > 1:
                msg = "Parsing failed with several errors.\nFirst error %s" % info
                error = ConfigObjError(msg)
            else:
                error = self._errors[0]
            # set the errors attribute; it's a list of tuples:
            # (error_type, message, line_number)
            error.errors = self._errors
            # set the config attribute
            error.config = self
            raise error
        # delete private attributes
        del self._errors

        if configspec is None:
            self.configspec = None
        else:
            self._handle_configspec(configspec)


    def _initialise(self, options=None):
        # Reset all ConfigObj-level state from an options dict
        # (falling back to the module-level OPTION_DEFAULTS).
        if options is None:
            options = OPTION_DEFAULTS

        # initialise a few variables
        self.filename = None
        self._errors = []
        self.raise_errors = options['raise_errors']
        self.interpolation = options['interpolation']
        self.list_values = options['list_values']
        self.create_empty = options['create_empty']
        self.file_error = options['file_error']
        self.stringify = options['stringify']
        self.indent_type = options['indent_type']
        self.encoding = options['encoding']
        self.default_encoding = options['default_encoding']
        self.BOM = False
        self.newlines = None
        self.write_empty_values = options['write_empty_values']
        self.unrepr = options['unrepr']

        self.initial_comment = []
        self.final_comment = []
        self.configspec = None

        if self._inspec:
            self.list_values = False

        # Clear section attributes as well
        Section._initialise(self)


    def __repr__(self):
        def _getval(key):
            # fall back to the raw stored value when interpolation fails
            try:
                return self[key]
            except MissingInterpolationOption:
                return dict.__getitem__(self, key)
        return ('ConfigObj({%s})' %
                ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
                for key in (self.scalars + self.sections)]))


    def _handle_bom(self, infile):
        """
        Handle any BOM, and decode if necessary.

        If an encoding is specified, that *must* be used - but the BOM should
        still be removed (and the BOM attribute set).

        (If the encoding is wrongly specified, then a BOM for an alternative
        encoding won't be discovered or removed.)

        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
        removed. The BOM attribute will be set.
                        infile
                # UTF16 - have to decode
                return self._decode(infile, encoding)

        # No BOM discovered and no encoding specified, just return
        if isinstance(infile, basestring):
            # infile read from a file will be a single string
            return infile.splitlines(True)
        return infile


    def _a_to_u(self, aString):
        """Decode ASCII strings to unicode if a self.encoding is specified."""
        if self.encoding:
            return aString.decode('ascii')
        else:
            return aString


    def _decode(self, infile, encoding):
        """
        Decode infile to unicode. Using the specified encoding.

        if is a string, it also needs converting to a list.
        """
        if isinstance(infile, basestring):
            # can't be unicode
            # NOTE: Could raise a ``UnicodeDecodeError``
            return infile.decode(encoding).splitlines(True)
        for i, line in enumerate(infile):
            if not isinstance(line, unicode):
                # NOTE: The isinstance test here handles mixed lists of unicode/string
                # NOTE: But the decode will break on any non-string values
                # NOTE: Or could raise a ``UnicodeDecodeError``
                infile[i] = line.decode(encoding)
        return infile


    def _decode_element(self, line):
        """Decode element to unicode if necessary."""
        if not self.encoding:
            return line
        if isinstance(line, str) and self.default_encoding:
            return line.decode(self.default_encoding)
        return line


    def _str(self, value):
        """
        Used by ``stringify`` within validate, to turn non-string values
        into strings.
        """
        if not isinstance(value, basestring):
            return str(value)
        else:
            return value


    def _parse(self, infile):
        """Actually parse the config file."""
        # lists in values are disabled while unrepr parsing is active
        temp_list_values = self.list_values
        if self.unrepr:
            self.list_values = False

        comment_list = []
        done_start = False
        this_section = self
        maxline = len(infile) - 1
        cur_index = -1
        reset_comment = False

        while cur_index < maxline:
            if reset_comment:
                comment_list = []
            cur_index += 1
            line = infile[cur_index]
            sline = line.strip()
            # do we have anything on the line ?
            if not sline or sline.startswith('#'):
                reset_comment = False
                comment_list.append(line)
                continue

            if not done_start:
                # preserve initial comment
                self.initial_comment = comment_list
                comment_list = []
                done_start = True

            reset_comment = True
            # first we check if it's a section marker
            mat = self._sectionmarker.match(line)
            if mat is not None:
                # is a section line
                (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # nesting depth is the number of '[' in the marker
                cur_depth = sect_open.count('[')
                if cur_depth != sect_close.count(']'):
                    self._handle_error("Cannot compute the section depth at line %s.",
                                       NestingError, infile, cur_index)
                    continue

                if cur_depth < this_section.depth:
                    # the new section is dropping back to a previous level
                    try:
                        parent = self._match_depth(this_section,
                                                   cur_depth).parent
                    except SyntaxError:
                        self._handle_error("Cannot compute nesting level at line %s.",
                                           NestingError, infile, cur_index)
                        continue
                elif cur_depth == this_section.depth:
                    # the new section is a sibling of the current section
                    parent = this_section.parent
                elif cur_depth == this_section.depth + 1:
                    # the new section is a child the current section
                    parent = this_section
                else:
                    self._handle_error("Section too nested at line %s.",
                                       NestingError, infile, cur_index)

                sect_name = self._unquote(sect_name)
                if sect_name in parent:
                    self._handle_error('Duplicate section name at line %s.',
                                       DuplicateError, infile, cur_index)
                    continue

                # create the new section
                this_section = Section(
                    parent,
                    cur_depth,
                    self,
                    name=sect_name)
                parent[sect_name] = this_section
                parent.inline_comments[sect_name] = comment
                parent.comments[sect_name] = comment_list
                continue
            #
            # it's not a section marker,
            # so it should be a valid ``key = value`` line
            mat = self._keyword.match(line)
            if mat is None:
                # it neither matched as a keyword
                # or a section marker
                self._handle_error(
                    'Invalid line at line "%s".',
                    ParseError, infile, cur_index)
            else:
                # is a keyword value
                # value will include any inline comment
                (indent, key, value) = mat.groups()
                if indent and (self.indent_type is None):
                    self.indent_type = indent
                # check for a multiline value
                if value[:3] in ['"""', "'''"]:
                    try:
                        value, comment, cur_index = self._multiline(
                            value, infile, cur_index, maxline)
                    except SyntaxError:
                        self._handle_error(
                            'Parse error in value at line %s.',
                            ParseError, infile, cur_index)
                        continue
                    else:
                        if self.unrepr:
                            comment = ''
                            try:
                                value = unrepr(value)
                            except Exception, e:
                                if type(e) == UnknownType:
                                    msg = 'Unknown name or type in value at line %s.'
                                else:
                                    msg = 'Parse error in value at line %s.'
                                self._handle_error(msg, UnreprError, infile,
                                    cur_index)
                                continue
                else:
                    if self.unrepr:
                        comment = ''
                        try:
                            value = unrepr(value)
                        except Exception, e:
                            if isinstance(e, UnknownType):
                                msg = 'Unknown name or type in value at line %s.'
                            else:
                                msg = 'Parse error in value at line %s.'
                            self._handle_error(msg, UnreprError, infile,
                                cur_index)
                            continue
                    else:
                        # extract comment and lists
                        try:
                            (value, comment) = self._handle_value(value)
                        except SyntaxError:
                            self._handle_error(
                                'Parse error in value at line %s.',
                                ParseError, infile, cur_index)
                            continue
                #
                key = self._unquote(key)
                if key in this_section:
                    self._handle_error(
                        'Duplicate keyword name at line %s.',
                        DuplicateError, infile, cur_index)
                    continue
                # add the key.
                # we set unrepr because if we have got this far we will never
                # be creating a new section
                this_section.__setitem__(key, value, unrepr=True)
                this_section.inline_comments[key] = comment
                this_section.comments[key] = comment_list
                continue
        #
        if self.indent_type is None:
            # no indentation used, set the type accordingly
            self.indent_type = ''

        # preserve the final comment
        if not self and not self.initial_comment:
            self.initial_comment = comment_list
        elif not reset_comment:
            self.final_comment = comment_list
        self.list_values = temp_list_values


    def _match_depth(self, sect, depth):
        """
        Given a section and a depth level, walk back through the sections
        parents to see if the depth level matches a previous section.
        Return a reference to the right section,
        or raise a SyntaxError.
        """
        while depth < sect.depth:
            if sect is sect.parent:
                # we've reached the top level already
                raise SyntaxError()
            sect = sect.parent
        if sect.depth == depth:
            return sect
        # shouldn't get here
        raise SyntaxError()


    def _handle_error(self, text, ErrorClass, infile, cur_index):
        """
        Handle an error according to the error settings.

        Either raise the error or store it.
        The error will have occured at ``cur_index``
        """
        line = infile[cur_index]
        # 1-based line number for the user-facing message
        cur_index += 1
        message = text % cur_index
        error = ErrorClass(message, cur_index, line)
        if self.raise_errors:
            # raise the error - parsing stops here
            raise error
        # store the error
        # reraise when parsing has finished
        self._errors.append(error)


    def _unquote(self, value):
        """Return an unquoted version of a value"""
        if not value:
            # should only happen during parsing of lists
            raise SyntaxError
        if (value[0] == value[-1]) and (value[0] in ('"', "'")):
            value = value[1:-1]
        return value


    def _quote(self, value, multiline=True):
        """
        Return a safely quoted version of a value.

        Raise a ConfigObjError if the value cannot be safely quoted.
        If multiline is ``True`` (default) then use triple quotes
        if necessary.

        * Don't quote values that don't need it.
        * Recursively quote members of a list and return a comma joined list.
        * Multiline is ``False`` for lists.
        * Obey list syntax for empty and single member lists.

        If ``list_values=False`` then the value is only quoted if it contains
        a ``\\n`` (is multiline) or '#'.

        If ``write_empty_values`` is set, and the value is an empty string, it
        won't be quoted.
""" if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, basestring): if self.stringify: value = str(value) else: raise TypeError('Value "%s" is not a string.' % value) if not value: return '""' no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif '"' in value: quot = squot else: quot = dquot return quot def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError('Value "%s" cannot be safely quoted.' 
                                 % value)
        if value.find('"""') == -1:
            quot = tdquot
        else:
            quot = tsquot
        return quot


    def _handle_value(self, value):
        """
        Given a value string, unquote, remove comment,
        handle lists. (including empty and single member lists)
        """
        if self._inspec:
            # Parsing a configspec so don't handle comments
            return (value, '')
        # do we look for lists in values ?
        if not self.list_values:
            mat = self._nolistvalue.match(value)
            if mat is None:
                raise SyntaxError()
            # NOTE: we don't unquote here
            return mat.groups()
        #
        mat = self._valueexp.match(value)
        if mat is None:
            # the value is badly constructed, probably badly quoted,
            # or an invalid list
            raise SyntaxError()
        (list_values, single, empty_list, comment) = mat.groups()
        if (list_values == '') and (single is None):
            # change this if you want to accept empty values
            raise SyntaxError()
        # NOTE: note there is no error handling from here if the regex
        # is wrong: then incorrect values will slip through
        if empty_list is not None:
            # the single comma - meaning an empty list
            return ([], comment)
        if single is not None:
            # handle empty values
            if list_values and not single:
                # FIXME: the '' is a workaround because our regex now matches
                # '' at the end of a list if it has a trailing comma
                single = None
            else:
                single = single or '""'
                single = self._unquote(single)
        if list_values == '':
            # not a list value
            return (single, comment)
        the_list = self._listvalueexp.findall(list_values)
        the_list = [self._unquote(val) for val in the_list]
        if single is not None:
            the_list += [single]
        return (the_list, comment)


    def _multiline(self, value, infile, cur_index, maxline):
        """Extract the value, where we are in a multiline situation."""
        quot = value[:3]
        newvalue = value[3:]
        single_line = self._triple_quote[quot][0]
        multi_line = self._triple_quote[quot][1]
        mat = single_line.match(value)
        if mat is not None:
            # opening and closing triple quote on the same line
            retval = list(mat.groups())
            retval.append(cur_index)
            return retval
        elif newvalue.find(quot) != -1:
            # somehow the triple quote is missing
            raise SyntaxError()
        #
        # consume following lines until the closing triple quote
        while cur_index < maxline:
            cur_index += 1
            newvalue += '\n'
            line = infile[cur_index]
            if line.find(quot) == -1:
                newvalue += line
            else:
                # end of multiline, process it
                break
        else:
            # we've got to the end of the config, oops...
            raise SyntaxError()
        mat = multi_line.match(line)
        if mat is None:
            # a badly formed line
            raise SyntaxError()
        (value, comment) = mat.groups()
        return (newvalue + value, comment, cur_index)


    def _handle_configspec(self, configspec):
        """Parse the configspec."""
        # FIXME: Should we check that the configspec was created with the
        #        correct settings ? (i.e. ``list_values=False``)
        if not isinstance(configspec, ConfigObj):
            try:
                configspec = ConfigObj(configspec,
                                       raise_errors=True,
                                       file_error=True,
                                       _inspec=True)
            except ConfigObjError, e:
                # FIXME: Should these errors have a reference
                #        to the already parsed ConfigObj ?
                raise ConfigspecError('Parsing configspec failed: %s' % e)
            except IOError, e:
                raise IOError('Reading configspec failed: %s' % e)

        self.configspec = configspec


    def _set_configspec(self, section, copy):
        """
        Called by validate. Handles setting the configspec on subsections
        including sections to be validated by __many__
        """
        configspec = section.configspec
        many = configspec.get('__many__')
        if isinstance(many, dict):
            # the __many__ spec applies to every subsection without
            # an explicit spec of its own
            for entry in section.sections:
                if entry not in configspec:
                    section[entry].configspec = many

        for entry in configspec.sections:
            if entry == '__many__':
                continue
            if entry not in section:
                section[entry] = {}
                section[entry]._created = True
                if copy:
                    # copy comments
                    section.comments[entry] = configspec.comments.get(entry, [])
                    section.inline_comments[entry] = configspec.inline_comments.get(entry, '')

            # Could be a scalar when we expect a section
            if isinstance(section[entry], Section):
                section[entry].configspec = configspec[entry]


    def _write_line(self, indent_string, entry, this_entry, comment):
        """Write an individual line, for the write method"""
        # NOTE: the calls to self._quote here handles non-StringType values.
        if not self.unrepr:
            val = self._decode_element(self._quote(this_entry))
        else:
            val = repr(this_entry)
        return '%s%s%s%s%s' % (indent_string,
                               self._decode_element(self._quote(entry, multiline=False)),
                               self._a_to_u(' = '),
                               val,
                               self._decode_element(comment))


    def _write_marker(self, indent_string, depth, entry, comment):
        """Write a section marker line"""
        return '%s%s%s%s%s' % (indent_string,
                               self._a_to_u('[' * depth),
                               self._quote(self._decode_element(entry), multiline=False),
                               self._a_to_u(']' * depth),
                               self._decode_element(comment))


    def _handle_comment(self, comment):
        """Deal with a comment."""
        if not comment:
            return ''
        start = self.indent_type
        if not comment.startswith('#'):
            # ensure inline comments are introduced by ' # '
            start += self._a_to_u(' # ')
        return (start + comment)


    # Public methods

    def write(self, outfile=None, section=None):
        """
        Write the current ConfigObj as a file

        tekNico: FIXME: use StringIO instead of real files

        >>> filename = a.filename
        >>> a.filename = 'test.ini'
        >>> a.write()
        >>> a.filename = filename
        >>> a == ConfigObj('test.ini', raise_errors=True)
        1
        >>> import os
        >>> os.remove('test.ini')
        """
        if self.indent_type is None:
            # this can be true if initialised from a dictionary
            self.indent_type = DEFAULT_INDENT_TYPE

        out = []
        cs = self._a_to_u('#')
        csp = self._a_to_u('# ')
        if section is None:
            # top-level call: disable interpolation so raw values are written
            int_val = self.interpolation
            self.interpolation = False
            section = self
            for line in self.initial_comment:
                line = self._decode_element(line)
                stripped_line = line.strip()
                if stripped_line and not stripped_line.startswith(cs):
                    line = csp + line
                out.append(line)

        indent_string = self.indent_type * section.depth
        for entry in (section.scalars + section.sections):
            if entry in section.defaults:
                # don't write out default values
                continue
            for comment_line in section.comments[entry]:
                comment_line = self._decode_element(comment_line.lstrip())
                if comment_line and not comment_line.startswith(cs):
                    comment_line = csp + comment_line
                out.append(indent_string + comment_line)
            this_entry = section[entry]
            comment = self._handle_comment(section.inline_comments[entry])

            if isinstance(this_entry, dict):
                # a section
                out.append(self._write_marker(
                    indent_string,
                    this_entry.depth,
                    entry,
                    comment))
                # recurse for the subsection's lines
                out.extend(self.write(section=this_entry))
            else:
                out.append(self._write_line(
                    indent_string,
                    entry,
                    this_entry,
                    comment))

        if section is self:
            for line in self.final_comment:
                line = self._decode_element(line)
                stripped_line = line.strip()
                if stripped_line and not stripped_line.startswith(cs):
                    line = csp + line
                out.append(line)
            self.interpolation = int_val

        if section is not self:
            # recursive call: hand the lines back to the caller
            return out

        if (self.filename is None) and (outfile is None):
            # output a list of lines
            # might need to encode
            # NOTE: This will *screw* UTF16, each line will start with the BOM
            if self.encoding:
                out = [l.encode(self.encoding) for l in out]
            if (self.BOM and ((self.encoding is None) or
                (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
                # Add the UTF8 BOM
                if not out:
                    out.append('')
                out[0] = BOM_UTF8 + out[0]
            return out

        # Turn the list to a string, joined with correct newlines
        newline = self.newlines or os.linesep
        if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
            and sys.platform == 'win32' and newline == '\r\n'):
            # Windows specific hack to avoid writing '\r\r\n'
            newline = '\n'
        output = self._a_to_u(newline).join(out)
        if self.encoding:
            output = output.encode(self.encoding)
        if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
            # Add the UTF8 BOM
            output = BOM_UTF8 + output

        if not output.endswith(newline):
            output += newline
        if outfile is not None:
            outfile.write(output)
        else:
            h = open(self.filename, 'wb')
            h.write(output)
            h.close()


    def validate(self, validator, preserve_errors=False, copy=False,
                 section=None):
        """
        Test the ConfigObj against a configspec.

        It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. """ if section is None: if self.configspec is None: raise ValueError('No configspec supplied.') if preserve_errors: # We do this once to remove a top level dependency on the validate module # Which makes importing configobj faster from validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue section = self if copy: section.initial_comment = section.configspec.initial_comment section.final_comment = section.configspec.final_comment section.encoding = section.configspec.encoding section.BOM = section.configspec.BOM section.newlines = section.configspec.newlines section.indent_type = section.configspec.indent_type # # section.default_values.clear() #?? 
configspec = section.configspec self._set_configspec(section, copy) def validate_entry(entry, spec, val, missing, ret_true, ret_false): section.default_values.pop(entry, None) try: section.default_values[entry] = validator.get_default_value(configspec[entry]) except (KeyError, AttributeError, validator.baseErrorClass): # No default, bad default or validator has no 'get_default_value' # (e.g. SimpleVal) pass try: check = validator.check(spec, val, missing=missing ) except validator.baseErrorClass, e: if not preserve_errors or isinstance(e, self._vdtMissingValue): out[entry] = False else: # preserve the error out[entry] = e ret_false = False ret_true = False else: ret_false = False out[entry] = True if self.stringify or missing: # if we are doing type conversion # or the value is a supplied default if not self.stringify: if isinstance(check, (list, tuple)): # preserve lists check = [self._str(item) for item in check] elif missing and check is None: # convert the None from a default to a '' check = '' else: check = self._str(check) if (check != val) or missing: section[entry] = check if not copy and missing and entry not in section.defaults: section.defaults.append(entry) return ret_true, ret_false # out = {} ret_true = True ret_false = True unvalidated = [k for k in section.scalars if k not in configspec] incorrect_sections = [k for k in configspec.sections if k in section.scalars] incorrect_scalars = [k for k in configspec.scalars if k in section.sections] for entry in configspec.scalars: if entry in ('__many__', '___many___'): # reserved names continue if (not entry in section.scalars) or (entry in section.defaults): # missing entries # or entries from defaults missing = True val = None if copy and entry not in section.scalars: # copy comments section.comments[entry] = ( configspec.comments.get(entry, [])) section.inline_comments[entry] = ( configspec.inline_comments.get(entry, '')) # else: missing = False val = section[entry] ret_true, ret_false = 
validate_entry(entry, configspec[entry], val, missing, ret_true, ret_false) many = None if '__many__' in configspec.scalars: many = configspec['__many__'] elif '___many___' in configspec.scalars: many = configspec['___many___'] if many is not None: for entry in unvalidated: val = section[entry] ret_true, ret_false = validate_entry(entry, many, val, False, ret_true, ret_false) unvalidated = [] for entry in incorrect_scalars: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Value %r was provided as a section' % entry out[entry] = validator.baseErrorClass(msg) for entry in incorrect_sections: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Section %r was provided as a single value' % entry out[entry] = validator.baseErrorClass(msg) # Missing sections will have been created as empty ones when the # configspec was read. for entry in section.sections: # FIXME: this means DEFAULT is not copied in copy mode if section is self and entry == 'DEFAULT': continue if section[entry].configspec is None: unvalidated.append(entry) continue if copy: section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) out[entry] = check if check == False: ret_true = False elif check == True: ret_false = False else: ret_true = False section.extra_values = unvalidated if preserve_errors and not section._created: # If the section wasn't created (i.e. it wasn't missing) # then we can't return False, we need to preserve errors ret_false = False # if ret_false and preserve_errors and out: # If we are preserving errors, but all # the failures are from missing sections / values # then we can return False. Otherwise there is a # real failure that we need to preserve. 
ret_false = not any(out.values()) if ret_true: return True elif ret_false: return False return out def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ if not isinstance(self.filename, basestring): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec) class SimpleVal(object): """ A simple validator. Can be used to check that all members expected are present. To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ def __init__(self): self.baseErrorClass = ConfigObjError def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. 
(This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. """ if levels is None: # first time called levels = [] results = [] if res == True: return results if res == False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return results for (key, val) in res.items(): if val == True: continue if isinstance(cfg.get(key), dict): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return results def get_extra_values(conf, _prepend=()): """ Find all the values and sections not in the configspec from a validated ConfigObj. ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. 
For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list. """ out = [] out.extend([(_prepend, name) for name in conf.extra_values]) for name in conf.sections: if name not in conf.extra_values: out.extend(get_extra_values(conf[name], _prepend + (name,))) return out """*A programming language is a medium of expression.* - Paul Graham"""
88,163
Python
.py
2,074
29.865477
114
0.541903
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,762
feedparser.py
evilhero_mylar/lib/feedparser.py
#!/usr/bin/env python """Universal feed parser Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds Visit http://feedparser.org/ for the latest version Visit http://feedparser.org/docs/ for the latest documentation Required: Python 2.4 or later Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> """ __version__ = "5.0.1" __license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" __author__ = "Mark Pilgrim <http://diveintomark.org/>" __contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com/>", "Kevin Marks <http://epeus.blogspot.com/>", "Sam Ruby <http://intertwingly.net/>", "Ade Oshineye <http://blog.oshineye.com/>", "Martin Pool <http://sourcefrog.net/>", "Kurt McKee <http://kurtmckee.org/>"] _debug = 0 # HTTP "User-Agent" header to send to servers when downloading feeds. # If you are embedding feedparser in a larger application, you should # change this to your application name and URL. USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ # HTTP "Accept" header to send to servers when downloading feeds. If you don't # want to send an Accept header, set this to None. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" # List of preferred XML parsers, by SAX driver name. These will be tried first, # but if they're not installed, Python will keep searching through its own list # of pre-installed parsers until it finds one that supports everything we need. PREFERRED_XML_PARSERS = ["drv_libxml2"] # If you want feedparser to automatically run HTML markup through HTML Tidy, set # this to 1. 
Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> # or utidylib <http://utidylib.berlios.de/>. TIDY_MARKUP = 0 # List of Python interfaces for HTML Tidy, in order of preference. Only useful # if TIDY_MARKUP = 1 PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] # If you want feedparser to automatically resolve all relative URIs, set this # to 1. RESOLVE_RELATIVE_URIS = 1 # If you want feedparser to automatically sanitize all potentially unsafe # HTML content, set this to 1. SANITIZE_HTML = 1 # ---------- Python 3 modules (make it work if possible) ---------- try: import rfc822 except ImportError: from email import _parseaddr as rfc822 try: # Python 3.1 introduces bytes.maketrans and simultaneously # deprecates string.maketrans; use bytes.maketrans if possible _maketrans = bytes.maketrans except (NameError, AttributeError): import string _maketrans = string.maketrans # base64 support for Atom feeds that contain embedded binary data try: import base64, binascii # Python 3.1 deprecates decodestring in favor of decodebytes _base64decode = getattr(base64, 'decodebytes', base64.decodestring) except: base64 = binascii = None def _s2bytes(s): # Convert a UTF-8 str to bytes if the interpreter is Python 3 try: return bytes(s, 'utf8') except (NameError, TypeError): # In Python 2.5 and below, bytes doesn't exist (NameError) # In Python 2.6 and above, bytes and str are the same (TypeError) return s def _l2bytes(l): # Convert a list of ints to bytes if the interpreter is Python 3 try: if bytes is not str: # In Python 2.6 and above, this call won't raise an exception # but it will return bytes([65]) as '[65]' instead of 'A' return bytes(l) raise NameError except NameError: return ''.join(map(chr, l)) # If you want feedparser to allow all URL schemes, set this to () # List culled from Python's urlparse documentation at: # http://docs.python.org/library/urlparse.html # as well as from "URI scheme" at Wikipedia: # https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme 
# Many more will likely need to be added! ACCEPTABLE_URI_SCHEMES = ( 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais', # Additional common-but-unofficial schemes 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', ) #ACCEPTABLE_URI_SCHEMES = () # ---------- required modules (should come with any Python distribution) ---------- import sgmllib, re, sys, copy, urlparse, time, types, cgi, urllib, urllib2, datetime try: from io import BytesIO as _StringIO except ImportError: try: from cStringIO import StringIO as _StringIO except: from StringIO import StringIO as _StringIO # ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- # gzip is included with most Python distributions, but may not be available if you compiled your own try: import gzip except: gzip = None try: import zlib except: zlib = None # If a real XML parser is available, feedparser will attempt to use it. feedparser has # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1 except: _XML_AVAILABLE = 0 def _xmlescape(data,entities={}): data = data.replace('&', '&amp;') data = data.replace('>', '&gt;') data = data.replace('<', '&lt;') for char, entity in entities: data = data.replace(char, entity) return data # cjkcodecs and iconv_codec provide support for more character encodings. 
# Both are available from http://cjkpython.i18n.org/ try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass # chardet library auto-detects character encodings # Download from http://chardet.feedparser.org/ try: import chardet if _debug: import chardet.constants chardet.constants._debug = 1 except: chardet = None # reversable htmlentitydefs mappings for Python 2.2 try: from htmlentitydefs import name2codepoint, codepoint2name except: import htmlentitydefs name2codepoint={} codepoint2name={} for (name,codepoint) in htmlentitydefs.entitydefs.iteritems(): if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1])) name2codepoint[name]=ord(codepoint) codepoint2name[ord(codepoint)]=name # BeautifulSoup parser used for parsing microformats from embedded HTML content # http://www.crummy.com/software/BeautifulSoup/ # feedparser is tested with BeautifulSoup 3.0.x, but it might work with the # older 2.x series. If it doesn't, and you can figure out why, I'll accept a # patch and modify the compatibility statement accordingly. try: import BeautifulSoup except: BeautifulSoup = None # ---------- don't touch these ---------- class ThingsNobodyCaresAboutButMe(Exception): pass class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass class UndeclaredNamespace(Exception): pass sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') sgmllib.special = re.compile('<!') sgmllib.charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);') if sgmllib.endbracket.search(' <').start(0): class EndBracketRegEx: def __init__(self): # Overriding the built-in sgmllib.endbracket regex allows the # parser to find angle brackets embedded in element attributes. 
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''') def search(self,string,index=0): match = self.endbracket.match(string,index) if match is not None: # Returning a new object in the calling thread's context # resolves a thread-safety. return EndBracketMatch(match) return None class EndBracketMatch: def __init__(self, match): self.match = match def start(self, n): return self.match.end(n) sgmllib.endbracket = EndBracketRegEx() SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom10': 'Atom 1.0', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS' } try: UserDict = dict except NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for k, v in aList: rc[k] = v return rc class FeedParserDict(UserDict): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['summary', 'subtitle'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail'} def __getitem__(self, key): if key == 'category': return UserDict.__getitem__(self, 'tags')[0]['term'] if key == 'enclosures': norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel']) return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure'] if key == 'license': for link in UserDict.__getitem__(self, 'links'): if link['rel']=='license' and link.has_key('href'): return link['href'] if key == 
'categories': return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')] realkey = self.keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.__contains__(self, k): return UserDict.__getitem__(self, k) if UserDict.__contains__(self, key): return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def __setitem__(self, key, value): for k in self.keymap.keys(): if key == k: key = self.keymap[k] if type(key) == types.ListType: key = key[0] return UserDict.__setitem__(self, key, value) def get(self, key, default=None): if self.has_key(key): return self[key] else: return default def setdefault(self, key, value): if not self.has_key(key): self[key] = value return self[key] def has_key(self, key): try: return hasattr(self, key) or UserDict.__contains__(self, key) except AttributeError: return False # This alias prevents the 2to3 tool from changing the semantics of the # __contains__ function below and exhausting the maximum recursion depth __has_key = has_key def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: assert not key.startswith('_') return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __setattr__(self, key, value): if key.startswith('_') or key == 'data': self.__dict__[key] = value else: return self.__setitem__(key, value) def __contains__(self, key): return self.__has_key(key) def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc _ebcdic_to_ascii_map = None def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 ) _ebcdic_to_ascii_map = _maketrans( \ _l2bytes(range(256)), _l2bytes(emap)) return s.translate(_ebcdic_to_ascii_map) _cp1252 = { unichr(128): unichr(8364), # euro sign unichr(130): unichr(8218), # single low-9 quotation mark unichr(131): unichr( 402), # latin small letter f with hook unichr(132): unichr(8222), # double low-9 quotation mark unichr(133): unichr(8230), # horizontal ellipsis unichr(134): unichr(8224), # dagger unichr(135): unichr(8225), # double dagger unichr(136): unichr( 710), # modifier letter circumflex accent unichr(137): unichr(8240), # per mille sign unichr(138): unichr( 352), # latin capital letter s with caron unichr(139): unichr(8249), # single left-pointing angle quotation mark unichr(140): unichr( 338), # latin capital ligature oe unichr(142): unichr( 381), # latin capital letter z with caron unichr(145): unichr(8216), # left single quotation mark unichr(146): unichr(8217), # right single quotation mark unichr(147): unichr(8220), # left double quotation mark unichr(148): unichr(8221), # right double quotation mark unichr(149): unichr(8226), # bullet unichr(150): unichr(8211), # en dash unichr(151): unichr(8212), # em dash unichr(152): unichr( 732), # small tilde unichr(153): unichr(8482), # trade mark sign unichr(154): unichr( 353), # latin 
small letter s with caron unichr(155): unichr(8250), # single right-pointing angle quotation mark unichr(156): unichr( 339), # latin small ligature oe unichr(158): unichr( 382), # latin small letter z with caron unichr(159): unichr( 376)} # latin capital letter y with diaeresis _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) try: return urlparse.urljoin(base, uri) except: uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)]) return urlparse.urljoin(base, uri) class _FeedParserMixin: namespaces = {'': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 'http://purl.org/rss/1.0/modules/company': 'co', 'http://purl.org/rss/1.0/modules/content/': 'content', 'http://my.theinfo.org/changed/1.0/rss/': 'cp', 'http://purl.org/dc/elements/1.1/': 'dc', 'http://purl.org/dc/terms/': 'dcterms', 'http://purl.org/rss/1.0/modules/email/': 'email', 'http://purl.org/rss/1.0/modules/event/': 'ev', 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', 'http://freshmeat.net/rss/fm/': 'fm', 'http://xmlns.com/foaf/0.1/': 'foaf', 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', 'http://postneo.com/icbm/': 'icbm', 
'http://purl.org/rss/1.0/modules/image/': 'image', 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://purl.org/rss/1.0/modules/link/': 'l', 'http://search.yahoo.com/mrss': 'media', #Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace 'http://search.yahoo.com/mrss/': 'media', 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', 'http://purl.org/rss/1.0/modules/reference/': 'ref', 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', 'http://purl.org/rss/1.0/modules/search/': 'search', 'http://purl.org/rss/1.0/modules/slash/': 'slash', 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', 'http://hacks.benhammersley.com/rss/streaming/': 'str', 'http://purl.org/rss/1.0/modules/subscription/': 'sub', 'http://purl.org/rss/1.0/modules/syndication/': 'sy', 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf', 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', 'http://purl.org/rss/1.0/modules/threading/': 'thr', 'http://purl.org/rss/1.0/modules/textinput/': 'ti', 'http://madskills.com/public/xml/rss/module/trackback/':'trackback', 'http://wellformedweb.org/commentAPI/': 'wfw', 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', 'http://www.w3.org/1999/xhtml': 'xhtml', 'http://www.w3.org/1999/xlink': 'xlink', 'http://www.w3.org/XML/1998/namespace': 'xml' } _matchnamespaces = {} can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'] can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] 
    # Content types treated as "HTML-ish" for URI resolution / sanitization.
    html_types = ['text/html', 'application/xhtml+xml']

    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Initialize all parser state.

        baseuri  -- base URI used to resolve relative links in the feed
        baselang -- default language (e.g. from an HTTP Content-Language header)
        encoding -- character encoding assumed for byte strings
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        # Build the lowercased namespace-URI lookup once, shared by all
        # instances (self._matchnamespaces resolves to the class attribute
        # until populated).
        if not self._matchnamespaces:
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed

        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []   # stack of [element, expectingText, pieces]
        self.basestack = []      # xml:base values, one per open element
        self.langstack = []      # xml:lang values, one per open element
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        self.svgOK = 0           # depth counter for inline <svg> content
        self.hasTitle = 0        # guards against duplicate <title> elements
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')

    def unknown_starttag(self, tag, attrs):
        """Dispatch an opening tag to its `_start_<prefix>_<tag>` handler.

        Also tracks xml:base / xml:lang scoping, namespace declarations,
        and inline (XHTML) content that must be re-serialized verbatim.
        """
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]

        # the sgml parser doesn't handle entities in attributes, but
        # strict xml parsers do -- account for this difference
        if isinstance(self, _LooseFeedParser):
            attrs = [(k, v.replace('&amp;', '&')) for k, v in attrs]

        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        if type(baseuri) != type(u''):
            try:
                baseuri = unicode(baseuri, self.encoding)
            except:
                baseuri = unicode(baseuri, 'iso-8859-1')
        # ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
        if self.baseuri:
            self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
        else:
            self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_','-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)

        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            if tag.find(':') <> -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespacesInUse.get(prefix, '')
                # MathML and SVG embedded in XHTML content need their
                # namespace declared on the re-serialized element
                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns',namespace))
                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                    attrs.append(('xmlns',namespace))
            if tag == 'svg': self.svgOK += 1
            # inline content is buffered as literal markup, not dispatched
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)

        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0

        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
            unknown_tag = prefix + suffix
            if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
                return self.push(unknown_tag, 1)
            else:
                # Has attributes so create it in its own dictionary context
                context = self._getContext()
                context[unknown_tag] = attrsD

    def unknown_endtag(self, tag):
        """Dispatch a closing tag to its `_end_<prefix>_<tag>` handler.

        Mirrors unknown_starttag: pops xml:base/xml:lang scopes and
        re-serializes closing tags inside inline XHTML content.
        """
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK:
            self.svgOK -= 1

        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            # while inside an <svg> island, skip element handlers so the
            # markup is buffered verbatim by pop()
            if self.svgOK:
                raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)

        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)

        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]

    def handle_charref(self, ref):
        """Buffer a numeric character reference onto the current element.

        Called for each character reference, e.g. for '&#160;', ref will
        be '160'.  XML-special code points are kept as entities; everything
        else is decoded to UTF-8 text.
        """
        if not self.elementstack: return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_entityref(self, ref):
        """Buffer a named entity reference onto the current element.

        Called for each entity reference, e.g. for '&copy;', ref will be
        'copy'.  Looks up self.entities (supplied by the concrete parser
        subclass — not visible in this block) before falling back to the
        standard HTML name->codepoint table.
        """
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities.keys():
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                # entity expands to a character reference; recurse to decode it
                return self.handle_entityref(text)
        else:
            try: name2codepoint[ref]
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_data(self, text, escape=1):
        """Buffer a block of plain text onto the current element.

        Called for text outside of any tag and not containing any
        character or entity references.  When buffering inline XHTML
        content the text is re-escaped so it round-trips as markup.
        """
        if not self.elementstack: return
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)

    def handle_comment(self, text):
        """Ignore comments, e.g. <!-- insert message here -->."""
        pass

    def handle_pi(self, text):
        """Ignore processing instructions, e.g. <?instruction>."""
        pass

    def handle_decl(self, text):
        """Ignore markup declarations."""
        pass

    def parse_declaration(self, i):
        """Handle <![CDATA[...]]> blocks that the base parser can't.

        Returns the index just past the declaration (or the end of the
        buffer / -1 when the declaration is incomplete), per the sgmllib
        parse_declaration contract.
        """
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            # CDATA payload is literal text; escape it before buffering
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # We have an incomplete CDATA block.
                return k

    def mapContentType(self, contentType):
        """Normalize shorthand content types ('text', 'html', 'xhtml')
        to their full MIME types."""
        contentType = contentType.lower()
        if contentType == 'text' or contentType == 'plain':
            contentType = 'text/plain'
        elif contentType == 'html':
            contentType = 'text/html'
        elif contentType == 'xhtml':
            contentType = 'application/xhtml+xml'
        return contentType

    def trackNamespace(self, prefix, uri):
        """Record a namespace declaration and map its prefix to the
        canonical short prefix used by the element handlers.

        Well-known namespaces also pin down the feed version when it is
        not yet determined.
        """
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri

    def resolveURI(self, uri):
        """Resolve a possibly-relative URI against the current xml:base."""
        return _urljoin(self.baseuri or '', uri)

    def decodeEntities(self, element, data):
        """Hook for subclasses to decode entities in embedded markup;
        the base implementation is a no-op."""
        return data

    def strattrs(self, attrs):
        """Serialize an attribute list back to ' name="value"' markup,
        escaping values for inclusion in double-quoted attributes."""
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])

    def push(self, element, expectingText):
        """Open a new element frame: [name, expectingText flag, text pieces]."""
        self.elementstack.append([element, expectingText, []])

    def pop(self, element, stripWhitespace=1):
        """Close the current element frame and post-process its text.

        Joins the buffered pieces, then (as applicable) decodes base64,
        resolves relative URIs, sniffs HTML, sanitizes markup, fixes
        mis-encoded text, and stores the result on the current entry or
        feed context.  Returns the processed output string.
        """
        if not self.elementstack: return
        # ignore a close tag that doesn't match the open element
        if self.elementstack[-1][0] != element: return

        element, expectingText, pieces = self.elementstack.pop()

        if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            #    <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0:
                            break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    # for-else: only strip the wrapper when the loop ran to
                    # completion, i.e. the first <div> was never closed early
                    pieces = pieces[1:-1]

        # Ensure each piece is a str for Python 3
        for (i, v) in enumerate(pieces):
            if not isinstance(v, basestring):
                pieces[i] = v.decode('utf-8')

        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output

        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = _base64decode(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
            except TypeError:
                # In Python 3, base64 takes and outputs bytes, not str
                # This may not be the most correct way to accomplish this
                output = _base64decode(output.encode('utf-8')).decode('utf-8')

        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)

        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)

        # upgrade the declared type when plain text looks like markup
        # (lookslikehtml itself checks version and declared type)
        if self.lookslikehtml(output):
            self.contentparams['type']='text/html'

        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass

        is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish and RESOLVE_RELATIVE_URIS:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))

        # parse microformats
        # (must do this before sanitizing because some microformats
        # rely on elements that we sanitize)
        if is_htmlish and element in ['content', 'description', 'summary']:
            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
            if mfresults:
                for tag in mfresults.get('tags', []):
                    self._addTag(tag['term'], tag['scheme'], tag['label'])
                for enclosure in mfresults.get('enclosures', []):
                    self._start_enclosure(enclosure)
                for xfn in mfresults.get('xfn', []):
                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
                vcard = mfresults.get('vcard')
                if vcard:
                    self._getContext()['vcard'] = vcard

        # sanitize embedded markup
        if is_htmlish and SANITIZE_HTML:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))

        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass

        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and type(output) == type(u''):
            try:
                output = unicode(output.encode('iso-8859-1'), 'utf-8')
            except:
                pass

        # map win-1252 extensions to the proper code points
        if type(output) == type(u''):
            output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])

        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output

        # only the first title per context wins (hasTitle is set elsewhere)
        if element == 'title' and self.hasTitle:
            return output

        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                    self.entries[-1][element] = output
                    if output:
                        self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                context[element] = output
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output

    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Open a content-bearing element: bump the incontent counter and
        record its type/language/base in self.contentparams."""
        self.incontent += 1
        if self.lang:
            self.lang=self.lang.replace('_','-')
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)

    def popContent(self, tag):
        """Close a content-bearing element opened by pushContent and
        return its processed text."""
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value

    # a number of elements in a number of RSS variants are nominally plain
    # text, but this is routinely ignored.  This is an attempt to detect
    # the most common cases.  As false positives often result in silent
    # data loss, this function errs on the conservative side.
# --- Element-handler methods of the feed-parser mixin class ---------------
# NOTE(review): the enclosing class header is outside this chunk; from later
# references (`class _StrictFeedParser(_FeedParserMixin, ...)`) this is
# presumably `_FeedParserMixin` — confirm against the full file.
# The `_start_*`/`_end_*` methods are dispatched per XML element name; they
# cooperate through a push/pop element stack and a set of `in*` state flags
# (inentry, infeed, inimage, inauthor, ...) whose exact semantics live in
# code outside this chunk.

def lookslikehtml(self, s):
    """Heuristically decide whether *s* (nominally plain text) is HTML.

    Returns 1 when the string looks like HTML, None otherwise.  Errs on
    the conservative side: false positives would cause silent data loss.
    """
    # Atom feeds declare their content type explicitly; no guessing needed.
    if self.version.startswith('atom'):
        return
    # Only second-guess content that was declared plain text.
    if self.contentparams.get('type','text/html') != 'text/plain':
        return

    # must have a close tag or a entity reference to qualify
    if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
        return

    # all tags must be in a restricted subset of valid HTML tags
    if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
              re.findall(r'</?(\w+)',s)):
        return

    # all entities must have been defined as valid HTML entities
    from htmlentitydefs import entitydefs
    if filter(lambda e: e not in entitydefs.keys(),
              re.findall(r'&(\w+);',s)):
        return

    return 1

def _mapToStandardPrefix(self, name):
    """Rewrite the namespace prefix of a 'prefix:suffix' name using
    self.namespacemap; names without a colon pass through unchanged."""
    colonpos = name.find(':')
    if colonpos <> -1:
        prefix = name[:colonpos]
        suffix = name[colonpos+1:]
        prefix = self.namespacemap.get(prefix, prefix)
        name = prefix + ':' + suffix
    return name

def _getAttribute(self, attrsD, name):
    """Look up *name* in an attribute dict after prefix normalization."""
    return attrsD.get(self._mapToStandardPrefix(name))

def _isBase64(self, attrsD, contentparams):
    """Return 1 if the current content block should be base64-decoded.

    Explicit mode="base64" wins; otherwise anything that is not text or
    XML is assumed to be base64-encoded binary.
    NOTE(review): the `contentparams` parameter is ignored — the method
    reads `self.contentparams` instead; kept as-is to preserve behavior.
    """
    if attrsD.get('mode', '') == 'base64':
        return 1
    if self.contentparams['type'].startswith('text/'):
        return 0
    if self.contentparams['type'].endswith('+xml'):
        return 0
    if self.contentparams['type'].endswith('/xml'):
        return 0
    return 1

def _itsAnHrefDamnIt(self, attrsD):
    """Normalize the many spellings of a link attribute (url/uri/href)
    to a single 'href' key, mutating and returning *attrsD*."""
    href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
    if href:
        try:
            del attrsD['url']
        except KeyError:
            pass
        try:
            del attrsD['uri']
        except KeyError:
            pass
        attrsD['href'] = href
    return attrsD

def _save(self, key, value, overwrite=False):
    """Store *value* under *key* in the current context; unless
    *overwrite* is set, an existing value is kept (first one wins)."""
    context = self._getContext()
    if overwrite:
        context[key] = value
    else:
        context.setdefault(key, value)

def _start_rss(self, attrsD):
    """Handle the root <rss> element: pin down the RSS version string."""
    versionmap = {'0.91': 'rss091u',
                  '0.92': 'rss092',
                  '0.93': 'rss093',
                  '0.94': 'rss094'}
    #If we're here then this is an RSS feed.
    #If we don't have a version or have a version that starts with something
    #other than RSS then there's been a mistake. Correct it.
    if not self.version or not self.version.startswith('rss'):
        attr_version = attrsD.get('version', '')
        version = versionmap.get(attr_version)
        if version:
            self.version = version
        elif attr_version.startswith('2.'):
            self.version = 'rss20'
        else:
            self.version = 'rss'

def _start_dlhottitles(self, attrsD):
    # Netscape "hot titles" variant of RSS.
    self.version = 'hotrss'

def _start_channel(self, attrsD):
    self.infeed = 1
    self._cdf_common(attrsD)
_start_feedinfo = _start_channel

def _cdf_common(self, attrsD):
    """Map CDF channel attributes (lastmod/href) onto the regular
    modified/link handlers by faking the element push/pop sequence."""
    if attrsD.has_key('lastmod'):
        self._start_modified({})
        self.elementstack[-1][-1] = attrsD['lastmod']
        self._end_modified()
    if attrsD.has_key('href'):
        self._start_link({})
        self.elementstack[-1][-1] = attrsD['href']
        self._end_link()

def _start_feed(self, attrsD):
    """Handle the root Atom <feed> element and record the Atom version."""
    self.infeed = 1
    versionmap = {'0.1': 'atom01',
                  '0.2': 'atom02',
                  '0.3': 'atom03'}
    if not self.version:
        attr_version = attrsD.get('version')
        version = versionmap.get(attr_version)
        if version:
            self.version = version
        else:
            self.version = 'atom'

def _end_channel(self):
    self.infeed = 0
_end_feed = _end_channel

def _start_image(self, attrsD):
    context = self._getContext()
    # Only feed-level images get a dedicated 'image' dict; entry-level
    # <image> elements are handled via the normal context.
    if not self.inentry:
        context.setdefault('image', FeedParserDict())
    self.inimage = 1
    self.hasTitle = 0
    self.push('image', 0)

def _end_image(self):
    self.pop('image')
    self.inimage = 0

def _start_textinput(self, attrsD):
    context = self._getContext()
    context.setdefault('textinput', FeedParserDict())
    self.intextinput = 1
    self.hasTitle = 0
    self.push('textinput', 0)
_start_textInput = _start_textinput

def _end_textinput(self):
    self.pop('textinput')
    self.intextinput = 0
_end_textInput = _end_textinput

def _start_author(self, attrsD):
    self.inauthor = 1
    self.push('author', 1)
    # Append a new FeedParserDict when expecting an author
    context = self._getContext()
    context.setdefault('authors', [])
    context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author

def _end_author(self):
    self.pop('author')
    self.inauthor = 0
    self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author

def _start_itunes_owner(self, attrsD):
    # iTunes <owner> is treated as the feed's publisher.
    self.inpublisher = 1
    self.push('publisher', 0)

def _end_itunes_owner(self):
    self.pop('publisher')
    self.inpublisher = 0
    self._sync_author_detail('publisher')

def _start_contributor(self, attrsD):
    self.incontributor = 1
    context = self._getContext()
    context.setdefault('contributors', [])
    context['contributors'].append(FeedParserDict())
    self.push('contributor', 0)

def _end_contributor(self):
    self.pop('contributor')
    self.incontributor = 0

def _start_dc_contributor(self, attrsD):
    # Dublin Core contributors carry the name directly as element text,
    # so push 'name' rather than 'contributor'.
    self.incontributor = 1
    context = self._getContext()
    context.setdefault('contributors', [])
    context['contributors'].append(FeedParserDict())
    self.push('name', 0)

def _end_dc_contributor(self):
    self._end_name()
    self.incontributor = 0

def _start_name(self, attrsD):
    self.push('name', 0)
_start_itunes_name = _start_name

def _end_name(self):
    # Route the name to whichever person (or textinput) is being parsed;
    # publisher is checked first so iTunes <owner> blocks win.
    value = self.pop('name')
    if self.inpublisher:
        self._save_author('name', value, 'publisher')
    elif self.inauthor:
        self._save_author('name', value)
    elif self.incontributor:
        self._save_contributor('name', value)
    elif self.intextinput:
        context = self._getContext()
        context['name'] = value
_end_itunes_name = _end_name

def _start_width(self, attrsD):
    self.push('width', 0)

def _end_width(self):
    value = self.pop('width')
    try:
        value = int(value)
    except:
        # Non-numeric width degrades to 0 rather than failing the parse.
        value = 0
    if self.inimage:
        context = self._getContext()
        context['width'] = value

def _start_height(self, attrsD):
    self.push('height', 0)

def _end_height(self):
    value = self.pop('height')
    try:
        value = int(value)
    except:
        # Non-numeric height degrades to 0 rather than failing the parse.
        value = 0
    if self.inimage:
        context = self._getContext()
        context['height'] = value

def _start_url(self, attrsD):
    self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url

def _end_url(self):
    value = self.pop('href')
    if self.inauthor:
        self._save_author('href', value)
    elif self.incontributor:
        self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url

def _start_email(self, attrsD):
    self.push('email', 0)
_start_itunes_email = _start_email

def _end_email(self):
    value = self.pop('email')
    if self.inpublisher:
        self._save_author('email', value, 'publisher')
    elif self.inauthor:
        self._save_author('email', value)
    elif self.incontributor:
        self._save_contributor('email', value)
_end_itunes_email = _end_email

def _getContext(self):
    """Return the dict that element data should currently be written to.

    Priority order matters: source > image > textinput > entry > feed.
    """
    if self.insource:
        context = self.sourcedata
    elif self.inimage and self.feeddata.has_key('image'):
        context = self.feeddata['image']
    elif self.intextinput:
        context = self.feeddata['textinput']
    elif self.inentry:
        context = self.entries[-1]
    else:
        context = self.feeddata
    return context

def _save_author(self, key, value, prefix='author'):
    """Record one field (name/href/email) of the current author-like
    person under '<prefix>_detail', and mirror it into 'authors'."""
    context = self._getContext()
    context.setdefault(prefix + '_detail', FeedParserDict())
    context[prefix + '_detail'][key] = value
    # Keep the combined '<prefix>' string in sync with the detail dict.
    self._sync_author_detail()
    context.setdefault('authors', [FeedParserDict()])
    context['authors'][-1][key] = value

def _save_contributor(self, key, value):
    context = self._getContext()
    context.setdefault('contributors', [FeedParserDict()])
    context['contributors'][-1][key] = value

def _sync_author_detail(self, key='author'):
    """Keep '<key>' (display string) and '<key>_detail' (dict) coherent.

    If the detail dict exists, rebuild the display string as
    'name (email)'.  Otherwise try to split an existing display string
    like 'Jane Doe <jane@example.com>' back into name and email parts.
    """
    context = self._getContext()
    detail = context.get('%s_detail' % key)
    if detail:
        name = detail.get('name')
        email = detail.get('email')
        if name and email:
            context[key] = '%s (%s)' % (name, email)
        elif name:
            context[key] = name
        elif email:
            context[key] = email
    else:
        author, email = context.get(key), None
        if not author:
            return
        # Loose RFC-822-ish address match, optionally followed by a
        # ?subject= query and optionally bracketed.
        emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
        if emailmatch:
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.replace('<>', '')
            author = author.replace('&lt;&gt;', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
        if author or email:
            context.setdefault('%s_detail' % key, FeedParserDict())
        if author:
            context['%s_detail' % key]['name'] = author
        if email:
            context['%s_detail' % key]['email'] = email

def _start_subtitle(self, attrsD):
    self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle

def _end_subtitle(self):
    self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle

def _start_rights(self, attrsD):
    self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights

def _end_rights(self):
    self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights

def _start_item(self, attrsD):
    """Open a new entry (RSS <item> / Atom <entry>) and reset the
    per-entry state flags."""
    self.entries.append(FeedParserDict())
    self.push('item', 0)
    self.inentry = 1
    self.guidislink = 0
    self.hasTitle = 0
    # RDF feeds identify the item via rdf:about.
    id = self._getAttribute(attrsD, 'rdf:about')
    if id:
        context = self._getContext()
        context['id'] = id
    self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item

def _end_item(self):
    self.pop('item')
    self.inentry = 0
_end_entry = _end_item

def _start_dc_language(self, attrsD):
    self.push('language', 1)
_start_language = _start_dc_language

def _end_dc_language(self):
    self.lang = self.pop('language')
_end_language = _end_dc_language

def _start_dc_publisher(self, attrsD):
    self.push('publisher', 1)
_start_webmaster = _start_dc_publisher

def _end_dc_publisher(self):
    self.pop('publisher')
    self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher

def _start_published(self, attrsD):
    self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published

def _end_published(self):
    value = self.pop('published')
    # overwrite=True: the last published date seen wins.
    self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published _end_issued = _end_published def _start_updated(self, attrsD): self.push('updated', 1) _start_modified = _start_updated _start_dcterms_modified = _start_updated _start_pubdate = _start_updated _start_dc_date = _start_updated _start_lastbuilddate = _start_updated def _end_updated(self): value = self.pop('updated') parsed_value = _parse_date(value) self._save('updated_parsed', parsed_value, overwrite=True) _end_modified = _end_updated _end_dcterms_modified = _end_updated _end_pubdate = _end_updated _end_dc_date = _end_updated _end_lastbuilddate = _end_updated def _start_created(self, attrsD): self.push('created', 1) _start_dcterms_created = _start_created def _end_created(self): value = self.pop('created') self._save('created_parsed', _parse_date(value), overwrite=True) _end_dcterms_created = _end_created def _start_expirationdate(self, attrsD): self.push('expired', 1) def _end_expirationdate(self): self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True) def _start_cc_license(self, attrsD): context = self._getContext() value = self._getAttribute(attrsD, 'rdf:resource') attrsD = FeedParserDict() attrsD['rel']='license' if value: attrsD['href']=value context.setdefault('links', []).append(attrsD) def _start_creativecommons_license(self, attrsD): self.push('license', 1) _start_creativeCommons_license = _start_creativecommons_license def _end_creativecommons_license(self): value = self.pop('license') context = self._getContext() attrsD = FeedParserDict() attrsD['rel']='license' if value: attrsD['href']=value context.setdefault('links', []).append(attrsD) del context['license'] _end_creativeCommons_license = _end_creativecommons_license def _addXFN(self, relationships, href, name): context = self._getContext() xfn = context.setdefault('xfn', []) value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name}) if value not in xfn: xfn.append(value) def _addTag(self, term, scheme, label): 
context = self._getContext() tags = context.setdefault('tags', []) if (not term) and (not scheme) and (not label): return value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) if value not in tags: tags.append(value) def _start_category(self, attrsD): if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD)) term = attrsD.get('term') scheme = attrsD.get('scheme', attrsD.get('domain')) label = attrsD.get('label') self._addTag(term, scheme, label) self.push('category', 1) _start_dc_subject = _start_category _start_keywords = _start_category def _start_media_category(self, attrsD): attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema') self._start_category(attrsD) def _end_itunes_keywords(self): for term in self.pop('itunes_keywords').split(): self._addTag(term, 'http://www.itunes.com/', None) def _start_itunes_category(self, attrsD): self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) self.push('category', 1) def _end_category(self): value = self.pop('category') if not value: return context = self._getContext() tags = context['tags'] if value and len(tags) and not tags[-1]['term']: tags[-1]['term'] = value else: self._addTag(value, None, None) _end_dc_subject = _end_category _end_keywords = _end_category _end_itunes_category = _end_category _end_media_category = _end_category def _start_cloud(self, attrsD): self._getContext()['cloud'] = FeedParserDict(attrsD) def _start_link(self, attrsD): attrsD.setdefault('rel', 'alternate') if attrsD['rel'] == 'self': attrsD.setdefault('type', 'application/atom+xml') else: attrsD.setdefault('type', 'text/html') context = self._getContext() attrsD = self._itsAnHrefDamnIt(attrsD) if attrsD.has_key('href'): attrsD['href'] = self.resolveURI(attrsD['href']) expectingText = self.infeed or self.inentry or self.insource context.setdefault('links', []) if not (self.inentry and self.inimage): context['links'].append(FeedParserDict(attrsD)) if attrsD.has_key('href'): 
expectingText = 0 if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): context['link'] = attrsD['href'] else: self.push('link', expectingText) _start_producturl = _start_link def _end_link(self): value = self.pop('link') context = self._getContext() _end_producturl = _end_link def _start_guid(self, attrsD): self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') self.push('id', 1) def _end_guid(self): value = self.pop('id') self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) if self.guidislink: # guid acts as link, but only if 'ispermalink' is not present or is 'true', # and only if the item doesn't already have a link element self._save('link', value) def _start_title(self, attrsD): if self.svgOK: return self.unknown_starttag('title', attrsD.items()) self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) _start_dc_title = _start_title _start_media_title = _start_title def _end_title(self): if self.svgOK: return value = self.popContent('title') if not value: return context = self._getContext() self.hasTitle = 1 _end_dc_title = _end_title def _end_media_title(self): hasTitle = self.hasTitle self._end_title() self.hasTitle = hasTitle def _start_description(self, attrsD): context = self._getContext() if context.has_key('summary'): self._summaryKey = 'content' self._start_content(attrsD) else: self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource) _start_dc_description = _start_description def _start_abstract(self, attrsD): self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) def _end_description(self): if self._summaryKey == 'content': self._end_content() else: value = self.popContent('description') self._summaryKey = None _end_abstract = _end_description _end_dc_description = _end_description def _start_info(self, attrsD): self.pushContent('info', 
attrsD, 'text/plain', 1) _start_feedburner_browserfriendly = _start_info def _end_info(self): self.popContent('info') _end_feedburner_browserfriendly = _end_info def _start_generator(self, attrsD): if attrsD: attrsD = self._itsAnHrefDamnIt(attrsD) if attrsD.has_key('href'): attrsD['href'] = self.resolveURI(attrsD['href']) self._getContext()['generator_detail'] = FeedParserDict(attrsD) self.push('generator', 1) def _end_generator(self): value = self.pop('generator') context = self._getContext() if context.has_key('generator_detail'): context['generator_detail']['name'] = value def _start_admin_generatoragent(self, attrsD): self.push('generator', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('generator') self._getContext()['generator_detail'] = FeedParserDict({'href': value}) def _start_admin_errorreportsto(self, attrsD): self.push('errorreportsto', 1) value = self._getAttribute(attrsD, 'rdf:resource') if value: self.elementstack[-1][2].append(value) self.pop('errorreportsto') def _start_summary(self, attrsD): context = self._getContext() if context.has_key('summary'): self._summaryKey = 'content' self._start_content(attrsD) else: self._summaryKey = 'summary' self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) _start_itunes_summary = _start_summary def _end_summary(self): if self._summaryKey == 'content': self._end_content() else: self.popContent(self._summaryKey or 'summary') self._summaryKey = None _end_itunes_summary = _end_summary def _start_enclosure(self, attrsD): attrsD = self._itsAnHrefDamnIt(attrsD) context = self._getContext() attrsD['rel']='enclosure' context.setdefault('links', []).append(FeedParserDict(attrsD)) def _start_source(self, attrsD): if 'url' in attrsD: # This means that we're processing a source element from an RSS 2.0 feed self.sourcedata['href'] = attrsD[u'url'] self.push('source', 1) self.insource = 1 self.hasTitle = 0 def _end_source(self): self.insource = 0 value = 
self.pop('source') if value: self.sourcedata['title'] = value self._getContext()['source'] = copy.deepcopy(self.sourcedata) self.sourcedata.clear() def _start_content(self, attrsD): self.pushContent('content', attrsD, 'text/plain', 1) src = attrsD.get('src') if src: self.contentparams['src'] = src self.push('content', 1) def _start_prodlink(self, attrsD): self.pushContent('content', attrsD, 'text/html', 1) def _start_body(self, attrsD): self.pushContent('content', attrsD, 'application/xhtml+xml', 1) _start_xhtml_body = _start_body def _start_content_encoded(self, attrsD): self.pushContent('content', attrsD, 'text/html', 1) _start_fullitem = _start_content_encoded def _end_content(self): copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) value = self.popContent('content') if copyToSummary: self._save('summary', value) _end_body = _end_content _end_xhtml_body = _end_content _end_content_encoded = _end_content _end_fullitem = _end_content _end_prodlink = _end_content def _start_itunes_image(self, attrsD): self.push('itunes_image', 0) if attrsD.get('href'): self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) _start_itunes_link = _start_itunes_image def _end_itunes_block(self): value = self.pop('itunes_block', 0) self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 def _end_itunes_explicit(self): value = self.pop('itunes_explicit', 0) # Convert 'yes' -> True, 'clean' to False, and any other value to None # False and None both evaluate as False, so the difference can be ignored # by applications that only need to know if the content is explicit. 
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0] def _start_media_content(self, attrsD): context = self._getContext() context.setdefault('media_content', []) context['media_content'].append(attrsD) def _start_media_thumbnail(self, attrsD): context = self._getContext() context.setdefault('media_thumbnail', []) self.push('url', 1) # new context['media_thumbnail'].append(attrsD) def _end_media_thumbnail(self): url = self.pop('url') context = self._getContext() if url != None and len(url.strip()) != 0: if not context['media_thumbnail'][-1].has_key('url'): context['media_thumbnail'][-1]['url'] = url def _start_media_player(self, attrsD): self.push('media_player', 0) self._getContext()['media_player'] = FeedParserDict(attrsD) def _end_media_player(self): value = self.pop('media_player') context = self._getContext() context['media_player']['content'] = value def _start_newlocation(self, attrsD): self.push('newlocation', 1) def _end_newlocation(self): url = self.pop('newlocation') context = self._getContext() # don't set newlocation if the context isn't right if context is not self.feeddata: return context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip()) if _XML_AVAILABLE: class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): def __init__(self, baseuri, baselang, encoding): if _debug: sys.stderr.write('trying StrictFeedParser\n') xml.sax.handler.ContentHandler.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) self.bozo = 0 self.exc = None self.decls = {} def startPrefixMapping(self, prefix, uri): self.trackNamespace(prefix, uri) if uri == 'http://www.w3.org/1999/xlink': self.decls['xmlns:'+prefix] = uri def startElementNS(self, name, qname, attrs): namespace, localname = name lowernamespace = str(namespace or '').lower() if lowernamespace.find('backend.userland.com/rss') <> -1: # match any backend.userland.com namespace namespace = 
'http://backend.userland.com/rss' lowernamespace = namespace if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = None prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix localname = str(localname).lower() # qname implementation is horribly broken in Python 2.1 (it # doesn't report any), and slightly broken in Python 2.2 (it # doesn't report the xml: namespace). So we match up namespaces # with a known list first, and then possibly override them with # the qnames the SAX parser gives us (if indeed it gives us any # at all). Thanks to MatejC for helping me test this and # tirelessly telling me that it didn't work yet. attrsD, self.decls = self.decls, {} if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML': attrsD['xmlns']=namespace if localname=='svg' and namespace=='http://www.w3.org/2000/svg': attrsD['xmlns']=namespace if prefix: localname = prefix.lower() + ':' + localname elif namespace and not qname: #Expat for name,value in self.namespacesInUse.items(): if name and value == namespace: localname = name + ':' + localname break if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname)) for (namespace, attrlocalname), attrvalue in attrs._attrs.items(): lowernamespace = (namespace or '').lower() prefix = self._matchnamespaces.get(lowernamespace, '') if prefix: attrlocalname = prefix + ':' + attrlocalname attrsD[str(attrlocalname).lower()] = attrvalue for qname in attrs.getQNames(): attrsD[str(qname).lower()] = attrs.getValueByQName(qname) self.unknown_starttag(localname, attrsD.items()) def characters(self, text): self.handle_data(text) def 
endElementNS(self, name, qname): namespace, localname = name lowernamespace = str(namespace or '').lower() if qname and qname.find(':') > 0: givenprefix = qname.split(':')[0] else: givenprefix = '' prefix = self._matchnamespaces.get(lowernamespace, givenprefix) if prefix: localname = prefix + ':' + localname elif namespace and not qname: #Expat for name,value in self.namespacesInUse.items(): if name and value == namespace: localname = name + ':' + localname break localname = str(localname).lower() self.unknown_endtag(localname) def error(self, exc): self.bozo = 1 self.exc = exc def fatalError(self, exc): self.error(exc) raise exc class _BaseHTMLProcessor(sgmllib.SGMLParser): special = re.compile('''[<>'"]''') bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)") elements_no_end_tag = [ 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param', 'source', 'track', 'wbr' ] def __init__(self, encoding, _type): self.encoding = encoding self._type = _type if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding) sgmllib.SGMLParser.__init__(self) def reset(self): self.pieces = [] sgmllib.SGMLParser.reset(self) def _shorttag_replace(self, match): tag = match.group(1) if tag in self.elements_no_end_tag: return '<' + tag + ' />' else: return '<' + tag + '></' + tag + '>' def parse_starttag(self,i): j=sgmllib.SGMLParser.parse_starttag(self, i) if self._type == 'application/xhtml+xml': if j>2 and self.rawdata[j-2:j]=='/>': self.unknown_endtag(self.lasttag) return j def feed(self, data): data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data) #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data) data = data.replace('&#39;', "'") data = data.replace('&#34;', '"') try: bytes if bytes is str: raise NameError 
self.encoding = self.encoding + '_INVALID_PYTHON_3' except NameError: if self.encoding and type(data) == type(u''): data = data.encode(self.encoding) sgmllib.SGMLParser.feed(self, data) sgmllib.SGMLParser.close(self) def normalize_attrs(self, attrs): if not attrs: return attrs # utility method to be called by descendants attrs = dict([(k.lower(), v) for k, v in attrs]).items() attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] attrs.sort() return attrs def unknown_starttag(self, tag, attrs): # called for each start tag # attrs is a list of (attr, value) tuples # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')] if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag) uattrs = [] strattrs='' if attrs: for key, value in attrs: value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;') value = self.bare_ampersand.sub("&amp;", value) # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds if type(value) != type(u''): try: value = unicode(value, self.encoding) except: value = unicode(value, 'iso-8859-1') try: # Currently, in Python 3 the key is already a str, and cannot be decoded again uattrs.append((unicode(key, self.encoding), value)) except TypeError: uattrs.append((key, value)) strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]) if self.encoding: try: strattrs=strattrs.encode(self.encoding) except: pass if tag in self.elements_no_end_tag: self.pieces.append('<%(tag)s%(strattrs)s />' % locals()) else: self.pieces.append('<%(tag)s%(strattrs)s>' % locals()) def unknown_endtag(self, tag): # called for each end tag, e.g. for </pre>, tag will be 'pre' # Reconstruct the original end tag. if tag not in self.elements_no_end_tag: self.pieces.append("</%(tag)s>" % locals()) def handle_charref(self, ref): # called for each character reference, e.g. 
for '&#160;', ref will be '160' # Reconstruct the original character reference. if ref.startswith('x'): value = unichr(int(ref[1:],16)) else: value = unichr(int(ref)) if value in _cp1252.keys(): self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:]) else: self.pieces.append('&#%(ref)s;' % locals()) def handle_entityref(self, ref): # called for each entity reference, e.g. for '&copy;', ref will be 'copy' # Reconstruct the original entity reference. if name2codepoint.has_key(ref): self.pieces.append('&%(ref)s;' % locals()) else: self.pieces.append('&amp;%(ref)s' % locals()) def handle_data(self, text): # called for each block of plain text, i.e. outside of any tag and # not containing any character or entity references # Store the original text verbatim. if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text) self.pieces.append(text) def handle_comment(self, text): # called for each HTML comment, e.g. <!-- insert Javascript code here --> # Reconstruct the original comment. self.pieces.append('<!--%(text)s-->' % locals()) def handle_pi(self, text): # called for each processing instruction, e.g. <?instruction> # Reconstruct original processing instruction. self.pieces.append('<?%(text)s>' % locals()) def handle_decl(self, text): # called for the DOCTYPE, if present, e.g. 
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" # "http://www.w3.org/TR/html4/loose.dtd"> # Reconstruct original DOCTYPE self.pieces.append('<!%(text)s>' % locals()) _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match def _scan_name(self, i, declstartpos): rawdata = self.rawdata n = len(rawdata) if i == n: return None, -1 m = self._new_declname_match(rawdata, i) if m: s = m.group() name = s.strip() if (i + len(s)) == n: return None, -1 # end of buffer return name.lower(), m.end() else: self.handle_data(rawdata) # self.updatepos(declstartpos, i) return None, -1 def convert_charref(self, name): return '&#%s;' % name def convert_entityref(self, name): return '&%s;' % name def output(self): '''Return processed HTML as a single string''' return ''.join([str(p) for p in self.pieces]) def parse_declaration(self, i): try: return sgmllib.SGMLParser.parse_declaration(self, i) except sgmllib.SGMLParseError: # escape the doctype declaration and continue parsing self.handle_data('&lt;') return i+1 class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): def __init__(self, baseuri, baselang, encoding, entities): sgmllib.SGMLParser.__init__(self) _FeedParserMixin.__init__(self, baseuri, baselang, encoding) _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml') self.entities=entities def decodeEntities(self, element, data): data = data.replace('&#60;', '&lt;') data = data.replace('&#x3c;', '&lt;') data = data.replace('&#x3C;', '&lt;') data = data.replace('&#62;', '&gt;') data = data.replace('&#x3e;', '&gt;') data = data.replace('&#x3E;', '&gt;') data = data.replace('&#38;', '&amp;') data = data.replace('&#x26;', '&amp;') data = data.replace('&#34;', '&quot;') data = data.replace('&#x22;', '&quot;') data = data.replace('&#39;', '&apos;') data = data.replace('&#x27;', '&apos;') if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): data = data.replace('&lt;', '<') data = 
data.replace('&gt;', '>') data = data.replace('&amp;', '&') data = data.replace('&quot;', '"') data = data.replace('&apos;', "'") return data def strattrs(self, attrs): return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs]) class _MicroformatsParser: STRING = 1 DATE = 2 URI = 3 NODE = 4 EMAIL = 5 known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'] known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'] def __init__(self, data, baseuri, encoding): self.document = BeautifulSoup.BeautifulSoup(data) self.baseuri = baseuri self.encoding = encoding if type(data) == type(u''): data = data.encode(encoding) self.tags = [] self.enclosures = [] self.xfn = [] self.vcard = None def vcardEscape(self, s): if type(s) in (type(''), type(u'')): s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n') return s def vcardFold(self, s): s = re.sub(';+$', '', s) sFolded = '' iMax = 75 sPrefix = '' while len(s) > iMax: sFolded += sPrefix + s[:iMax] + '\n' s = s[iMax:] sPrefix = ' ' iMax = 74 sFolded += sPrefix + s return sFolded def normalize(self, s): return re.sub(r'\s+', ' ', s).strip() def unique(self, aList): results = [] for element in aList: if element not in results: results.append(element) return results def toISO8601(self, dt): return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt) def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0): all = lambda x: 1 sProperty = sProperty.lower() bFound = 0 bNormalize = 1 propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)} if bAllowMultiple and (iPropertyType != 
self.NODE): snapResults = [] containers = elmRoot(['ul', 'ol'], propertyMatch) for container in containers: snapResults.extend(container('li')) bFound = (len(snapResults) != 0) if not bFound: snapResults = elmRoot(all, propertyMatch) bFound = (len(snapResults) != 0) if (not bFound) and (sProperty == 'value'): snapResults = elmRoot('pre') bFound = (len(snapResults) != 0) bNormalize = not bFound if not bFound: snapResults = [elmRoot] bFound = (len(snapResults) != 0) arFilter = [] if sProperty == 'vcard': snapFilter = elmRoot(all, propertyMatch) for node in snapFilter: if node.findParent(all, propertyMatch): arFilter.append(node) arResults = [] for node in snapResults: if node not in arFilter: arResults.append(node) bFound = (len(arResults) != 0) if not bFound: if bAllowMultiple: return [] elif iPropertyType == self.STRING: return '' elif iPropertyType == self.DATE: return None elif iPropertyType == self.URI: return '' elif iPropertyType == self.NODE: return None else: return None arValues = [] for elmResult in arResults: sValue = None if iPropertyType == self.NODE: if bAllowMultiple: arValues.append(elmResult) continue else: return elmResult sNodeName = elmResult.name.lower() if (iPropertyType == self.EMAIL) and (sNodeName == 'a'): sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0] if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if (not sValue) and (sNodeName == 'abbr'): sValue = elmResult.get('title') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if (not sValue) and (iPropertyType == self.URI): if sNodeName == 'a': sValue = elmResult.get('href') elif sNodeName == 'img': sValue = elmResult.get('src') elif sNodeName == 'object': sValue = elmResult.get('data') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if (not sValue) and (sNodeName == 'img'): sValue = elmResult.get('alt') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if not 
sValue: sValue = elmResult.renderContents() sValue = re.sub(r'<\S[^>]*>', '', sValue) sValue = sValue.replace('\r\n', '\n') sValue = sValue.replace('\r', '\n') if sValue: sValue = bNormalize and self.normalize(sValue) or sValue.strip() if not sValue: continue if iPropertyType == self.DATE: sValue = _parse_date_iso8601(sValue) if bAllowMultiple: arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue) else: return bAutoEscape and self.vcardEscape(sValue) or sValue return arValues def findVCards(self, elmRoot, bAgentParsing=0): sVCards = '' if not bAgentParsing: arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1) else: arCards = [elmRoot] for elmCard in arCards: arLines = [] def processSingleString(sProperty): sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding) if sValue: arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue)) return sValue or u'' def processSingleURI(sProperty): sValue = self.getPropertyValue(elmCard, sProperty, self.URI) if sValue: sContentType = '' sEncoding = '' sValueKey = '' if sValue.startswith('data:'): sEncoding = ';ENCODING=b' sContentType = sValue.split(';')[0].split('/').pop() sValue = sValue.split(',', 1).pop() else: elmValue = self.getPropertyValue(elmCard, sProperty) if elmValue: if sProperty != 'url': sValueKey = ';VALUE=uri' sContentType = elmValue.get('type', '').strip().split('/').pop().strip() sContentType = sContentType.upper() if sContentType == 'OCTET-STREAM': sContentType = '' if sContentType: sContentType = ';TYPE=' + sContentType.upper() arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue)) def processTypeValue(sProperty, arDefaultType, arForceType=None): arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1) for elmResult in arResults: arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1) if arForceType: arType = self.unique(arForceType + arType) if not 
arType: arType = arDefaultType sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0) if sValue: arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue)) # AGENT # must do this before all other properties because it is destructive # (removes nested class="vcard" nodes so they don't interfere with # this vcard's other properties) arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1) for elmAgent in arAgent: if re.compile(r'\bvcard\b').search(elmAgent.get('class')): sAgentValue = self.findVCards(elmAgent, 1) + '\n' sAgentValue = sAgentValue.replace('\n', '\\n') sAgentValue = sAgentValue.replace(';', '\\;') if sAgentValue: arLines.append(self.vcardFold('AGENT:' + sAgentValue)) # Completely remove the agent element from the parse tree elmAgent.extract() else: sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1); if sAgentValue: arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue)) # FN (full name) sFN = processSingleString('fn') # N (name) elmName = self.getPropertyValue(elmCard, 'n') if elmName: sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1) sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1) arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1) arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1) arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1) arLines.append(self.vcardFold('N:' + sFamilyName + ';' + sGivenName + ';' + ','.join(arAdditionalNames) + ';' + ','.join(arHonorificPrefixes) + ';' + ','.join(arHonorificSuffixes))) elif sFN: # implied "N" optimization # 
http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization arNames = self.normalize(sFN).split() if len(arNames) == 2: bFamilyNameFirst = (arNames[0].endswith(',') or len(arNames[1]) == 1 or ((len(arNames[1]) == 2) and (arNames[1].endswith('.')))) if bFamilyNameFirst: arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1])) else: arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0])) # SORT-STRING sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1) if sSortString: arLines.append(self.vcardFold('SORT-STRING:' + sSortString)) # NICKNAME arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1) if arNickname: arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname))) # PHOTO processSingleURI('photo') # BDAY dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE) if dtBday: arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday))) # ADR (address) arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1) for elmAdr in arAdr: arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1) if not arType: arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1 sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1) sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1) sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1) sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1) sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1) sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1) sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1) arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' + sPostOfficeBox + ';' + sExtendedAddress + ';' + sStreetAddress + ';' + sLocality + ';' + sRegion + ';' + sPostalCode + ';' + sCountryName)) # LABEL 
processTypeValue('label', ['intl','postal','parcel','work']) # TEL (phone number) processTypeValue('tel', ['voice']) # EMAIL processTypeValue('email', ['internet'], ['internet']) # MAILER processSingleString('mailer') # TZ (timezone) processSingleString('tz') # GEO (geographical information) elmGeo = self.getPropertyValue(elmCard, 'geo') if elmGeo: sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1) sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1) arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude)) # TITLE processSingleString('title') # ROLE processSingleString('role') # LOGO processSingleURI('logo') # ORG (organization) elmOrg = self.getPropertyValue(elmCard, 'org') if elmOrg: sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1) if not sOrganizationName: # implied "organization-name" optimization # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1) if sOrganizationName: arLines.append(self.vcardFold('ORG:' + sOrganizationName)) else: arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1) arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit))) # CATEGORY arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1) if arCategory: arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory))) # NOTE processSingleString('note') # REV processSingleString('rev') # SOUND processSingleURI('sound') # UID processSingleString('uid') # URL processSingleURI('url') # CLASS processSingleString('class') # KEY processSingleURI('key') if arLines: arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard'] sVCards += u'\n'.join(arLines) + u'\n' return sVCards.strip() def isProbablyDownloadable(self, elm): 
attrsD = elm.attrMap if not attrsD.has_key('href'): return 0 linktype = attrsD.get('type', '').strip() if linktype.startswith('audio/') or \ linktype.startswith('video/') or \ (linktype.startswith('application/') and not linktype.endswith('xml')): return 1 path = urlparse.urlparse(attrsD['href'])[2] if path.find('.') == -1: return 0 fileext = path.split('.').pop().lower() return fileext in self.known_binary_extensions def findTags(self): all = lambda x: 1 for elm in self.document(all, {'rel': re.compile(r'\btag\b')}): href = elm.get('href') if not href: continue urlscheme, domain, path, params, query, fragment = \ urlparse.urlparse(_urljoin(self.baseuri, href)) segments = path.split('/') tag = segments.pop() if not tag: tag = segments.pop() tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', '')) if not tagscheme.endswith('/'): tagscheme += '/' self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''})) def findEnclosures(self): all = lambda x: 1 enclosure_match = re.compile(r'\benclosure\b') for elm in self.document(all, {'href': re.compile(r'.+')}): if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue if elm.attrMap not in self.enclosures: self.enclosures.append(elm.attrMap) if elm.string and not elm.get('title'): self.enclosures[-1]['title'] = elm.string def findXFN(self): all = lambda x: 1 for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}): rels = elm.get('rel', '').split() xfn_rels = [] for rel in rels: if rel in self.known_xfn_relationships: xfn_rels.append(rel) if xfn_rels: self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string}) def _parseMicroformats(htmlSource, baseURI, encoding): if not BeautifulSoup: return if _debug: sys.stderr.write('entering _parseMicroformats\n') try: p = _MicroformatsParser(htmlSource, baseURI, encoding) except UnicodeEncodeError: # sgmllib throws this 
exception when performing lookups of tags # with non-ASCII characters in them. return p.vcard = p.findVCards(p.document) p.findTags() p.findEnclosures() p.findXFN() return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard} class _RelativeURIResolver(_BaseHTMLProcessor): relative_uris = [('a', 'href'), ('applet', 'codebase'), ('area', 'href'), ('blockquote', 'cite'), ('body', 'background'), ('del', 'cite'), ('form', 'action'), ('frame', 'longdesc'), ('frame', 'src'), ('iframe', 'longdesc'), ('iframe', 'src'), ('head', 'profile'), ('img', 'longdesc'), ('img', 'src'), ('img', 'usemap'), ('input', 'src'), ('input', 'usemap'), ('ins', 'cite'), ('link', 'href'), ('object', 'classid'), ('object', 'codebase'), ('object', 'data'), ('object', 'usemap'), ('q', 'cite'), ('script', 'src')] def __init__(self, baseuri, encoding, _type): _BaseHTMLProcessor.__init__(self, encoding, _type) self.baseuri = baseuri def resolveURI(self, uri): return _makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip())) def unknown_starttag(self, tag, attrs): if _debug: sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs))) attrs = self.normalize_attrs(attrs) attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type): if _debug: sys.stderr.write('entering _resolveRelativeURIs\n') p = _RelativeURIResolver(baseURI, encoding, _type) p.feed(htmlSource) return p.output() def _makeSafeAbsoluteURI(base, rel=None): # bail if ACCEPTABLE_URI_SCHEMES is empty if not ACCEPTABLE_URI_SCHEMES: return _urljoin(base, rel or u'') if not base: return rel or u'' if not rel: scheme = urlparse.urlparse(base)[0] if not scheme or scheme in ACCEPTABLE_URI_SCHEMES: return base return u'' uri = _urljoin(base, rel) if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES: return u'' return uri class 
_HTMLSanitizer(_BaseHTMLProcessor): acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'] acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis', 'background', 'balance', 'bgcolor', 'bgproperties', 'border', 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg', 'radiogroup', 
'readonly', 'rel', 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', 'xml:lang'] unacceptable_elements_with_end_tag = ['script', 'applet', 'style'] acceptable_css_properties = ['azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width'] # survey of common keywords found in feeds acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow'] valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' + '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$') mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math', 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 
'mtext', 'mtr', 'munder', 'munderover', 'none', 'semantics'] mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', 'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth', 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows', 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', 'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'] # svgtiny - foreignObject + linearGradient + radialGradient + stop svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject', 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use'] # svgtiny + class + opacity + offset + xmlns + xmlns:xlink svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic', 'arabic-form', 'ascent', 'attributeName', 'attributeType', 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid', 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max', 'min', 'name', 'offset', 
'opacity', 'orient', 'origin', 'overline-position', 'overline-thickness', 'panose-1', 'path', 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity', 'strikethrough-position', 'strikethrough-thickness', 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan'] svg_attr_map = None svg_elem_map = None acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity'] def reset(self): _BaseHTMLProcessor.reset(self) self.unacceptablestack = 0 self.mathmlOK = 0 self.svgOK = 0 def unknown_starttag(self, tag, attrs): acceptable_attributes = self.acceptable_attributes keymap = {} if not tag in self.acceptable_elements or self.svgOK: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack += 1 # add implicit namespaces to html5 inline svg/mathml if self._type.endswith('html'): if not dict(attrs).get('xmlns'): if tag=='svg': attrs.append( ('xmlns','http://www.w3.org/2000/svg') ) if tag=='math': attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') ) # not otherwise acceptable, perhaps it is MathML or SVG? 
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs: self.mathmlOK += 1 if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs: self.svgOK += 1 # chose acceptable attributes based on tag class, else bail if self.mathmlOK and tag in self.mathml_elements: acceptable_attributes = self.mathml_attributes elif self.svgOK and tag in self.svg_elements: # for most vocabularies, lowercasing is a good idea. Many # svg elements, however, are camel case if not self.svg_attr_map: lower=[attr.lower() for attr in self.svg_attributes] mix=[a for a in self.svg_attributes if a not in lower] self.svg_attributes = lower self.svg_attr_map = dict([(a.lower(),a) for a in mix]) lower=[attr.lower() for attr in self.svg_elements] mix=[a for a in self.svg_elements if a not in lower] self.svg_elements = lower self.svg_elem_map = dict([(a.lower(),a) for a in mix]) acceptable_attributes = self.svg_attributes tag = self.svg_elem_map.get(tag,tag) keymap = self.svg_attr_map elif not tag in self.acceptable_elements: return # declare xlink namespace, if needed if self.mathmlOK or self.svgOK: if filter(lambda (n,v): n.startswith('xlink:'),attrs): if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs: attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink')) clean_attrs = [] for key, value in self.normalize_attrs(attrs): if key in acceptable_attributes: key=keymap.get(key,key) # make sure the uri uses an acceptable uri scheme if key == u'href': value = _makeSafeAbsoluteURI(value) clean_attrs.append((key,value)) elif key=='style': clean_value = self.sanitize_style(value) if clean_value: clean_attrs.append((key,clean_value)) _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs) def unknown_endtag(self, tag): if not tag in self.acceptable_elements: if tag in self.unacceptable_elements_with_end_tag: self.unacceptablestack -= 1 if self.mathmlOK and tag in self.mathml_elements: if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1 elif self.svgOK and tag 
in self.svg_elements: tag = self.svg_elem_map.get(tag,tag) if tag == 'svg' and self.svgOK: self.svgOK -= 1 else: return _BaseHTMLProcessor.unknown_endtag(self, tag) def handle_pi(self, text): pass def handle_decl(self, text): pass def handle_data(self, text): if not self.unacceptablestack: _BaseHTMLProcessor.handle_data(self, text) def sanitize_style(self, style): # disallow urls style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style) # gauntlet if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' # This replaced a regexp that used re.match and was prone to pathological back-tracking. if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return '' clean = [] for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style): if not value: continue if prop.lower() in self.acceptable_css_properties: clean.append(prop + ': ' + value + ';') elif prop.split('-')[0].lower() in ['background','border','margin','padding']: for keyword in value.split(): if not keyword in self.acceptable_css_keywords and \ not self.valid_css_values.match(keyword): break else: clean.append(prop + ': ' + value + ';') elif self.svgOK and prop.lower() in self.acceptable_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean) def parse_comment(self, i, report=1): ret = _BaseHTMLProcessor.parse_comment(self, i, report) if ret >= 0: return ret # if ret == -1, this may be a malicious attempt to circumvent # sanitization, or a page-destroying unclosed comment match = re.compile(r'--[^>]*>').search(self.rawdata, i+4) if match: return match.end() # unclosed comment; deliberately fail to handle_data() return len(self.rawdata) def _sanitizeHTML(htmlSource, encoding, _type): p = _HTMLSanitizer(encoding, _type) htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[') p.feed(htmlSource) data = p.output() if TIDY_MARKUP: # loop through list of preferred Tidy interfaces looking for one that's installed, # then set up a common 
_tidy function to wrap the interface-specific API. _tidy = None for tidy_interface in PREFERRED_TIDY_INTERFACES: try: if tidy_interface == "uTidy": from tidy import parseString as _utidy def _tidy(data, **kwargs): return str(_utidy(data, **kwargs)) break elif tidy_interface == "mxTidy": from mx.Tidy import Tidy as _mxtidy def _tidy(data, **kwargs): nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) return data break except: pass if _tidy: utf8 = type(data) == type(u'') if utf8: data = data.encode('utf-8') data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") if utf8: data = unicode(data, 'utf-8') if data.count('<body'): data = data.split('<body', 1)[1] if data.count('>'): data = data.split('>', 1)[1] if data.count('</body'): data = data.split('</body', 1)[0] data = data.strip().replace('\r\n', '\n') return data class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler): def http_error_default(self, req, fp, code, msg, headers): if ((code / 100) == 3) and (code != 304): return self.http_error_302(req, fp, code, msg, headers) infourl = urllib.addinfourl(fp, headers, req.get_full_url()) infourl.status = code return infourl def http_error_302(self, req, fp, code, msg, headers): if headers.dict.has_key('location'): infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) else: infourl = urllib.addinfourl(fp, headers, req.get_full_url()) if not hasattr(infourl, 'status'): infourl.status = code return infourl def http_error_301(self, req, fp, code, msg, headers): if headers.dict.has_key('location'): infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers) else: infourl = urllib.addinfourl(fp, headers, req.get_full_url()) if not hasattr(infourl, 'status'): infourl.status = code return infourl http_error_300 = http_error_302 http_error_303 = http_error_302 http_error_307 = http_error_302 def http_error_401(self, 
req, fp, code, msg, headers): # Check if # - server requires digest auth, AND # - we tried (unsuccessfully) with basic auth, AND # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions) # If all conditions hold, parse authentication information # out of the Authorization header we sent the first time # (for the username and password) and the WWW-Authenticate # header the server sent back (for the realm) and retry # the request with the appropriate digest auth headers instead. # This evil genius hack has been brought to you by Aaron Swartz. host = urlparse.urlparse(req.get_full_url())[1] try: assert sys.version.split()[0] >= '2.3.3' assert base64 != None user, passw = _base64decode(req.headers['Authorization'].split(' ')[1]).split(':') realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] self.add_password(realm, host, user, passw) retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) self.reset_retry_count() return retry except: return self.http_error_default(req, fp, code, msg, headers) def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers): """URL, filename, or string --> stream This function lets you define parsers that take any input source (URL, pathname to local or network file, or actual data as a string) and deal with it in a uniform manner. Returned object is guaranteed to have all the basic stdio read methods (read, readline, readlines). Just .close() the object when you're done with it. If the etag argument is supplied, it will be used as the value of an If-None-Match request header. If the modified argument is supplied, it can be a tuple of 9 integers (as returned by gmtime() in the standard Python time module) or a date string in any format supported by feedparser. Regardless, it MUST be in GMT (Greenwich Mean Time). 
It will be reformatted into an RFC 1123-compliant date and used as the value of an If-Modified-Since request header. If the agent argument is supplied, it will be used as the value of a User-Agent request header. If the referrer argument is supplied, it will be used as the value of a Referer[sic] request header. If handlers is supplied, it is a list of handlers used to build a urllib2 opener. if request_headers is supplied it is a dictionary of HTTP request headers that will override the values generated by FeedParser. """ if hasattr(url_file_stream_or_string, 'read'): return url_file_stream_or_string if url_file_stream_or_string == '-': return sys.stdin if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'): # Deal with the feed URI scheme if url_file_stream_or_string.startswith('feed:http'): url_file_stream_or_string = url_file_stream_or_string[5:] elif url_file_stream_or_string.startswith('feed:'): url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:] if not agent: agent = USER_AGENT # test for inline user:password for basic auth auth = None if base64: urltype, rest = urllib.splittype(url_file_stream_or_string) realhost, rest = urllib.splithost(rest) if realhost: user_passwd, realhost = urllib.splituser(realhost) if user_passwd: url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) auth = base64.standard_b64encode(user_passwd).strip() # iri support try: if isinstance(url_file_stream_or_string,unicode): url_file_stream_or_string = url_file_stream_or_string.encode('idna').decode('utf-8') else: url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna').decode('utf-8') except: pass # try to open with urllib2 (to use optional headers) request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers) opener = apply(urllib2.build_opener, tuple(handlers + [_FeedURLHandler()])) opener.addheaders = [] # RMK - must clear so we only 
send our custom User-Agent try: return opener.open(request, timeout=15) finally: opener.close() # JohnD # try to open with native open function (if url_file_stream_or_string is a filename) try: return open(url_file_stream_or_string, 'rb') except: pass # treat url_file_stream_or_string as string return _StringIO(str(url_file_stream_or_string)) def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers): request = urllib2.Request(url) request.add_header('User-Agent', agent) if etag: request.add_header('If-None-Match', etag) if type(modified) == type(''): modified = _parse_date(modified) elif isinstance(modified, datetime.datetime): modified = modified.utctimetuple() if modified: # format into an RFC 1123-compliant timestamp. We can't use # time.strftime() since the %a and %b directives can be affected # by the current locale, but RFC 2616 states that dates must be # in English. short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) if referrer: request.add_header('Referer', referrer) if gzip and zlib: request.add_header('Accept-encoding', 'gzip, deflate') elif gzip: request.add_header('Accept-encoding', 'gzip') elif zlib: request.add_header('Accept-encoding', 'deflate') else: request.add_header('Accept-encoding', '') if auth: request.add_header('Authorization', 'Basic %s' % auth) if ACCEPT_HEADER: request.add_header('Accept', ACCEPT_HEADER) # use this for whatever -- cookies, special headers, etc # [('Cookie','Something'),('x-special-header','Another Value')] for header_name, header_value in request_headers.items(): request.add_header(header_name, header_value) request.add_header('A-IM', 'feed') # RFC 3229 support return request 
# Registry of date-parsing callables. Each handler takes a date string and
# returns a 9-tuple in GMT (time.struct_time-compatible) or None.
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # Insert at the front: handlers registered later are tried first.
    _date_handlers.insert(0, func)

# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM', '---DD',
                 'CC', '']
# Expand each template into a named-group regex; every variant also accepts
# an optional time-of-day with fractional seconds and a tz designator.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# Remove the comprehension variable from module scope if it leaked
# (Python 2 list comprehensions leak the loop variable).
try:
    del tmpl
except NameError:
    pass
# Pre-bind the .match methods so the parse loop below avoids an attribute
# lookup per pattern.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # Try each pre-compiled pattern in declaration order (order matters —
    # see the template-list comment above) and stop at the first match.
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    # The empty-string template matches everything with a zero-width span;
    # reject that degenerate match.
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # No year in the match: default to the current (UTC) year.
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are not normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    # float() first so fractional seconds like '05.25' don't break int().
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward UTC and
    # let mktime() normalize any overflow.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)

# 8-bit date handling routines written by ytrewq1.
# Unicode strings for the Korean date formats handled below.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

# Matches e.g. '2004[year] 05[month] 28[day] 01:31:15' (OnBlog style).
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
# Matches e.g. '2004-05-25 [am/pm] 2:14:54' (Nate style, 12-hour clock).
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    # Rebuild as a W3DTF timestamp with a fixed KST (+09:00) offset and
    # delegate to the W3DTF parser.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    # Convert the 12-hour clock to 24-hour: add 12 for PM.
    hour = int(m.group(5))
    ampm = m.group(4)
    if (ampm == _korean_pm):
        hour += 12
    # Zero-pad a single-digit hour for the W3DTF string.
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

# Matches e.g. '2004-05-25 14:14:54.123' (MS SQL default datetime format).
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m:
        return
    # NOTE(review): the '+09:00' (KST) offset looks copied from the Korean
    # handlers above; MS SQL datetimes carry no zone info — confirm this
    # hard-coded offset is intended.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
# Maps abbreviated Greek month names (several spelling variants) to
# English RFC 822 month abbreviations.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

# Maps abbreviated Greek weekday names to English RFC 822 abbreviations.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# RFC-822-like layout with Greek weekday/month names:
# '<wday>, DD <month> YYYY HH:MM:SS <zone>'.
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString): '''Parse a string according to a Greek 8-bit date format.''' m = _greek_date_format_re.match(dateString) if not m: return try: wday = _greek_wdays[m.group(1)] month = _greek_months[m.group(3)] except: return rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ 'zonediff': m.group(8)} if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) return _parse_date_rfc822(rfc822date) registerDateHandler(_parse_date_greek) # Unicode strings for Hungarian date strings _hungarian_months = \ { \ u'janu\u00e1r': u'01', # e1 in iso-8859-2 u'febru\u00e1ri': u'02', # e1 in iso-8859-2 u'm\u00e1rcius': u'03', # e1 in iso-8859-2 u'\u00e1prilis': u'04', # e1 in iso-8859-2 u'm\u00e1ujus': u'05', # e1 in iso-8859-2 u'j\u00fanius': u'06', # fa in iso-8859-2 u'j\u00falius': u'07', # fa in iso-8859-2 u'augusztus': u'08', u'szeptember': u'09', u'okt\u00f3ber': u'10', # f3 in iso-8859-2 u'november': u'11', u'december': u'12', } _hungarian_date_format_re = \ re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') def _parse_date_hungarian(dateString): '''Parse a string according to a Hungarian 8-bit date format.''' m = _hungarian_date_format_re.match(dateString) if not m: return try: month = _hungarian_months[m.group(2)] day = m.group(3) if len(day) == 1: day = '0' + day hour = m.group(4) if len(hour) == 1: hour = '0' + hour except: return w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ {'year': m.group(1), 'month': month, 'day': day,\ 'hour': hour, 'minute': m.group(5),\ 'zonediff': m.group(6)} if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) return _parse_date_w3dtf(w3dtfdate) registerDateHandler(_parse_date_hungarian) # W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, 
written by # Drake and licensed under the Python license. Removed all range checking # for month, day, hour, minute, and second, since mktime will normalize # these later def _parse_date_w3dtf(dateString): def __extract_date(m): year = int(m.group('year')) if year < 100: year = 100 * int(time.gmtime()[0] / 100) + int(year) if year < 1000: return 0, 0, 0 julian = m.group('julian') if julian: julian = int(julian) month = julian / 30 + 1 day = julian % 30 + 1 jday = None while jday != julian: t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) jday = time.gmtime(t)[-2] diff = abs(jday - julian) if jday > julian: if diff < day: day = day - diff else: month = month - 1 day = 31 elif jday < julian: if day + diff < 28: day = day + diff else: month = month + 1 return year, month, day month = m.group('month') day = 1 if month is None: month = 1 else: month = int(month) day = m.group('day') if day: day = int(day) else: day = 1 return year, month, day def __extract_time(m): if not m: return 0, 0, 0 hours = m.group('hours') if not hours: return 0, 0, 0 hours = int(hours) minutes = int(m.group('minutes')) seconds = m.group('seconds') if seconds: seconds = int(seconds) else: seconds = 0 return hours, minutes, seconds def __extract_tzd(m): '''Return the Time Zone Designator as an offset in seconds from UTC.''' if not m: return 0 tzd = m.group('tzd') if not tzd: return 0 if tzd == 'Z': return 0 hours = int(m.group('tzdhours')) minutes = m.group('tzdminutes') if minutes: minutes = int(minutes) else: minutes = 0 offset = (hours*60 + minutes) * 60 if tzd[0] == '+': return -offset return offset __date_re = ('(?P<year>\d\d\d\d)' '(?:(?P<dsep>-|)' '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?' '|(?P<julian>\d\d\d)))?') __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)' __tzd_rx = re.compile(__tzd_re) __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)' '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?' + __tzd_re) __datetime_re = '%s(?:T%s)?' 
% (__date_re, __time_re) __datetime_rx = re.compile(__datetime_re) m = __datetime_rx.match(dateString) if (m is None) or (m.group() != dateString): return gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) if gmt[0] == 0: return return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) registerDateHandler(_parse_date_w3dtf) def _parse_date_rfc822(dateString): '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date''' data = dateString.split() if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames: del data[0] if len(data) == 4: s = data[3] i = s.find('+') if i > 0: data[3:] = [s[:i], s[i+1:]] else: data.append('') dateString = " ".join(data) # Account for the Etc/GMT timezone by stripping 'Etc/' elif len(data) == 5 and data[4].lower().startswith('etc/'): data[4] = data[4][4:] dateString = " ".join(data) if len(data) < 5: dateString += ' 00:00:00 GMT' tm = rfc822.parsedate_tz(dateString) if tm: return time.gmtime(rfc822.mktime_tz(tm)) # rfc822.py defines several time zones, but we define some extra ones. # 'ET' is equivalent to 'EST', etc. 
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} rfc822._timezones.update(_additional_timezones) registerDateHandler(_parse_date_rfc822) def _parse_date_perforce(aDateString): """parse a date in yyyy/mm/dd hh:mm:ss TTT format""" # Fri, 2006/09/15 08:19:53 EDT _my_date_pattern = re.compile( \ r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})') dow, year, month, day, hour, minute, second, tz = \ _my_date_pattern.search(aDateString).groups() months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz) tm = rfc822.parsedate_tz(dateString) if tm: return time.gmtime(rfc822.mktime_tz(tm)) registerDateHandler(_parse_date_perforce) def _parse_date(dateString): '''Parses a variety of date formats into a 9-tuple in GMT''' for handler in _date_handlers: try: date9tuple = handler(dateString) if not date9tuple: continue if len(date9tuple) != 9: if _debug: sys.stderr.write('date handler function must return 9-tuple\n') raise ValueError map(int, date9tuple) return date9tuple except Exception, e: if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) pass return None def _getCharacterEncoding(http_headers, xml_data): '''Get the character encoding of the XML document http_headers is a dictionary xml_data is a raw string (not Unicode) This is so much trickier than it sounds, it's not even funny. According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type is application/xml, application/*+xml, application/xml-external-parsed-entity, or application/xml-dtd, the encoding given in the charset parameter of the HTTP Content-Type takes precedence over the encoding given in the XML prefix within the document, and defaults to 'utf-8' if neither are specified. 
But, if the HTTP Content-Type is text/xml, text/*+xml, or text/xml-external-parsed-entity, the encoding given in the XML prefix within the document is ALWAYS IGNORED and only the encoding given in the charset parameter of the HTTP Content-Type header should be respected, and it defaults to 'us-ascii' if not specified. Furthermore, discussion on the atom-syntax mailing list with the author of RFC 3023 leads me to the conclusion that any document served with a Content-Type of text/* and no charset parameter must be treated as us-ascii. (We now do this.) And also that it must always be flagged as non-well-formed. (We now do this too.) If Content-Type is unspecified (input was local file or non-HTTP source) or unrecognized (server just got it totally wrong), then go by the encoding given in the XML prefix of the document and default to 'iso-8859-1' as per the HTTP specification (RFC 2616). Then, assuming we didn't find a character encoding in the HTTP headers (and the HTTP Content-type allowed us to look in the body), we need to sniff the first few bytes of the XML data and try to determine whether the encoding is ASCII-compatible. Section F of the XML specification shows the way here: http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info If the sniffed encoding is not ASCII-compatible, we need to make it ASCII compatible so that we can sniff further into the XML declaration to find the encoding attribute, which will tell us the true encoding. Of course, none of this guarantees that we will be able to parse the feed in the declared character encoding (assuming it was declared correctly, which many are not). CJKCodecs and iconv_codec help a lot; you should definitely install them if you can. 
http://cjkpython.i18n.org/ ''' def _parseHTTPContentType(content_type): '''takes HTTP Content-Type header and returns (content type, charset) If no charset is specified, returns (content type, '') If no content type is specified, returns ('', '') Both return parameters are guaranteed to be lowercase strings ''' content_type = content_type or '' content_type, params = cgi.parse_header(content_type) return content_type, params.get('charset', '').replace("'", '') sniffed_xml_encoding = '' xml_encoding = '' true_encoding = '' http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type'))) # Must sniff for non-ASCII-compatible character encodings before # searching for XML declaration. This heuristic is defined in # section F of the XML specification: # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info try: if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]): # EBCDIC xml_data = _ebcdic_to_ascii(xml_data) elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]): # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]): # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]): # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]): # UTF-32LE 
sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]): # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]): # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]): # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: # ASCII-compatible pass xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data) except: xml_encoding_match = None if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower() if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding acceptable_content_type = 0 application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') text_content_types = ('text/xml', 'text/xml-external-parsed-entity') if (http_content_type in application_content_types) or \ (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): acceptable_content_type = 1 true_encoding = http_encoding or xml_encoding or 'utf-8' elif (http_content_type in text_content_types) or \ (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): acceptable_content_type = 1 true_encoding = http_encoding or 'us-ascii' elif http_content_type.startswith('text/'): true_encoding = http_encoding or 'us-ascii' elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))): true_encoding = xml_encoding or 'iso-8859-1' else: true_encoding = xml_encoding or 'utf-8' # 
some feeds claim to be gb2312 but are actually gb18030. # apparently MSIE and Firefox both do the following switch: if true_encoding.lower() == 'gb2312': true_encoding = 'gb18030' return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type def _toUTF8(data, encoding): '''Changes an XML data stream on the fly to specify a new encoding data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already encoding is a string recognized by encodings.aliases ''' if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16be': sys.stderr.write('trying utf-16be instead\n') encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-16le': sys.stderr.write('trying utf-16le instead\n') encoding = 'utf-16le' data = data[2:] elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-8': sys.stderr.write('trying utf-8 instead\n') encoding = 'utf-8' data = data[3:] elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32be': sys.stderr.write('trying utf-32be instead\n') encoding = 'utf-32be' data = data[4:] elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]): if _debug: sys.stderr.write('stripping BOM\n') if encoding != 'utf-32le': sys.stderr.write('trying utf-32le instead\n') encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) declmatch = re.compile('^<\?xml[^>]*?>') newdecl = '''<?xml version='1.0' encoding='utf-8'?>''' 
if declmatch.search(newdata): newdata = declmatch.sub(newdecl, newdata) else: newdata = newdecl + u'\n' + newdata return newdata.encode('utf-8') def _stripDoctype(data): '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) rss_version may be 'rss091n' or None stripped_data is the same XML document, minus the DOCTYPE ''' start = re.search(_s2bytes('<\w'), data) start = start and start.start() or -1 head,data = data[:start+1], data[start+1:] entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE) entity_results=entity_pattern.findall(head) head = entity_pattern.sub(_s2bytes(''), head) doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE) doctype_results = doctype_pattern.findall(head) doctype = doctype_results and doctype_results[0] or _s2bytes('') if doctype.lower().count(_s2bytes('netscape')): version = 'rss091n' else: version = None # only allow in 'safe' inline entity definitions replacement=_s2bytes('') if len(doctype_results)==1 and entity_results: safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')) safe_entities=filter(lambda e: safe_pattern.match(e),entity_results) if safe_entities: replacement=_s2bytes('<!DOCTYPE feed [\n <!ENTITY') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>') data = doctype_pattern.sub(replacement, head) + data return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)]) def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], request_headers={}, response_headers={}): '''Parse a feed from a URL, file, stream, or string. request_headers, if given, is a dict from http header name to value to add to the request; this overrides internally generated values. 
''' result = FeedParserDict() result['feed'] = FeedParserDict() result['entries'] = [] if _XML_AVAILABLE: result['bozo'] = 0 if not isinstance(handlers, list): handlers = [handlers] try: f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers) data = f.read() except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = None f = None if hasattr(f, 'headers'): result['headers'] = dict(f.headers) # overwrite existing headers using response_headers if 'headers' in result: result['headers'].update(response_headers) elif response_headers: result['headers'] = copy.deepcopy(response_headers) # if feed is gzip-compressed, decompress it if f and data and 'headers' in result: if gzip and result['headers'].get('content-encoding') == 'gzip': try: data = gzip.GzipFile(fileobj=_StringIO(data)).read() except Exception, e: # Some feeds claim to be gzipped but they're not, so # we get garbage. Ideally, we should re-request the # feed without the 'Accept-encoding: gzip' header, # but we don't. 
result['bozo'] = 1 result['bozo_exception'] = e data = '' elif zlib and result['headers'].get('content-encoding') == 'deflate': try: data = zlib.decompress(data, -zlib.MAX_WBITS) except Exception, e: result['bozo'] = 1 result['bozo_exception'] = e data = '' # save HTTP headers if 'headers' in result: if 'etag' in result['headers'] or 'ETag' in result['headers']: etag = result['headers'].get('etag', result['headers'].get('ETag')) if etag: result['etag'] = etag if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']: modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified')) if modified: result['modified'] = _parse_date(modified) if hasattr(f, 'url'): result['href'] = f.url result['status'] = 200 if hasattr(f, 'status'): result['status'] = f.status if hasattr(f, 'close'): f.close() # there are four encodings to keep track of: # - http_encoding is the encoding declared in the Content-Type HTTP header # - xml_encoding is the encoding declared in the <?xml declaration # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications http_headers = result.get('headers', {}) result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \ _getCharacterEncoding(http_headers, data) if http_headers and (not acceptable_content_type): if http_headers.has_key('content-type') or http_headers.has_key('Content-type'): bozo_message = '%s is not an XML media type' % http_headers.get('content-type', http_headers.get('Content-type')) else: bozo_message = 'no Content-type specified' result['bozo'] = 1 result['bozo_exception'] = NonXMLContentType(bozo_message) if data is not None: result['version'], data, entities = _stripDoctype(data) # ensure that baseuri is an absolute uri using an acceptable URI scheme contentloc = http_headers.get('content-location', 
http_headers.get('Content-Location', '')) href = result.get('href', '') baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href baselang = http_headers.get('content-language', http_headers.get('Content-Language', None)) # if server sent 304, we're done if result.get('status', 0) == 304: result['version'] = '' result['debug_message'] = 'The feed has not changed since you last checked, ' + \ 'so the server sent no data. This is a feature, not a bug!' return result # if there was a problem downloading, we're done if data is None: return result # determine character encoding use_strict_parser = 0 known_encoding = 0 tried_encodings = [] # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding): if not proposed_encoding: continue if proposed_encoding in tried_encodings: continue tried_encodings.append(proposed_encoding) try: data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 break except: pass # if no luck and we have auto-detection library, try that if (not known_encoding) and chardet: try: proposed_encoding = chardet.detect(data)['encoding'] if proposed_encoding and (proposed_encoding not in tried_encodings): tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried utf-8 yet, try that if (not known_encoding) and ('utf-8' not in tried_encodings): try: proposed_encoding = 'utf-8' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck and we haven't tried windows-1252 yet, try that if (not known_encoding) and ('windows-1252' not in tried_encodings): try: proposed_encoding = 'windows-1252' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser 
= 1 except: pass # if still no luck and we haven't tried iso-8859-2 yet, try that. if (not known_encoding) and ('iso-8859-2' not in tried_encodings): try: proposed_encoding = 'iso-8859-2' tried_encodings.append(proposed_encoding) data = _toUTF8(data, proposed_encoding) known_encoding = use_strict_parser = 1 except: pass # if still no luck, give up if not known_encoding: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingUnknown( \ 'document encoding unknown, I tried ' + \ '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \ (result['encoding'], xml_encoding)) result['encoding'] = '' elif proposed_encoding != result['encoding']: result['bozo'] = 1 result['bozo_exception'] = CharacterEncodingOverride( \ 'document declared as %s, but parsed as %s' % \ (result['encoding'], proposed_encoding)) result['encoding'] = proposed_encoding if not _XML_AVAILABLE: use_strict_parser = 0 if use_strict_parser: # initialize the SAX parser feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) saxparser.setContentHandler(feedparser) saxparser.setErrorHandler(feedparser) source = xml.sax.xmlreader.InputSource() source.setByteStream(_StringIO(data)) if hasattr(saxparser, '_ns_stack'): # work around bug in built-in SAX parser (doesn't recognize xml: namespace) # PyXML doesn't have this problem, and it doesn't have _ns_stack either saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'}) try: saxparser.parse(source) except Exception, e: if _debug: import traceback traceback.print_stack() traceback.print_exc() sys.stderr.write('xml parsing failed\n') result['bozo'] = 1 result['bozo_exception'] = feedparser.exc or e use_strict_parser = 0 if not use_strict_parser: feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities) feedparser.feed(data.decode('utf-8', 'replace')) result['feed'] = feedparser.feeddata 
result['entries'] = feedparser.entries result['version'] = result['version'] or feedparser.version result['namespaces'] = feedparser.namespacesInUse return result class Serializer: def __init__(self, results): self.results = results class TextSerializer(Serializer): def write(self, stream=sys.stdout): self._writer(stream, self.results, '') def _writer(self, stream, node, prefix): if not node: return if hasattr(node, 'keys'): keys = node.keys() keys.sort() for k in keys: if k in ('description', 'link'): continue if node.has_key(k + '_detail'): continue if node.has_key(k + '_parsed'): continue self._writer(stream, node[k], prefix + k + '.') elif type(node) == types.ListType: index = 0 for n in node: self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].') index += 1 else: try: s = str(node).encode('utf-8') s = s.replace('\\', '\\\\') s = s.replace('\r', '') s = s.replace('\n', r'\n') stream.write(prefix[:-1]) stream.write('=') stream.write(s) stream.write('\n') except: pass class PprintSerializer(Serializer): def write(self, stream=sys.stdout): if self.results.has_key('href'): stream.write(self.results['href'] + '\n\n') from pprint import pprint pprint(self.results, stream) stream.write('\n') if __name__ == '__main__': try: from optparse import OptionParser except: OptionParser = None if OptionParser: optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-") optionParser.set_defaults(format="pprint") optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs") optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs") optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs") optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)") optionParser.add_option("-f", 
"--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)") optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr") (options, urls) = optionParser.parse_args() if options.verbose: _debug = 1 if not urls: optionParser.print_help() sys.exit(0) else: if not sys.argv[1:]: print __doc__ sys.exit(0) class _Options: etag = modified = agent = referrer = None format = 'pprint' options = _Options() urls = sys.argv[1:] zopeCompatibilityHack() serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer) for url in urls: results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer) serializer(results).write(sys.stdout)
168,077
Python
.py
3,505
37.577175
297
0.57918
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,763
certgen.py
evilhero_mylar/lib/certgen.py
# -*- coding: latin-1 -*- # # Copyright (C) Martin Sjögren and AB Strakt 2001, All rights reserved # Copyright (C) Jean-Paul Calderone 2008, All rights reserved # This file is licenced under the GNU LESSER GENERAL PUBLIC LICENSE Version 2.1 or later (aka LGPL v2.1) # Please see LGPL2.1.txt for more information """ Certificate generation module. """ from OpenSSL import crypto import time TYPE_RSA = crypto.TYPE_RSA TYPE_DSA = crypto.TYPE_DSA serial = int(time.time()) def createKeyPair(type, bits): """ Create a public/private key pair. Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA bits - Number of bits to use in the key Returns: The public/private key pair in a PKey object """ pkey = crypto.PKey() pkey.generate_key(type, bits) return pkey def createCertRequest(pkey, digest="md5", **name): """ Create a certificate request. Arguments: pkey - The key to associate with the request digest - Digestion method to use for signing, default is md5 **name - The name of the subject of the request, possible arguments are: C - Country name ST - State or province name L - Locality name O - Organization name OU - Organizational unit name CN - Common name emailAddress - E-mail address Returns: The certificate request in an X509Req object """ req = crypto.X509Req() subj = req.get_subject() for (key,value) in name.items(): setattr(subj, key, value) req.set_pubkey(pkey) req.sign(pkey, digest) return req def createCertificate(req, (issuerCert, issuerKey), serial, (notBefore, notAfter), digest="md5"): """ Generate a certificate given a certificate request. 
Arguments: req - Certificate reqeust to use issuerCert - The certificate of the issuer issuerKey - The private key of the issuer serial - Serial number for the certificate notBefore - Timestamp (relative to now) when the certificate starts being valid notAfter - Timestamp (relative to now) when the certificate stops being valid digest - Digest method to use for signing, default is md5 Returns: The signed certificate in an X509 object """ cert = crypto.X509() cert.set_serial_number(serial) cert.gmtime_adj_notBefore(notBefore) cert.gmtime_adj_notAfter(notAfter) cert.set_issuer(issuerCert.get_subject()) cert.set_subject(req.get_subject()) cert.set_pubkey(req.get_pubkey()) cert.sign(issuerKey, digest) return cert
2,892
Python
.py
70
32.471429
104
0.621708
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,764
six.py
evilhero_mylar/lib/six.py
"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. 
This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", 
"cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + 
".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", 
"urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", 
"urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes 
= [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except 
AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return 
d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, 
"exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): if from_value is None: raise value raise value from from_value """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): raise value from from_value """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. 
class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. 
Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
30,098
Python
.py
699
36.296137
98
0.650222
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,765
bencode.py
evilhero_mylar/lib/bencode.py
# Copyright (C) 2011 by clueless <clueless.nospam ! mail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# # Version: 20111107 # # Changelog # --------- # 2011-11-07 - Added support for Python2 (tested on 2.6) # 2011-10-03 - Fixed: moved check for end of list at the top of the while loop # in _decode_list (in case the list is empty) (Chris Lucas) # - Converted dictionary keys to str # 2011-04-24 - Changed date format to YYYY-MM-DD for versioning, bigger # integer denotes a newer version # - Fixed a bug that would treat False as an integral type but # encode it using the 'False' string, attempting to encode a # boolean now results in an error # - Fixed a bug where an integer value of 0 in a list or # dictionary resulted in a parse error while decoding # # 2011-04-03 - Original release import sys _py3 = sys.version_info[0] == 3 if _py3: _VALID_STRING_TYPES = (str,) else: _VALID_STRING_TYPES = (str, unicode) # @UndefinedVariable _TYPE_INT = 1 _TYPE_STRING = 2 _TYPE_LIST = 3 _TYPE_DICTIONARY = 4 _TYPE_END = 5 _TYPE_INVALID = 6 # Function to determine the type of he next value/item # Arguments: # char First character of the string that is to be decoded # Return value: # Returns an integer that describes what type the next value/item is def _gettype(char): if not isinstance(char, int): char = ord(char) if char == 0x6C: # 'l' return _TYPE_LIST elif char == 0x64: # 'd' return _TYPE_DICTIONARY elif char == 0x69: # 'i' return _TYPE_INT elif char == 0x65: # 'e' return _TYPE_END elif char >= 0x30 and char <= 0x39: # '0' '9' return _TYPE_STRING else: return _TYPE_INVALID # Function to parse a string from the bendcoded data # Arguments: # data bencoded data, must be guaranteed to be a string # Return Value: # Returns a tuple, the first member of the tuple is the parsed string # The second member is whatever remains of the bencoded data so it can # be used to parse the next part of the data def _decode_string(data): end = 1 # if py3, data[end] is going to be an int # if py2, data[end] will be a string if _py3: char = 0x3A else: char = chr(0x3A) while data[end] != char: # ':' end = 
end + 1 strlen = int(data[:end]) return (data[end + 1:strlen + end + 1], data[strlen + end + 1:]) # Function to parse an integer from the bencoded data # Arguments: # data bencoded data, must be guaranteed to be an integer # Return Value: # Returns a tuple, the first member of the tuple is the parsed string # The second member is whatever remains of the bencoded data so it can # be used to parse the next part of the data def _decode_int(data): end = 1 # if py3, data[end] is going to be an int # if py2, data[end] will be a string if _py3: char = 0x65 else: char = chr(0x65) while data[end] != char: # 'e' end = end + 1 return (int(data[1:end]), data[end + 1:]) # Function to parse a bencoded list # Arguments: # data bencoded data, must be guaranted to be the start of a list # Return Value: # Returns a tuple, the first member of the tuple is the parsed list # The second member is whatever remains of the bencoded data so it can # be used to parse the next part of the data def _decode_list(data): x = [] overflow = data[1:] while True: # Loop over the data if _gettype(overflow[0]) == _TYPE_END: # - Break if we reach the end of the list return (x, overflow[1:]) # and return the list and overflow value, overflow = _decode(overflow) # if isinstance(value, bool) or overflow == '': # - if we have a parse error return (False, False) # Die with error else: # - Otherwise x.append(value) # add the value to the list # Function to parse a bencoded list # Arguments: # data bencoded data, must be guaranted to be the start of a list # Return Value: # Returns a tuple, the first member of the tuple is the parsed dictionary # The second member is whatever remains of the bencoded data so it can # be used to parse the next part of the data def _decode_dict(data): x = {} overflow = data[1:] while True: # Loop over the data if _gettype(overflow[0]) != _TYPE_STRING: # - If the key is not a string return (False, False) # Die with error key, overflow = _decode(overflow) # if key == False or 
overflow == '': # - If parse error return (False, False) # Die with error value, overflow = _decode(overflow) # if isinstance(value, bool) or overflow == '': # - If parse error print("Error parsing value") print(value) print(overflow) return (False, False) # Die with error else: # don't use bytes for the key key = key.decode() x[key] = value if _gettype(overflow[0]) == _TYPE_END: return (x, overflow[1:]) # Arguments: # data bencoded data in bytes format # Return Values: # Returns a tuple, the first member is the parsed data, could be a string, # an integer, a list or a dictionary, or a combination of those # The second member is the leftover of parsing, if everything parses correctly this # should be an empty byte string def _decode(data): btype = _gettype(data[0]) if btype == _TYPE_INT: return _decode_int(data) elif btype == _TYPE_STRING: return _decode_string(data) elif btype == _TYPE_LIST: return _decode_list(data) elif btype == _TYPE_DICTIONARY: return _decode_dict(data) else: return (False, False) # Function to decode bencoded data # Arguments: # data bencoded data, can be str or bytes # Return Values: # Returns the decoded data on success, this coud be bytes, int, dict or list # or a combinatin of those # If an error occurs the return value is False def decode(data): # if isinstance(data, str): # data = data.encode() decoded, overflow = _decode(data) return decoded # Args: data as integer # return: encoded byte string def _encode_int(data): return b'i' + str(data).encode() + b'e' # Args: data as string or bytes # Return: encoded byte string def _encode_string(data): return str(len(data)).encode() + b':' + data # Args: data as list # Return: Encoded byte string, false on error def _encode_list(data): elist = b'l' for item in data: eitem = encode(item) if eitem == False: return False elist += eitem return elist + b'e' # Args: data as dict # Return: encoded byte string, false on error def _encode_dict(data): edict = b'd' keys = [] for key in data: if not 
isinstance(key, _VALID_STRING_TYPES) and not isinstance(key, bytes): return False keys.append(key) keys.sort() for key in keys: ekey = encode(key) eitem = encode(data[key]) if ekey == False or eitem == False: return False edict += ekey + eitem return edict + b'e' # Function to encode a variable in bencoding # Arguments: # data Variable to be encoded, can be a list, dict, str, bytes, int or a combination of those # Return Values: # Returns the encoded data as a byte string when successful # If an error occurs the return value is False def encode(data): if isinstance(data, bool): return False elif isinstance(data, int): return _encode_int(data) elif isinstance(data, bytes): return _encode_string(data) elif isinstance(data, _VALID_STRING_TYPES): return _encode_string(data.encode()) elif isinstance(data, list): return _encode_list(data) elif isinstance(data, dict): return _encode_dict(data) else: return False
9,132
Python
.py
239
33.753138
97
0.655971
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,766
pathlib.py
evilhero_mylar/lib/pathlib.py
import fnmatch import functools import io import ntpath import os import posixpath import re import sys import time from collections import Sequence from contextlib import contextmanager from errno import EINVAL, ENOENT from operator import attrgetter from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO try: from urllib import quote as urlquote, quote as urlquote_from_bytes except ImportError: from urllib.parse import quote as urlquote, quote_from_bytes as urlquote_from_bytes try: intern = intern except NameError: intern = sys.intern try: basestring = basestring except NameError: basestring = str supports_symlinks = True try: import nt except ImportError: nt = None else: if sys.getwindowsversion()[:2] >= (6, 0) and sys.version_info >= (3, 2): from nt import _getfinalpathname else: supports_symlinks = False _getfinalpathname = None __all__ = [ "PurePath", "PurePosixPath", "PureWindowsPath", "Path", "PosixPath", "WindowsPath", ] # # Internals # _py2 = sys.version_info < (3,) _py2_fs_encoding = 'ascii' def _py2_fsencode(parts): # py2 => minimal unicode support return [part.encode(_py2_fs_encoding) if isinstance(part, unicode) else part for part in parts] def _is_wildcard_pattern(pat): # Whether this pattern needs actual matching using fnmatch, or can # be looked up directly as a file. return "*" in pat or "?" 
in pat or "[" in pat class _Flavour(object): """A flavour implements a particular (platform-specific) set of path semantics.""" def __init__(self): self.join = self.sep.join def parse_parts(self, parts): if _py2: parts = _py2_fsencode(parts) parsed = [] sep = self.sep altsep = self.altsep drv = root = '' it = reversed(parts) for part in it: if not part: continue if altsep: part = part.replace(altsep, sep) drv, root, rel = self.splitroot(part) if sep in rel: for x in reversed(rel.split(sep)): if x and x != '.': parsed.append(intern(x)) else: if rel and rel != '.': parsed.append(intern(rel)) if drv or root: if not drv: # If no drive is present, try to find one in the previous # parts. This makes the result of parsing e.g. # ("C:", "/", "a") reasonably intuitive. for part in it: drv = self.splitroot(part)[0] if drv: break break if drv or root: parsed.append(drv + root) parsed.reverse() return drv, root, parsed def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2): """ Join the two paths represented by the respective (drive, root, parts) tuples. Return a new (drive, root, parts) tuple. 
""" if root2: if not drv2 and drv: return drv, root2, [drv + root2] + parts2[1:] elif drv2: if drv2 == drv or self.casefold(drv2) == self.casefold(drv): # Same drive => second path is relative to the first return drv, root, parts + parts2[1:] else: # Second path is non-anchored (common case) return drv, root, parts + parts2 return drv2, root2, parts2 class _WindowsFlavour(_Flavour): # Reference for Windows paths can be found at # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx sep = '\\' altsep = '/' has_drv = True pathmod = ntpath is_supported = (nt is not None) drive_letters = ( set(chr(x) for x in range(ord('a'), ord('z') + 1)) | set(chr(x) for x in range(ord('A'), ord('Z') + 1)) ) ext_namespace_prefix = '\\\\?\\' reserved_names = ( set(['CON', 'PRN', 'AUX', 'NUL']) | set(['COM%d' % i for i in range(1, 10)]) | set(['LPT%d' % i for i in range(1, 10)]) ) # Interesting findings about extended paths: # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported # but '\\?\c:/a' is not # - extended paths are always absolute; "relative" extended paths will # fail. def splitroot(self, part, sep=sep): first = part[0:1] second = part[1:2] if (second == sep and first == sep): # XXX extended paths should also disable the collapsing of "." # components (according to MSDN docs). prefix, part = self._split_extended_path(part) first = part[0:1] second = part[1:2] else: prefix = '' third = part[2:3] if (second == sep and first == sep and third != sep): # is a UNC path: # vvvvvvvvvvvvvvvvvvvvv root # \\machine\mountpoint\directory\etc\... 
# directory ^^^^^^^^^^^^^^ index = part.find(sep, 2) if index != -1: index2 = part.find(sep, index + 1) # a UNC path can't have two slashes in a row # (after the initial two) if index2 != index + 1: if index2 == -1: index2 = len(part) if prefix: return prefix + part[1:index2], sep, part[index2+1:] else: return part[:index2], sep, part[index2+1:] drv = root = '' if second == ':' and first in self.drive_letters: drv = part[:2] part = part[2:] first = third if first == sep: root = first part = part.lstrip(sep) return prefix + drv, root, part def casefold(self, s): return s.lower() def casefold_parts(self, parts): return [p.lower() for p in parts] def resolve(self, path): s = str(path) if not s: return os.getcwd() if _getfinalpathname is not None: return self._ext_to_normal(_getfinalpathname(s)) # Means fallback on absolute return None def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix): prefix = '' if s.startswith(ext_prefix): prefix = s[:4] s = s[4:] if s.startswith('UNC\\'): prefix += s[:3] s = '\\' + s[3:] return prefix, s def _ext_to_normal(self, s): # Turn back an extended path into a normal DOS-like path return self._split_extended_path(s)[1] def is_reserved(self, parts): # NOTE: the rules for reserved names seem somewhat complicated # (e.g. r"..\NUL" is reserved but not r"foo\NUL"). # We err on the side of caution and return True for paths which are # not considered reserved by Windows. if not parts: return False if parts[0].startswith('\\\\'): # UNC paths are never reserved return False return parts[-1].partition('.')[0].upper() in self.reserved_names def make_uri(self, path): # Under Windows, file URIs use the UTF-8 encoding. 
drive = path.drive if len(drive) == 2 and drive[1] == ':': # It's a path on a local drive => 'file:///c:/a/b' rest = path.as_posix()[2:].lstrip('/') return 'file:///%s/%s' % ( drive, urlquote_from_bytes(rest.encode('utf-8'))) else: # It's a path on a network drive => 'file://host/share/a/b' return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8')) class _PosixFlavour(_Flavour): sep = '/' altsep = '' has_drv = False pathmod = posixpath is_supported = (os.name != 'nt') def splitroot(self, part, sep=sep): if part and part[0] == sep: stripped_part = part.lstrip(sep) # According to POSIX path resolution: # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11 # "A pathname that begins with two successive slashes may be # interpreted in an implementation-defined manner, although more # than two leading slashes shall be treated as a single slash". if len(part) - len(stripped_part) == 2: return '', sep * 2, stripped_part else: return '', sep, stripped_part else: return '', '', part def casefold(self, s): return s def casefold_parts(self, parts): return parts def resolve(self, path): sep = self.sep accessor = path._accessor seen = {} def _resolve(path, rest): if rest.startswith(sep): path = '' for name in rest.split(sep): if not name or name == '.': # current dir continue if name == '..': # parent dir path, _, _ = path.rpartition(sep) continue newpath = path + sep + name if newpath in seen: # Already seen this path path = seen[newpath] if path is not None: # use cached value continue # The symlink is not resolved, so we must have a symlink loop. 
raise RuntimeError("Symlink loop from %r" % newpath) # Resolve the symbolic link try: target = accessor.readlink(newpath) except OSError as e: if e.errno != EINVAL: raise # Not a symlink path = newpath else: seen[newpath] = None # not resolved symlink path = _resolve(path, target) seen[newpath] = path # resolved symlink return path # NOTE: according to POSIX, getcwd() cannot contain path components # which are symlinks. base = '' if path.is_absolute() else os.getcwd() return _resolve(base, str(path)) or sep def is_reserved(self, parts): return False def make_uri(self, path): # We represent the path using the local filesystem encoding, # for portability to other applications. bpath = bytes(path) return 'file://' + urlquote_from_bytes(bpath) _windows_flavour = _WindowsFlavour() _posix_flavour = _PosixFlavour() class _Accessor: """An accessor implements a particular (system-specific or not) way of accessing paths on the filesystem.""" class _NormalAccessor(_Accessor): def _wrap_strfunc(strfunc): @functools.wraps(strfunc) def wrapped(pathobj, *args): return strfunc(str(pathobj), *args) return staticmethod(wrapped) def _wrap_binary_strfunc(strfunc): @functools.wraps(strfunc) def wrapped(pathobjA, pathobjB, *args): return strfunc(str(pathobjA), str(pathobjB), *args) return staticmethod(wrapped) stat = _wrap_strfunc(os.stat) lstat = _wrap_strfunc(os.lstat) open = _wrap_strfunc(os.open) listdir = _wrap_strfunc(os.listdir) chmod = _wrap_strfunc(os.chmod) if hasattr(os, "lchmod"): lchmod = _wrap_strfunc(os.lchmod) else: def lchmod(self, pathobj, mode): raise NotImplementedError("lchmod() not available on this system") mkdir = _wrap_strfunc(os.mkdir) unlink = _wrap_strfunc(os.unlink) rmdir = _wrap_strfunc(os.rmdir) rename = _wrap_binary_strfunc(os.rename) if sys.version_info >= (3, 3): replace = _wrap_binary_strfunc(os.replace) if nt: if supports_symlinks: symlink = _wrap_binary_strfunc(os.symlink) else: def symlink(a, b, target_is_directory): raise 
NotImplementedError("symlink() not available on this system") else: # Under POSIX, os.symlink() takes two args @staticmethod def symlink(a, b, target_is_directory): return os.symlink(str(a), str(b)) utime = _wrap_strfunc(os.utime) # Helper for resolve() def readlink(self, path): return os.readlink(path) _normal_accessor = _NormalAccessor() # # Globbing helpers # @contextmanager def _cached(func): try: func.__cached__ yield func except AttributeError: cache = {} def wrapper(*args): try: return cache[args] except KeyError: value = cache[args] = func(*args) return value wrapper.__cached__ = True try: yield wrapper finally: cache.clear() def _make_selector(pattern_parts): pat = pattern_parts[0] child_parts = pattern_parts[1:] if pat == '**': cls = _RecursiveWildcardSelector elif '**' in pat: raise ValueError("Invalid pattern: '**' can only be an entire path component") elif _is_wildcard_pattern(pat): cls = _WildcardSelector else: cls = _PreciseSelector return cls(pat, child_parts) if hasattr(functools, "lru_cache"): _make_selector = functools.lru_cache()(_make_selector) class _Selector: """A selector matches a specific glob pattern part against the children of a given path.""" def __init__(self, child_parts): self.child_parts = child_parts if child_parts: self.successor = _make_selector(child_parts) else: self.successor = _TerminatingSelector() def select_from(self, parent_path): """Iterate over all child paths of `parent_path` matched by this selector. 
This can contain parent_path itself.""" path_cls = type(parent_path) is_dir = path_cls.is_dir exists = path_cls.exists listdir = parent_path._accessor.listdir return self._select_from(parent_path, is_dir, exists, listdir) class _TerminatingSelector: def _select_from(self, parent_path, is_dir, exists, listdir): yield parent_path class _PreciseSelector(_Selector): def __init__(self, name, child_parts): self.name = name _Selector.__init__(self, child_parts) def _select_from(self, parent_path, is_dir, exists, listdir): if not is_dir(parent_path): return path = parent_path._make_child_relpath(self.name) if exists(path): for p in self.successor._select_from(path, is_dir, exists, listdir): yield p class _WildcardSelector(_Selector): def __init__(self, pat, child_parts): self.pat = re.compile(fnmatch.translate(pat)) _Selector.__init__(self, child_parts) def _select_from(self, parent_path, is_dir, exists, listdir): if not is_dir(parent_path): return cf = parent_path._flavour.casefold for name in listdir(parent_path): casefolded = cf(name) if self.pat.match(casefolded): path = parent_path._make_child_relpath(name) for p in self.successor._select_from(path, is_dir, exists, listdir): yield p class _RecursiveWildcardSelector(_Selector): def __init__(self, pat, child_parts): _Selector.__init__(self, child_parts) def _iterate_directories(self, parent_path, is_dir, listdir): yield parent_path for name in listdir(parent_path): path = parent_path._make_child_relpath(name) if is_dir(path): for p in self._iterate_directories(path, is_dir, listdir): yield p def _select_from(self, parent_path, is_dir, exists, listdir): if not is_dir(parent_path): return with _cached(listdir) as listdir: yielded = set() try: successor_select = self.successor._select_from for starting_point in self._iterate_directories(parent_path, is_dir, listdir): for p in successor_select(starting_point, is_dir, exists, listdir): if p not in yielded: yield p yielded.add(p) finally: yielded.clear() # # Public API # 
class _PathParents(Sequence): """This object provides sequence-like access to the logical ancestors of a path. Don't try to construct it yourself.""" __slots__ = ('_pathcls', '_drv', '_root', '_parts') def __init__(self, path): # We don't store the instance to avoid reference cycles self._pathcls = type(path) self._drv = path._drv self._root = path._root self._parts = path._parts def __len__(self): if self._drv or self._root: return len(self._parts) - 1 else: return len(self._parts) def __getitem__(self, idx): if idx < 0 or idx >= len(self): raise IndexError(idx) return self._pathcls._from_parsed_parts(self._drv, self._root, self._parts[:-idx - 1]) def __repr__(self): return "<{0}.parents>".format(self._pathcls.__name__) class PurePath(object): """PurePath represents a filesystem path and offers operations which don't imply any actual filesystem I/O. Depending on your system, instantiating a PurePath will return either a PurePosixPath or a PureWindowsPath object. You can also instantiate either of these classes directly, regardless of your system. """ __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) def __new__(cls, *args): """Construct a PurePath from one or several strings and or existing PurePath objects. The strings and path objects are combined so as to yield a canonicalized path, which is incorporated into the new PurePath object. """ if cls is PurePath: cls = PureWindowsPath if os.name == 'nt' else PurePosixPath return cls._from_parts(args) def __reduce__(self): # Using the parts tuple helps share interned path parts # when pickling related paths. return (self.__class__, tuple(self._parts)) @classmethod def _parse_args(cls, args): # This is useful when you don't want to create an instance, just # canonicalize some constructor arguments. 
parts = [] for a in args: if isinstance(a, PurePath): parts += a._parts elif isinstance(a, basestring): parts.append(a) else: raise TypeError( "argument should be a path or str object, not %r" % type(a)) return cls._flavour.parse_parts(parts) @classmethod def _from_parts(cls, args, init=True): # We need to call _parse_args on the instance, so as to get the # right flavour. self = object.__new__(cls) drv, root, parts = self._parse_args(args) self._drv = drv self._root = root self._parts = parts if init: self._init() return self @classmethod def _from_parsed_parts(cls, drv, root, parts, init=True): self = object.__new__(cls) self._drv = drv self._root = root self._parts = parts if init: self._init() return self @classmethod def _format_parsed_parts(cls, drv, root, parts): if drv or root: return drv + root + cls._flavour.join(parts[1:]) else: return cls._flavour.join(parts) def _init(self): # Overriden in concrete Path pass def _make_child(self, args): drv, root, parts = self._parse_args(args) drv, root, parts = self._flavour.join_parsed_parts( self._drv, self._root, self._parts, drv, root, parts) return self._from_parsed_parts(drv, root, parts) def __str__(self): """Return the string representation of the path, suitable for passing to system calls.""" try: return self._str except AttributeError: self._str = self._format_parsed_parts(self._drv, self._root, self._parts) or '.' return self._str def as_posix(self): """Return the string representation of the path with forward (/) slashes.""" f = self._flavour return str(self).replace(f.sep, '/') def __bytes__(self): """Return the bytes representation of the path. 
This is only recommended to use under Unix.""" if sys.version_info < (3, 2): raise NotImplementedError("needs Python 3.2 or later") return os.fsencode(str(self)) def __repr__(self): return "{0}({1!r})".format(self.__class__.__name__, self.as_posix()) def as_uri(self): """Return the path as a 'file' URI.""" if not self.is_absolute(): raise ValueError("relative path can't be expressed as a file URI") return self._flavour.make_uri(self) @property def _cparts(self): # Cached casefolded parts, for hashing and comparison try: return self._cached_cparts except AttributeError: self._cached_cparts = self._flavour.casefold_parts(self._parts) return self._cached_cparts def __eq__(self, other): if not isinstance(other, PurePath): return NotImplemented return self._cparts == other._cparts and self._flavour is other._flavour def __ne__(self, other): return not self == other def __hash__(self): try: return self._hash except AttributeError: self._hash = hash(tuple(self._cparts)) return self._hash def __lt__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts < other._cparts def __le__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts <= other._cparts def __gt__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts > other._cparts def __ge__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts >= other._cparts drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") @property def anchor(self): """The concatenation of the drive and root, or ''.""" anchor = self._drv + self._root return anchor @property def name(self): """The final path component, if any.""" 
parts = self._parts if len(parts) == (1 if (self._drv or self._root) else 0): return '' return parts[-1] @property def suffix(self): """The final component's last suffix, if any.""" name = self.name i = name.rfind('.') if 0 < i < len(name) - 1: return name[i:] else: return '' @property def suffixes(self): """A list of the final component's suffixes, if any.""" name = self.name if name.endswith('.'): return [] name = name.lstrip('.') return ['.' + suffix for suffix in name.split('.')[1:]] @property def stem(self): """The final path component, minus its last suffix.""" name = self.name i = name.rfind('.') if 0 < i < len(name) - 1: return name[:i] else: return name def with_name(self, name): """Return a new path with the file name changed.""" if not self.name: raise ValueError("%r has an empty name" % (self,)) return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]) def with_suffix(self, suffix): """Return a new path with the file suffix changed (or added, if none).""" # XXX if suffix is None, should the current suffix be removed? drv, root, parts = self._flavour.parse_parts((suffix,)) if drv or root or len(parts) != 1: raise ValueError("Invalid suffix %r" % (suffix)) suffix = parts[0] if not suffix.startswith('.'): raise ValueError("Invalid suffix %r" % (suffix)) name = self.name if not name: raise ValueError("%r has an empty name" % (self,)) old_suffix = self.suffix if not old_suffix: name = name + suffix else: name = name[:-len(old_suffix)] + suffix return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]) def relative_to(self, *other): """Return the relative path to another path identified by the passed arguments. If the operation is not possible (because this is not a subpath of the other path), raise ValueError. 
""" # For the purpose of this method, drive and root are considered # separate parts, i.e.: # Path('c:/').relative_to('c:') gives Path('/') # Path('c:/').relative_to('/') raise ValueError if not other: raise TypeError("need at least one argument") parts = self._parts drv = self._drv root = self._root if root: abs_parts = [drv, root] + parts[1:] else: abs_parts = parts to_drv, to_root, to_parts = self._parse_args(other) if to_root: to_abs_parts = [to_drv, to_root] + to_parts[1:] else: to_abs_parts = to_parts n = len(to_abs_parts) cf = self._flavour.casefold_parts if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts): formatted = self._format_parsed_parts(to_drv, to_root, to_parts) raise ValueError("{!r} does not start with {!r}" .format(str(self), str(formatted))) return self._from_parsed_parts('', root if n == 1 else '', abs_parts[n:]) @property def parts(self): """An object providing sequence-like access to the components in the filesystem path.""" # We cache the tuple to avoid building a new one each time .parts # is accessed. XXX is this necessary? try: return self._pparts except AttributeError: self._pparts = tuple(self._parts) return self._pparts def joinpath(self, *args): """Combine this path with one or several arguments, and return a new path representing either a subpath (if all arguments are relative paths) or a totally different path (if one of the arguments is anchored). 
""" return self._make_child(args) def __truediv__(self, key): return self._make_child((key,)) def __rtruediv__(self, key): return self._from_parts([key] + self._parts) if sys.version_info < (3,): __div__ = __truediv__ __rdiv__ = __rtruediv__ @property def parent(self): """The logical parent of the path.""" drv = self._drv root = self._root parts = self._parts if len(parts) == 1 and (drv or root): return self return self._from_parsed_parts(drv, root, parts[:-1]) @property def parents(self): """A sequence of this path's logical parents.""" return _PathParents(self) def is_absolute(self): """True if the path is absolute (has both a root and, if applicable, a drive).""" if not self._root: return False return not self._flavour.has_drv or bool(self._drv) def is_reserved(self): """Return True if the path contains one of the special names reserved by the system, if any.""" return self._flavour.is_reserved(self._parts) def match(self, path_pattern): """ Return True if this path matches the given pattern. 
""" cf = self._flavour.casefold path_pattern = cf(path_pattern) drv, root, pat_parts = self._flavour.parse_parts((path_pattern,)) if not pat_parts: raise ValueError("empty pattern") if drv and drv != cf(self._drv): return False if root and root != cf(self._root): return False parts = self._cparts if drv or root: if len(pat_parts) != len(parts): return False pat_parts = pat_parts[1:] elif len(pat_parts) > len(parts): return False for part, pat in zip(reversed(parts), reversed(pat_parts)): if not fnmatch.fnmatchcase(part, pat): return False return True class PurePosixPath(PurePath): _flavour = _posix_flavour __slots__ = () class PureWindowsPath(PurePath): _flavour = _windows_flavour __slots__ = () # Filesystem-accessing classes class Path(PurePath): __slots__ = ( '_accessor', ) def __new__(cls, *args, **kwargs): if cls is Path: cls = WindowsPath if os.name == 'nt' else PosixPath self = cls._from_parts(args, init=False) if not self._flavour.is_supported: raise NotImplementedError("cannot instantiate %r on your system" % (cls.__name__,)) self._init() return self def _init(self, # Private non-constructor arguments template=None, ): if template is not None: self._accessor = template._accessor else: self._accessor = _normal_accessor def _make_child_relpath(self, part): # This is an optimization used for dir walking. `part` must be # a single part relative to this path. parts = self._parts + [part] return self._from_parsed_parts(self._drv, self._root, parts) def _opener(self, name, flags, mode=0o666): # A stub for the opener argument to built-in open() return self._accessor.open(self, flags, mode) def _raw_open(self, flags, mode=0o777): """ Open the file pointed by this path and return a file descriptor, as os.open() does. """ return self._accessor.open(self, flags, mode) # Public API @classmethod def cwd(cls): """Return a new path pointing to the current working directory (as returned by os.getcwd()). 
""" return cls(os.getcwd()) def iterdir(self): """Iterate over the files in this directory. Does not yield any result for the special paths '.' and '..'. """ for name in self._accessor.listdir(self): if name in ('.', '..'): # Yielding a path object for these makes little sense continue yield self._make_child_relpath(name) def glob(self, pattern): """Iterate over this subtree and yield all existing files (of any kind, including directories) matching the given pattern. """ pattern = self._flavour.casefold(pattern) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") selector = _make_selector(tuple(pattern_parts)) for p in selector.select_from(self): yield p def rglob(self, pattern): """Recursively yield all existing files (of any kind, including directories) matching the given pattern, anywhere in this subtree. """ pattern = self._flavour.casefold(pattern) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") selector = _make_selector(("**",) + tuple(pattern_parts)) for p in selector.select_from(self): yield p def absolute(self): """Return an absolute version of this path. This function works even if the path doesn't point to anything. No normalization is done, i.e. all '.' and '..' will be kept along. Use resolve() to get the canonical path to a file. """ # XXX untested yet! if self.is_absolute(): return self # FIXME this must defer to the specific flavour (and, under Windows, # use nt._getfullpathname()) obj = self._from_parts([os.getcwd()] + self._parts, init=False) obj._init(template=self) return obj def resolve(self): """ Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). 
""" s = self._flavour.resolve(self) if s is None: # No symlink resolution => for consistency, raise an error if # the path doesn't exist or is forbidden self.stat() s = str(self.absolute()) # Now we have no symlinks in the path, it's safe to normalize it. normed = self._flavour.pathmod.normpath(s) obj = self._from_parts((normed,), init=False) obj._init(template=self) return obj def stat(self): """ Return the result of the stat() system call on this path, like os.stat() does. """ return self._accessor.stat(self) def owner(self): """ Return the login name of the file owner. """ import pwd return pwd.getpwuid(self.stat().st_uid).pw_name def group(self): """ Return the group name of the file gid. """ import grp return grp.getgrgid(self.stat().st_gid).gr_name def open(self, mode='r', buffering=-1, encoding=None, errors=None, newline=None): """ Open the file pointed by this path and return a file object, as the built-in open() function does. """ if sys.version_info >= (3, 3): return io.open(str(self), mode, buffering, encoding, errors, newline, opener=self._opener) else: return io.open(str(self), mode, buffering, encoding, errors, newline) def touch(self, mode=0o666, exist_ok=True): """ Create this file with the given access mode, if it doesn't exist. """ if exist_ok: # First try to bump modification time # Implementation note: GNU touch uses the UTIME_NOW option of # the utimensat() / futimens() functions. t = time.time() try: self._accessor.utime(self, (t, t)) except OSError: # Avoid exception chaining pass else: return flags = os.O_CREAT | os.O_WRONLY if not exist_ok: flags |= os.O_EXCL fd = self._raw_open(flags, mode) os.close(fd) def mkdir(self, mode=0o777, parents=False): if not parents: self._accessor.mkdir(self, mode) else: try: self._accessor.mkdir(self, mode) except OSError as e: if e.errno != ENOENT: raise self.parent.mkdir(parents=True) self._accessor.mkdir(self, mode) def chmod(self, mode): """ Change the permissions of the path, like os.chmod(). 
""" self._accessor.chmod(self, mode) def lchmod(self, mode): """ Like chmod(), except if the path points to a symlink, the symlink's permissions are changed, rather than its target's. """ self._accessor.lchmod(self, mode) def unlink(self): """ Remove this file or link. If the path is a directory, use rmdir() instead. """ self._accessor.unlink(self) def rmdir(self): """ Remove this directory. The directory must be empty. """ self._accessor.rmdir(self) def lstat(self): """ Like stat(), except if the path points to a symlink, the symlink's status information is returned, rather than its target's. """ return self._accessor.lstat(self) def rename(self, target): """ Rename this path to the given path. """ self._accessor.rename(self, target) def replace(self, target): """ Rename this path to the given path, clobbering the existing destination if it exists. """ if sys.version_info < (3, 3): raise NotImplementedError("replace() is only available " "with Python 3.3 and later") self._accessor.replace(self, target) def symlink_to(self, target, target_is_directory=False): """ Make this path a symlink pointing to the given path. Note the order of arguments (self, target) is the reverse of os.symlink's. """ self._accessor.symlink(target, self, target_is_directory) # Convenience functions for querying the stat results def exists(self): """ Whether this path exists. """ try: self.stat() except OSError as e: if e.errno != ENOENT: raise return False return True def is_dir(self): """ Whether this path is a directory. """ try: return S_ISDIR(self.stat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_file(self): """ Whether this path is a regular file (also True for symlinks pointing to regular files). 
""" try: return S_ISREG(self.stat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_symlink(self): """ Whether this path is a symbolic link. """ try: return S_ISLNK(self.lstat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist return False def is_block_device(self): """ Whether this path is a block device. """ try: return S_ISBLK(self.stat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_char_device(self): """ Whether this path is a character device. """ try: return S_ISCHR(self.stat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_fifo(self): """ Whether this path is a FIFO. """ try: return S_ISFIFO(self.stat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False def is_socket(self): """ Whether this path is a socket. """ try: return S_ISSOCK(self.stat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False class PosixPath(Path, PurePosixPath): __slots__ = () class WindowsPath(Path, PureWindowsPath): __slots__ = ()
41,480
Python
.py
1,093
27.987191
95
0.563941
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,767
util.py
evilhero_mylar/lib/apscheduler/util.py
"""This module contains several handy functions primarily meant for internal use.""" from __future__ import division from datetime import date, datetime, time, timedelta, tzinfo from calendar import timegm import re from functools import partial from pytz import timezone, utc import six try: from inspect import signature except ImportError: # pragma: nocover from funcsigs import signature try: from threading import TIMEOUT_MAX except ImportError: TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows __all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp', 'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', 'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args') class _Undefined(object): def __nonzero__(self): return False def __bool__(self): return False def __repr__(self): return '<undefined>' undefined = _Undefined() #: a unique object that only signifies that no value is defined def asint(text): """ Safely converts a string to an integer, returning ``None`` if the string is ``None``. :type text: str :rtype: int """ if text is not None: return int(text) def asbool(obj): """ Interprets an object as a boolean value. :rtype: bool """ if isinstance(obj, str): obj = obj.strip().lower() if obj in ('true', 'yes', 'on', 'y', 't', '1'): return True if obj in ('false', 'no', 'off', 'n', 'f', '0'): return False raise ValueError('Unable to interpret value "%s" as boolean' % obj) return bool(obj) def astimezone(obj): """ Interprets an object as a timezone. :rtype: tzinfo """ if isinstance(obj, six.string_types): return timezone(obj) if isinstance(obj, tzinfo): if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'): raise TypeError('Only timezones from the pytz library are supported') if obj.zone == 'local': raise ValueError( 'Unable to determine the name of the local timezone -- you must explicitly ' 'specify the name of the local timezone. 
Please refrain from using timezones like ' 'EST to prevent problems with daylight saving time. Instead, use a locale based ' 'timezone name (such as Europe/Helsinki).') return obj if obj is not None: raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__) _DATE_REGEX = re.compile( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})' r'(?: (?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})' r'(?:\.(?P<microsecond>\d{1,6}))?)?') def convert_to_datetime(input, tz, arg_name): """ Converts the given object to a timezone aware datetime object. If a timezone aware datetime object is passed, it is returned unmodified. If a native datetime object is passed, it is given the specified timezone. If the input is a string, it is parsed as a datetime with the given timezone. Date strings are accepted in three different forms: date only (Y-m-d), date with time (Y-m-d H:M:S) or with date+time with microseconds (Y-m-d H:M:S.micro). :param str|datetime input: the datetime or string to convert to a timezone aware datetime :param datetime.tzinfo tz: timezone to interpret ``input`` in :param str arg_name: the name of the argument (used in an error message) :rtype: datetime """ if input is None: return elif isinstance(input, datetime): datetime_ = input elif isinstance(input, date): datetime_ = datetime.combine(input, time()) elif isinstance(input, six.string_types): m = _DATE_REGEX.match(input) if not m: raise ValueError('Invalid date string') values = [(k, int(v or 0)) for k, v in m.groupdict().items()] values = dict(values) datetime_ = datetime(**values) else: raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__)) if datetime_.tzinfo is not None: return datetime_ if tz is None: raise ValueError( 'The "tz" argument must be specified if %s has no timezone information' % arg_name) if isinstance(tz, six.string_types): tz = timezone(tz) try: return tz.localize(datetime_, is_dst=None) except AttributeError: raise TypeError( 'Only 
pytz timezones are supported (need the localize() and normalize() methods)') def datetime_to_utc_timestamp(timeval): """ Converts a datetime instance to a timestamp. :type timeval: datetime :rtype: float """ if timeval is not None: return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000 def utc_timestamp_to_datetime(timestamp): """ Converts the given timestamp to a datetime instance. :type timestamp: float :rtype: datetime """ if timestamp is not None: return datetime.fromtimestamp(timestamp, utc) def timedelta_seconds(delta): """ Converts the given timedelta to seconds. :type delta: timedelta :rtype: float """ return delta.days * 24 * 60 * 60 + delta.seconds + \ delta.microseconds / 1000000.0 def datetime_ceil(dateval): """ Rounds the given datetime object upwards. :type dateval: datetime """ if dateval.microsecond > 0: return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond) return dateval def datetime_repr(dateval): return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None' def get_callable_name(func): """ Returns the best available display name for the given function/callable. :rtype: str """ # the easy case (on Python 3.3+) if hasattr(func, '__qualname__'): return func.__qualname__ # class methods, bound and unbound methods f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None) if f_self and hasattr(func, '__name__'): f_class = f_self if isinstance(f_self, type) else f_self.__class__ else: f_class = getattr(func, 'im_class', None) if f_class and hasattr(func, '__name__'): return '%s.%s' % (f_class.__name__, func.__name__) # class or class instance if hasattr(func, '__call__'): # class if hasattr(func, '__name__'): return func.__name__ # instance of a class with a __call__ method return func.__class__.__name__ raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func) def obj_to_ref(obj): """ Returns the path to the given callable. 
:rtype: str :raises TypeError: if the given object is not callable :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested function """ if isinstance(obj, partial): raise ValueError('Cannot create a reference to a partial()') name = get_callable_name(obj) if '<lambda>' in name: raise ValueError('Cannot create a reference to a lambda') if '<locals>' in name: raise ValueError('Cannot create a reference to a nested function') return '%s:%s' % (obj.__module__, name) def ref_to_obj(ref): """ Returns the object pointed to by ``ref``. :type ref: str """ if not isinstance(ref, six.string_types): raise TypeError('References must be strings') if ':' not in ref: raise ValueError('Invalid reference') modulename, rest = ref.split(':', 1) try: obj = __import__(modulename, fromlist=[rest]) except ImportError: raise LookupError('Error resolving reference %s: could not import module' % ref) try: for name in rest.split('.'): obj = getattr(obj, name) return obj except Exception: raise LookupError('Error resolving reference %s: error looking up object' % ref) def maybe_ref(ref): """ Returns the object that the given reference points to, if it is indeed a reference. If it is not a reference, the object is returned as-is. """ if not isinstance(ref, str): return ref return ref_to_obj(ref) if six.PY2: def repr_escape(string): if isinstance(string, six.text_type): return string.encode('ascii', 'backslashreplace') return string else: def repr_escape(string): return string def check_callable_args(func, args, kwargs): """ Ensures that the given callable can be called with the given arguments. 
:type args: tuple :type kwargs: dict """ pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs positional_only_kwargs = [] # positional-only parameters that have a match in kwargs unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs unmatched_args = list(args) # args that didn't match any of the parameters in the signature # kwargs that didn't match any of the parameters in the signature unmatched_kwargs = list(kwargs) # indicates if the signature defines *args and **kwargs respectively has_varargs = has_var_kwargs = False try: sig = signature(func) except ValueError: # signature() doesn't work against every kind of callable return for param in six.itervalues(sig.parameters): if param.kind == param.POSITIONAL_OR_KEYWORD: if param.name in unmatched_kwargs and unmatched_args: pos_kwargs_conflicts.append(param.name) elif unmatched_args: del unmatched_args[0] elif param.name in unmatched_kwargs: unmatched_kwargs.remove(param.name) elif param.default is param.empty: unsatisfied_args.append(param.name) elif param.kind == param.POSITIONAL_ONLY: if unmatched_args: del unmatched_args[0] elif param.name in unmatched_kwargs: unmatched_kwargs.remove(param.name) positional_only_kwargs.append(param.name) elif param.default is param.empty: unsatisfied_args.append(param.name) elif param.kind == param.KEYWORD_ONLY: if param.name in unmatched_kwargs: unmatched_kwargs.remove(param.name) elif param.default is param.empty: unsatisfied_kwargs.append(param.name) elif param.kind == param.VAR_POSITIONAL: has_varargs = True elif param.kind == param.VAR_KEYWORD: has_var_kwargs = True # Make sure there are no conflicts between args and kwargs if pos_kwargs_conflicts: raise ValueError('The following arguments are supplied in both args and kwargs: %s' % ', '.join(pos_kwargs_conflicts)) # Check if keyword arguments are being fed to positional-only 
parameters if positional_only_kwargs: raise ValueError('The following arguments cannot be given as keyword arguments: %s' % ', '.join(positional_only_kwargs)) # Check that the number of positional arguments minus the number of matched kwargs matches the # argspec if unsatisfied_args: raise ValueError('The following arguments have not been supplied: %s' % ', '.join(unsatisfied_args)) # Check that all keyword-only arguments have been supplied if unsatisfied_kwargs: raise ValueError( 'The following keyword-only arguments have not been supplied in kwargs: %s' % ', '.join(unsatisfied_kwargs)) # Check that the callable can accept the given number of positional arguments if not has_varargs and unmatched_args: raise ValueError( 'The list of positional arguments is longer than the target callable can handle ' '(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args))) # Check that the callable can accept the given keyword arguments if not has_var_kwargs and unmatched_kwargs: raise ValueError( 'The target callable does not accept the following keyword arguments: %s' % ', '.join(unmatched_kwargs))
12,588
Python
.py
296
35.182432
99
0.648529
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,768
__init__.py
evilhero_mylar/lib/apscheduler/__init__.py
# These will be removed in APScheduler 4.0. #release = __import__('pkg_resources').get_distribution('APScheduler').version.split('-')[0] #version_info = tuple(int(x) if x.isdigit() else x for x in release.split('.')) #version = __version__ = '.'.join(str(x) for x in version_info[:3]) version_info = (3, 3, 1) release = '3.3.1' version = __version__ = '3.3.1'
361
Python
.py
7
50.428571
92
0.654391
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,769
events.py
evilhero_mylar/lib/apscheduler/events.py
__all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDULER_PAUSED', 'EVENT_SCHEDULER_RESUMED', 'EVENT_EXECUTOR_ADDED', 'EVENT_EXECUTOR_REMOVED', 'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED', 'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES', 'SchedulerEvent', 'JobEvent', 'JobExecutionEvent') EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0 EVENT_SCHEDULER_SHUTDOWN = 2 ** 1 EVENT_SCHEDULER_PAUSED = 2 ** 2 EVENT_SCHEDULER_RESUMED = 2 ** 3 EVENT_EXECUTOR_ADDED = 2 ** 4 EVENT_EXECUTOR_REMOVED = 2 ** 5 EVENT_JOBSTORE_ADDED = 2 ** 6 EVENT_JOBSTORE_REMOVED = 2 ** 7 EVENT_ALL_JOBS_REMOVED = 2 ** 8 EVENT_JOB_ADDED = 2 ** 9 EVENT_JOB_REMOVED = 2 ** 10 EVENT_JOB_MODIFIED = 2 ** 11 EVENT_JOB_EXECUTED = 2 ** 12 EVENT_JOB_ERROR = 2 ** 13 EVENT_JOB_MISSED = 2 ** 14 EVENT_JOB_SUBMITTED = 2 ** 15 EVENT_JOB_MAX_INSTANCES = 2 ** 16 EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED | EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED | EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES) class SchedulerEvent(object): """ An event that concerns the scheduler itself. :ivar code: the type code of this event :ivar alias: alias of the job store or executor that was added or removed (if applicable) """ def __init__(self, code, alias=None): super(SchedulerEvent, self).__init__() self.code = code self.alias = alias def __repr__(self): return '<%s (code=%d)>' % (self.__class__.__name__, self.code) class JobEvent(SchedulerEvent): """ An event that concerns a job. 
:ivar code: the type code of this event :ivar job_id: identifier of the job in question :ivar jobstore: alias of the job store containing the job in question """ def __init__(self, code, job_id, jobstore): super(JobEvent, self).__init__(code) self.code = code self.job_id = job_id self.jobstore = jobstore class JobSubmissionEvent(JobEvent): """ An event that concerns the submission of a job to its executor. :ivar scheduled_run_times: a list of datetimes when the job was intended to run """ def __init__(self, code, job_id, jobstore, scheduled_run_times): super(JobSubmissionEvent, self).__init__(code, job_id, jobstore) self.scheduled_run_times = scheduled_run_times class JobExecutionEvent(JobEvent): """ An event that concerns the running of a job within its executor. :ivar scheduled_run_time: the time when the job was scheduled to be run :ivar retval: the return value of the successfully executed job :ivar exception: the exception raised by the job :ivar traceback: a formatted traceback for the exception """ def __init__(self, code, job_id, jobstore, scheduled_run_time, retval=None, exception=None, traceback=None): super(JobExecutionEvent, self).__init__(code, job_id, jobstore) self.scheduled_run_time = scheduled_run_time self.retval = retval self.exception = exception self.traceback = traceback
3,571
Python
.py
75
41.666667
99
0.675582
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,770
job.py
evilhero_mylar/lib/apscheduler/job.py
from collections import Iterable, Mapping from uuid import uuid4 import six from apscheduler.triggers.base import BaseTrigger from apscheduler.util import ( ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args, convert_to_datetime) class Job(object): """ Contains the options given when scheduling callables and its current schedule and other state. This class should never be instantiated by the user. :var str id: the unique identifier of this job :var str name: the description of this job :var func: the callable to execute :var tuple|list args: positional arguments to the callable :var dict kwargs: keyword arguments to the callable :var bool coalesce: whether to only run the job once when several run times are due :var trigger: the trigger object that controls the schedule of this job :var str executor: the name of the executor that will run this job :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to be late :var int max_instances: the maximum number of concurrently executing instances allowed for this job :var datetime.datetime next_run_time: the next scheduled run time of this job .. note:: The ``misfire_grace_time`` has some non-obvious effects on job execution. See the :ref:`missed-job-executions` section in the documentation for an in-depth explanation. """ __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref', 'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances', 'next_run_time') def __init__(self, scheduler, id=None, **kwargs): super(Job, self).__init__() self._scheduler = scheduler self._jobstore_alias = None self._modify(id=id or uuid4().hex, **kwargs) def modify(self, **changes): """ Makes the given changes to this job and saves it in the associated job store. Accepted keyword arguments are the same as the variables on this class. .. 
seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job` :return Job: this job instance """ self._scheduler.modify_job(self.id, self._jobstore_alias, **changes) return self def reschedule(self, trigger, **trigger_args): """ Shortcut for switching the trigger on this job. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job` :return Job: this job instance """ self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args) return self def pause(self): """ Temporarily suspend the execution of this job. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job` :return Job: this job instance """ self._scheduler.pause_job(self.id, self._jobstore_alias) return self def resume(self): """ Resume the schedule of this job if previously paused. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job` :return Job: this job instance """ self._scheduler.resume_job(self.id, self._jobstore_alias) return self def remove(self): """ Unschedules this job and removes it from its associated job store. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job` """ self._scheduler.remove_job(self.id, self._jobstore_alias) @property def pending(self): """ Returns ``True`` if the referenced job is still waiting to be added to its designated job store. """ return self._jobstore_alias is None # # Private API # def _get_run_times(self, now): """ Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive). :type now: datetime.datetime :rtype: list[datetime.datetime] """ run_times = [] next_run_time = self.next_run_time while next_run_time and next_run_time <= now: run_times.append(next_run_time) next_run_time = self.trigger.get_next_fire_time(next_run_time, now) return run_times def _modify(self, **changes): """ Validates the changes to the Job and makes the modifications if and only if all of them validate. 
""" approved = {} if 'id' in changes: value = changes.pop('id') if not isinstance(value, six.string_types): raise TypeError("id must be a nonempty string") if hasattr(self, 'id'): raise ValueError('The job ID may not be changed') approved['id'] = value if 'func' in changes or 'args' in changes or 'kwargs' in changes: func = changes.pop('func') if 'func' in changes else self.func args = changes.pop('args') if 'args' in changes else self.args kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs if isinstance(func, six.string_types): func_ref = func func = ref_to_obj(func) elif callable(func): try: func_ref = obj_to_ref(func) except ValueError: # If this happens, this Job won't be serializable func_ref = None else: raise TypeError('func must be a callable or a textual reference to one') if not hasattr(self, 'name') and changes.get('name', None) is None: changes['name'] = get_callable_name(func) if isinstance(args, six.string_types) or not isinstance(args, Iterable): raise TypeError('args must be a non-string iterable') if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping): raise TypeError('kwargs must be a dict-like object') check_callable_args(func, args, kwargs) approved['func'] = func approved['func_ref'] = func_ref approved['args'] = args approved['kwargs'] = kwargs if 'name' in changes: value = changes.pop('name') if not value or not isinstance(value, six.string_types): raise TypeError("name must be a nonempty string") approved['name'] = value if 'misfire_grace_time' in changes: value = changes.pop('misfire_grace_time') if value is not None and (not isinstance(value, six.integer_types) or value <= 0): raise TypeError('misfire_grace_time must be either None or a positive integer') approved['misfire_grace_time'] = value if 'coalesce' in changes: value = bool(changes.pop('coalesce')) approved['coalesce'] = value if 'max_instances' in changes: value = changes.pop('max_instances') if not isinstance(value, six.integer_types) or 
value <= 0: raise TypeError('max_instances must be a positive integer') approved['max_instances'] = value if 'trigger' in changes: trigger = changes.pop('trigger') if not isinstance(trigger, BaseTrigger): raise TypeError('Expected a trigger instance, got %s instead' % trigger.__class__.__name__) approved['trigger'] = trigger if 'executor' in changes: value = changes.pop('executor') if not isinstance(value, six.string_types): raise TypeError('executor must be a string') approved['executor'] = value if 'next_run_time' in changes: value = changes.pop('next_run_time') approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone, 'next_run_time') if changes: raise AttributeError('The following are not modifiable attributes of Job: %s' % ', '.join(changes)) for key, value in six.iteritems(approved): setattr(self, key, value) def __getstate__(self): # Don't allow this Job to be serialized if the function reference could not be determined if not self.func_ref: raise ValueError( 'This Job cannot be serialized since the reference to its callable (%r) could not ' 'be determined. Consider giving a textual reference (module:function name) ' 'instead.' 
% (self.func,)) return { 'version': 1, 'id': self.id, 'func': self.func_ref, 'trigger': self.trigger, 'executor': self.executor, 'args': self.args, 'kwargs': self.kwargs, 'name': self.name, 'misfire_grace_time': self.misfire_grace_time, 'coalesce': self.coalesce, 'max_instances': self.max_instances, 'next_run_time': self.next_run_time } def __setstate__(self, state): if state.get('version', 1) > 1: raise ValueError('Job has version %s, but only version 1 can be handled' % state['version']) self.id = state['id'] self.func_ref = state['func'] self.func = ref_to_obj(self.func_ref) self.trigger = state['trigger'] self.executor = state['executor'] self.args = state['args'] self.kwargs = state['kwargs'] self.name = state['name'] self.misfire_grace_time = state['misfire_grace_time'] self.coalesce = state['coalesce'] self.max_instances = state['max_instances'] self.next_run_time = state['next_run_time'] def __eq__(self, other): if isinstance(other, Job): return self.id == other.id return NotImplemented def __repr__(self): return '<Job (id=%s name=%s)>' % (repr_escape(self.id), repr_escape(self.name)) def __str__(self): return repr_escape(self.__unicode__()) def __unicode__(self): if hasattr(self, 'next_run_time'): status = ('next run at: ' + datetime_repr(self.next_run_time) if self.next_run_time else 'paused') else: status = 'pending' return u'%s (trigger: %s, %s)' % (self.name, self.trigger, status)
10,690
Python
.py
226
36.402655
99
0.599077
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,771
pool.py
evilhero_mylar/lib/apscheduler/executors/pool.py
from abc import abstractmethod import concurrent.futures from apscheduler.executors.base import BaseExecutor, run_job class BasePoolExecutor(BaseExecutor): @abstractmethod def __init__(self, pool): super(BasePoolExecutor, self).__init__() self._pool = pool def _do_submit_job(self, job, run_times): def callback(f): exc, tb = (f.exception_info() if hasattr(f, 'exception_info') else (f.exception(), getattr(f.exception(), '__traceback__', None))) if exc: self._run_job_error(job.id, exc, tb) else: self._run_job_success(job.id, f.result()) f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) f.add_done_callback(callback) def shutdown(self, wait=True): self._pool.shutdown(wait) class ThreadPoolExecutor(BasePoolExecutor): """ An executor that runs jobs in a concurrent.futures thread pool. Plugin alias: ``threadpool`` :param max_workers: the maximum number of spawned threads. """ def __init__(self, max_workers=10): pool = concurrent.futures.ThreadPoolExecutor(int(max_workers)) super(ThreadPoolExecutor, self).__init__(pool) class ProcessPoolExecutor(BasePoolExecutor): """ An executor that runs jobs in a concurrent.futures process pool. Plugin alias: ``processpool`` :param max_workers: the maximum number of spawned processes. """ def __init__(self, max_workers=10): pool = concurrent.futures.ProcessPoolExecutor(int(max_workers)) super(ProcessPoolExecutor, self).__init__(pool)
1,672
Python
.py
38
36.394737
94
0.660692
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,772
gevent.py
evilhero_mylar/lib/apscheduler/executors/gevent.py
from __future__ import absolute_import import sys from apscheduler.executors.base import BaseExecutor, run_job try: import gevent except ImportError: # pragma: nocover raise ImportError('GeventExecutor requires gevent installed') class GeventExecutor(BaseExecutor): """ Runs jobs as greenlets. Plugin alias: ``gevent`` """ def _do_submit_job(self, job, run_times): def callback(greenlet): try: events = greenlet.get() except: self._run_job_error(job.id, *sys.exc_info()[1:]) else: self._run_job_success(job.id, events) gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\ link(callback)
763
Python
.py
22
26.954545
87
0.631651
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,773
twisted.py
evilhero_mylar/lib/apscheduler/executors/twisted.py
from __future__ import absolute_import from apscheduler.executors.base import BaseExecutor, run_job class TwistedExecutor(BaseExecutor): """ Runs jobs in the reactor's thread pool. Plugin alias: ``twisted`` """ def start(self, scheduler, alias): super(TwistedExecutor, self).start(scheduler, alias) self._reactor = scheduler._reactor def _do_submit_job(self, job, run_times): def callback(success, result): if success: self._run_job_success(job.id, result) else: self._run_job_error(job.id, result.value, result.tb) self._reactor.getThreadPool().callInThreadWithCallback( callback, run_job, job, job._jobstore_alias, run_times, self._logger.name)
778
Python
.py
18
34.944444
86
0.657371
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,774
asyncio.py
evilhero_mylar/lib/apscheduler/executors/asyncio.py
from __future__ import absolute_import import sys from apscheduler.executors.base import BaseExecutor, run_job try: from asyncio import iscoroutinefunction from apscheduler.executors.base_py3 import run_coroutine_job except ImportError: from trollius import iscoroutinefunction run_coroutine_job = None class AsyncIOExecutor(BaseExecutor): """ Runs jobs in the default executor of the event loop. If the job function is a native coroutine function, it is scheduled to be run directly in the event loop as soon as possible. All other functions are run in the event loop's default executor which is usually a thread pool. Plugin alias: ``asyncio`` """ def start(self, scheduler, alias): super(AsyncIOExecutor, self).start(scheduler, alias) self._eventloop = scheduler._eventloop def _do_submit_job(self, job, run_times): def callback(f): try: events = f.result() except: self._run_job_error(job.id, *sys.exc_info()[1:]) else: self._run_job_success(job.id, events) if iscoroutinefunction(job.func): if run_coroutine_job is not None: coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) f = self._eventloop.create_task(coro) else: raise Exception('Executing coroutine based jobs is not supported with Trollius') else: f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times, self._logger.name) f.add_done_callback(callback)
1,706
Python
.py
38
35.289474
99
0.645745
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,775
tornado.py
evilhero_mylar/lib/apscheduler/executors/tornado.py
from __future__ import absolute_import import sys from concurrent.futures import ThreadPoolExecutor from tornado.gen import convert_yielded from apscheduler.executors.base import BaseExecutor, run_job try: from inspect import iscoroutinefunction from apscheduler.executors.base_py3 import run_coroutine_job except ImportError: def iscoroutinefunction(func): return False class TornadoExecutor(BaseExecutor): """ Runs jobs either in a thread pool or directly on the I/O loop. If the job function is a native coroutine function, it is scheduled to be run directly in the I/O loop as soon as possible. All other functions are run in a thread pool. Plugin alias: ``tornado`` :param int max_workers: maximum number of worker threads in the thread pool """ def __init__(self, max_workers=10): super(TornadoExecutor, self).__init__() self.executor = ThreadPoolExecutor(max_workers) def start(self, scheduler, alias): super(TornadoExecutor, self).start(scheduler, alias) self._ioloop = scheduler._ioloop def _do_submit_job(self, job, run_times): def callback(f): try: events = f.result() except: self._run_job_error(job.id, *sys.exc_info()[1:]) else: self._run_job_success(job.id, events) if iscoroutinefunction(job.func): f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) else: f = self.executor.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) f = convert_yielded(f) f.add_done_callback(callback)
1,733
Python
.py
40
35.05
97
0.662895
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,776
base.py
evilhero_mylar/lib/apscheduler/executors/base.py
from abc import ABCMeta, abstractmethod from collections import defaultdict from datetime import datetime, timedelta from traceback import format_tb import logging import sys from pytz import utc import six from apscheduler.events import ( JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED) class MaxInstancesReachedError(Exception): def __init__(self, job): super(MaxInstancesReachedError, self).__init__( 'Job "%s" has already reached its maximum number of instances (%d)' % (job.id, job.max_instances)) class BaseExecutor(six.with_metaclass(ABCMeta, object)): """Abstract base class that defines the interface that every executor must implement.""" _scheduler = None _lock = None _logger = logging.getLogger('apscheduler.executors') def __init__(self): super(BaseExecutor, self).__init__() self._instances = defaultdict(lambda: 0) def start(self, scheduler, alias): """ Called by the scheduler when the scheduler is being started or when the executor is being added to an already running scheduler. :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting this executor :param str|unicode alias: alias of this executor as it was assigned to the scheduler """ self._scheduler = scheduler self._lock = scheduler._create_lock() self._logger = logging.getLogger('apscheduler.executors.%s' % alias) def shutdown(self, wait=True): """ Shuts down this executor. :param bool wait: ``True`` to wait until all submitted jobs have been executed """ def submit_job(self, job, run_times): """ Submits job for execution. 
:param Job job: job to execute :param list[datetime] run_times: list of datetimes specifying when the job should have been run :raises MaxInstancesReachedError: if the maximum number of allowed instances for this job has been reached """ assert self._lock is not None, 'This executor has not been started yet' with self._lock: if self._instances[job.id] >= job.max_instances: raise MaxInstancesReachedError(job) self._do_submit_job(job, run_times) self._instances[job.id] += 1 @abstractmethod def _do_submit_job(self, job, run_times): """Performs the actual task of scheduling `run_job` to be called.""" def _run_job_success(self, job_id, events): """ Called by the executor with the list of generated events when :func:`run_job` has been successfully called. """ with self._lock: self._instances[job_id] -= 1 if self._instances[job_id] == 0: del self._instances[job_id] for event in events: self._scheduler._dispatch_event(event) def _run_job_error(self, job_id, exc, traceback=None): """Called by the executor with the exception if there is an error calling `run_job`.""" with self._lock: self._instances[job_id] -= 1 if self._instances[job_id] == 0: del self._instances[job_id] exc_info = (exc.__class__, exc, traceback) self._logger.error('Error running job %s', job_id, exc_info=exc_info) def run_job(job, jobstore_alias, run_times, logger_name): """ Called by executors to run the job. Returns a list of scheduler events to be dispatched by the scheduler. 
""" events = [] logger = logging.getLogger(logger_name) for run_time in run_times: # See if the job missed its run time window, and handle # possible misfires accordingly if job.misfire_grace_time is not None: difference = datetime.now(utc) - run_time grace_time = timedelta(seconds=job.misfire_grace_time) if difference > grace_time: events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, run_time)) logger.warning('Run time of job "%s" was missed by %s', job, difference) continue logger.info('Running job "%s" (scheduled at %s)', job, run_time) try: retval = job.func(*job.args, **job.kwargs) except: exc, tb = sys.exc_info()[1:] formatted_tb = ''.join(format_tb(tb)) events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, exception=exc, traceback=formatted_tb)) logger.exception('Job "%s" raised an exception', job) else: events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval)) logger.info('Job "%s" executed successfully', job) return events
5,042
Python
.py
109
36.33945
98
0.624873
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,777
debug.py
evilhero_mylar/lib/apscheduler/executors/debug.py
import sys

from apscheduler.executors.base import BaseExecutor, run_job


class DebugExecutor(BaseExecutor):
    """
    A special executor that runs the target callable synchronously in the calling
    thread rather than deferring it to a thread or process pool.

    Plugin alias: ``debug``
    """

    def _do_submit_job(self, job, run_times):
        # Run in place and report the outcome through the BaseExecutor callbacks.
        try:
            generated_events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
        except:  # deliberately bare: mirrors BaseExecutor's error-reporting protocol
            self._run_job_error(job.id, *sys.exc_info()[1:])
        else:
            self._run_job_success(job.id, generated_events)
559
Python
.py
15
30.333333
94
0.653061
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,778
base_py3.py
evilhero_mylar/lib/apscheduler/executors/base_py3.py
import logging
import sys
from datetime import datetime, timedelta
from traceback import format_tb

from pytz import utc

from apscheduler.events import (
    JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED)


async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
    """Coroutine version of run_job()."""
    logger = logging.getLogger(logger_name)
    events = []
    for run_time in run_times:
        # Misfire handling: skip run times that fall outside the job's grace period.
        if job.misfire_grace_time is not None:
            difference = datetime.now(utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
                                                run_time))
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            result = await job.func(*job.args, **job.kwargs)
        except:  # deliberately bare: every failure must be turned into an event
            exc, tb = sys.exc_info()[1:]
            formatted_tb = ''.join(format_tb(tb))
            events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
                                            exception=exc, traceback=formatted_tb))
            logger.exception('Job "%s" raised an exception', job)
        else:
            events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
                                            retval=result))
            logger.info('Job "%s" executed successfully', job)

    return events
1,761
Python
.py
35
38.4
97
0.609884
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,779
interval.py
evilhero_mylar/lib/apscheduler/triggers/interval.py
from datetime import timedelta, datetime
from math import ceil

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import convert_to_datetime, timedelta_seconds, datetime_repr, astimezone


class IntervalTrigger(BaseTrigger):
    """
    Triggers on specified intervals, starting on ``start_date`` if specified,
    ``datetime.now()`` + interval otherwise.

    :param int weeks: number of weeks to wait
    :param int days: number of days to wait
    :param int hours: number of hours to wait
    :param int minutes: number of minutes to wait
    :param int seconds: number of seconds to wait
    :param datetime|str start_date: starting point for the interval calculation
    :param datetime|str end_date: latest possible date/time to trigger on
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
    """

    __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length'

    def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None,
                 end_date=None, timezone=None):
        self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes,
                                  seconds=seconds)
        self.interval_length = timedelta_seconds(self.interval)
        if self.interval_length == 0:
            # A zero-length interval would fire continuously; fall back to one second.
            self.interval = timedelta(seconds=1)
            self.interval_length = 1

        # Resolve the timezone: explicit argument first, then whatever is attached to
        # the start/end dates, and finally the local zone.
        if timezone:
            self.timezone = astimezone(timezone)
        elif isinstance(start_date, datetime) and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif isinstance(end_date, datetime) and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        start_date = start_date or (datetime.now(self.timezone) + self.interval)
        self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            next_fire_time = previous_fire_time + self.interval
        elif self.start_date > now:
            next_fire_time = self.start_date
        else:
            # Jump straight to the first interval boundary at or after "now".
            elapsed_seconds = timedelta_seconds(now - self.start_date)
            intervals_ahead = int(ceil(elapsed_seconds / self.interval_length))
            next_fire_time = self.start_date + self.interval * intervals_ahead

        if not self.end_date or next_fire_time <= self.end_date:
            # normalize() fixes the UTC offset after crossing a DST boundary (pytz).
            return self.timezone.normalize(next_fire_time)

    def __getstate__(self):
        return {
            'version': 1,
            'timezone': self.timezone,
            'start_date': self.start_date,
            'end_date': self.end_date,
            'interval': self.interval
        }

    def __setstate__(self, state):
        # This is for compatibility with APScheduler 3.0.x
        if isinstance(state, tuple):
            state = state[1]

        if state.get('version', 1) > 1:
            raise ValueError(
                'Got serialized data for version %s of %s, but only version 1 can be handled' %
                (state['version'], self.__class__.__name__))

        self.timezone = state['timezone']
        self.start_date = state['start_date']
        self.end_date = state['end_date']
        self.interval = state['interval']
        self.interval_length = timedelta_seconds(self.interval)

    def __str__(self):
        return 'interval[%s]' % str(self.interval)

    def __repr__(self):
        return "<%s (interval=%r, start_date='%s', timezone='%s')>" % (
            self.__class__.__name__, self.interval, datetime_repr(self.start_date), self.timezone)
3,802
Python
.py
75
41.373333
98
0.64097
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,780
date.py
evilhero_mylar/lib/apscheduler/triggers/date.py
from datetime import datetime

from tzlocal import get_localzone

from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import convert_to_datetime, datetime_repr, astimezone


class DateTrigger(BaseTrigger):
    """
    Triggers once on the given datetime. If ``run_date`` is left empty, current time is used.

    :param datetime|str run_date: the date/time to run the job at
    :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already
    """

    __slots__ = 'run_date'

    def __init__(self, run_date=None, timezone=None):
        timezone = astimezone(timezone) or get_localzone()
        if run_date is not None:
            self.run_date = convert_to_datetime(run_date, timezone, 'run_date')
        else:
            self.run_date = datetime.now(timezone)

    def get_next_fire_time(self, previous_fire_time, now):
        # One-shot trigger: after it has fired once there is no next fire time.
        return self.run_date if previous_fire_time is None else None

    def __getstate__(self):
        return {'version': 1, 'run_date': self.run_date}

    def __setstate__(self, state):
        # This is for compatibility with APScheduler 3.0.x
        if isinstance(state, tuple):
            state = state[1]

        if state.get('version', 1) > 1:
            raise ValueError(
                'Got serialized data for version %s of %s, but only version 1 can be handled' %
                (state['version'], self.__class__.__name__))

        self.run_date = state['run_date']

    def __str__(self):
        return 'date[%s]' % datetime_repr(self.run_date)

    def __repr__(self):
        return "<%s (run_date='%s')>" % (self.__class__.__name__, datetime_repr(self.run_date))
1,705
Python
.py
37
38
98
0.628779
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,781
base.py
evilhero_mylar/lib/apscheduler/triggers/base.py
from abc import ABCMeta, abstractmethod

import six


class BaseTrigger(six.with_metaclass(ABCMeta)):
    """Abstract base class that defines the interface that every trigger must implement."""

    __slots__ = ()

    @abstractmethod
    def get_next_fire_time(self, previous_fire_time, now):
        """
        Return the next datetime to fire on.

        If no such datetime can be calculated, return ``None``.

        :param datetime.datetime previous_fire_time: the previous time the trigger was fired
        :param datetime.datetime now: current datetime
        """
578
Python
.py
13
38.076923
92
0.701252
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,782
expressions.py
evilhero_mylar/lib/apscheduler/triggers/cron/expressions.py
"""This module contains the expressions applicable for CronTrigger's fields.""" from calendar import monthrange import re from apscheduler.util import asint __all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', 'WeekdayPositionExpression', 'LastDayOfMonthExpression') WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] class AllExpression(object): value_re = re.compile(r'\*(?:/(?P<step>\d+))?$') def __init__(self, step=None): self.step = asint(step) if self.step == 0: raise ValueError('Increment must be higher than 0') def get_next_value(self, date, field): start = field.get_value(date) minval = field.get_min(date) maxval = field.get_max(date) start = max(start, minval) if not self.step: next = start else: distance_to_next = (self.step - (start - minval)) % self.step next = start + distance_to_next if next <= maxval: return next def __eq__(self, other): return isinstance(other, self.__class__) and self.step == other.step def __str__(self): if self.step: return '*/%d' % self.step return '*' def __repr__(self): return "%s(%s)" % (self.__class__.__name__, self.step) class RangeExpression(AllExpression): value_re = re.compile( r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$') def __init__(self, first, last=None, step=None): AllExpression.__init__(self, step) first = asint(first) last = asint(last) if last is None and step is None: last = first if last is not None and first > last: raise ValueError('The minimum value in a range must not be higher than the maximum') self.first = first self.last = last def get_next_value(self, date, field): startval = field.get_value(date) minval = field.get_min(date) maxval = field.get_max(date) # Apply range limits minval = max(minval, self.first) maxval = min(maxval, self.last) if self.last is not None else maxval nextval = max(minval, startval) # Apply the step if defined if self.step: distance_to_next = (self.step - (nextval - minval)) % self.step nextval += distance_to_next return nextval if 
nextval <= maxval else None def __eq__(self, other): return (isinstance(other, self.__class__) and self.first == other.first and self.last == other.last) def __str__(self): if self.last != self.first and self.last is not None: range = '%d-%d' % (self.first, self.last) else: range = str(self.first) if self.step: return '%s/%d' % (range, self.step) return range def __repr__(self): args = [str(self.first)] if self.last != self.first and self.last is not None or self.step: args.append(str(self.last)) if self.step: args.append(str(self.step)) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class WeekdayRangeExpression(RangeExpression): value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) def __init__(self, first, last=None): try: first_num = WEEKDAYS.index(first.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % first) if last: try: last_num = WEEKDAYS.index(last.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % last) else: last_num = None RangeExpression.__init__(self, first_num, last_num) def __str__(self): if self.last != self.first and self.last is not None: return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last]) return WEEKDAYS[self.first] def __repr__(self): args = ["'%s'" % WEEKDAYS[self.first]] if self.last != self.first and self.last is not None: args.append("'%s'" % WEEKDAYS[self.last]) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class WeekdayPositionExpression(AllExpression): options = ['1st', '2nd', '3rd', '4th', '5th', 'last'] value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' % '|'.join(options), re.IGNORECASE) def __init__(self, option_name, weekday_name): try: self.option_num = self.options.index(option_name.lower()) except ValueError: raise ValueError('Invalid weekday position "%s"' % option_name) try: self.weekday = WEEKDAYS.index(weekday_name.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % 
weekday_name) def get_next_value(self, date, field): # Figure out the weekday of the month's first day and the number of days in that month first_day_wday, last_day = monthrange(date.year, date.month) # Calculate which day of the month is the first of the target weekdays first_hit_day = self.weekday - first_day_wday + 1 if first_hit_day <= 0: first_hit_day += 7 # Calculate what day of the month the target weekday would be if self.option_num < 5: target_day = first_hit_day + self.option_num * 7 else: target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7 if target_day <= last_day and target_day >= date.day: return target_day def __eq__(self, other): return (super(WeekdayPositionExpression, self).__eq__(other) and self.option_num == other.option_num and self.weekday == other.weekday) def __str__(self): return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday]) def __repr__(self): return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num], WEEKDAYS[self.weekday]) class LastDayOfMonthExpression(AllExpression): value_re = re.compile(r'last', re.IGNORECASE) def __init__(self): pass def get_next_value(self, date, field): return monthrange(date.year, date.month)[1] def __str__(self): return 'last' def __repr__(self): return "%s()" % self.__class__.__name__
6,527
Python
.py
146
35.369863
96
0.579438
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,783
__init__.py
evilhero_mylar/lib/apscheduler/triggers/cron/__init__.py
from datetime import datetime, timedelta

from tzlocal import get_localzone
import six

from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron.fields import (
    BaseField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES)
from apscheduler.util import datetime_ceil, convert_to_datetime, datetime_repr, astimezone


class CronTrigger(BaseTrigger):
    """
    Triggers when current time matches all specified time constraints, similarly to how the UNIX
    cron scheduler works.

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of the (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
        to scheduler timezone)

    .. note:: The first weekday is always **monday**.
    """

    FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second')
    FIELDS_MAP = {
        'year': BaseField,
        'month': BaseField,
        'week': WeekField,
        'day': DayOfMonthField,
        'day_of_week': DayOfWeekField,
        'hour': BaseField,
        'minute': BaseField,
        'second': BaseField
    }

    __slots__ = 'timezone', 'start_date', 'end_date', 'fields'

    def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None,
                 minute=None, second=None, start_date=None, end_date=None, timezone=None):
        if timezone:
            self.timezone = astimezone(timezone)
        elif isinstance(start_date, datetime) and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif isinstance(end_date, datetime) and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')

        # Collect the explicitly given field values; the parameter names double as
        # field names, so they are picked straight out of locals().
        values = dict((key, value) for (key, value) in six.iteritems(locals())
                      if key in self.FIELD_NAMES and value is not None)
        self.fields = []
        assign_defaults = False
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
                # Once the last explicit value is consumed, the remaining (less
                # significant) fields get their defaults instead of wildcards.
                assign_defaults = not values
            elif assign_defaults:
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                exprs = '*'
                is_default = True

            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)

    def _increment_field_value(self, dateval, fieldnum):
        """
        Increments the designated field and resets all less significant fields to their minimum
        values.

        :type dateval: datetime
        :type fieldnum: int
        :return: a tuple containing the new date, and the number of the field that was actually
            incremented
        :rtype: tuple
        """
        values = {}
        i = 0
        while i < len(self.fields):
            field = self.fields[i]
            if not field.REAL:
                # Derived fields (week, day_of_week) cannot be set directly; bump the
                # next more significant real field instead.
                if i == fieldnum:
                    fieldnum -= 1
                    i -= 1
                else:
                    i += 1
                continue

            if i < fieldnum:
                values[field.name] = field.get_value(dateval)
                i += 1
            elif i > fieldnum:
                values[field.name] = field.get_min(dateval)
                i += 1
            else:
                value = field.get_value(dateval)
                maxval = field.get_max(dateval)
                if value == maxval:
                    # Already at the maximum; carry into the next more significant field.
                    fieldnum -= 1
                    i -= 1
                else:
                    values[field.name] = value + 1
                    i += 1

        difference = datetime(**values) - dateval.replace(tzinfo=None)
        return self.timezone.normalize(dateval + difference), fieldnum

    def _set_field_value(self, dateval, fieldnum, new_value):
        # Set the target field to new_value, keep the more significant fields and
        # reset the less significant ones to their minimums.
        values = {}
        for i, field in enumerate(self.fields):
            if field.REAL:
                if i < fieldnum:
                    values[field.name] = field.get_value(dateval)
                elif i > fieldnum:
                    values[field.name] = field.get_min(dateval)
                else:
                    values[field.name] = new_value

        return self.timezone.localize(datetime(**values))

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            start_date = min(now, previous_fire_time + timedelta(microseconds=1))
            if start_date == previous_fire_time:
                start_date += timedelta(microseconds=1)
        else:
            start_date = max(now, self.start_date) if self.start_date else now

        fieldnum = 0
        next_date = datetime_ceil(start_date).astimezone(self.timezone)
        while 0 <= fieldnum < len(self.fields):
            field = self.fields[fieldnum]
            curr_value = field.get_value(next_date)
            next_value = field.get_next_value(next_date)

            if next_value is None:
                # No valid value was found
                next_date, fieldnum = self._increment_field_value(next_date, fieldnum - 1)
            elif next_value > curr_value:
                # A valid, but higher than the starting value, was found
                if field.REAL:
                    next_date = self._set_field_value(next_date, fieldnum, next_value)
                    fieldnum += 1
                else:
                    next_date, fieldnum = self._increment_field_value(next_date, fieldnum)
            else:
                # A valid value was found, no changes necessary
                fieldnum += 1

            # Return if the date has rolled past the end date
            if self.end_date and next_date > self.end_date:
                return None

        if fieldnum >= 0:
            return next_date

    def __getstate__(self):
        return {
            'version': 1,
            'timezone': self.timezone,
            'start_date': self.start_date,
            'end_date': self.end_date,
            'fields': self.fields
        }

    def __setstate__(self, state):
        # This is for compatibility with APScheduler 3.0.x
        if isinstance(state, tuple):
            state = state[1]

        if state.get('version', 1) > 1:
            raise ValueError(
                'Got serialized data for version %s of %s, but only version 1 can be handled' %
                (state['version'], self.__class__.__name__))

        self.timezone = state['timezone']
        self.start_date = state['start_date']
        self.end_date = state['end_date']
        self.fields = state['fields']

    def __str__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        return 'cron[%s]' % (', '.join(options))

    def __repr__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        if self.start_date:
            options.append("start_date='%s'" % datetime_repr(self.start_date))
        return "<%s (%s, timezone='%s')>" % (
            self.__class__.__name__, ', '.join(options), self.timezone)
7,858
Python
.py
176
33.119318
98
0.571223
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,784
fields.py
evilhero_mylar/lib/apscheduler/triggers/cron/fields.py
"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields.""" from calendar import monthrange from apscheduler.triggers.cron.expressions import ( AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, WeekdayRangeExpression) __all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField') MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0} MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day:': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59} DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0, 'second': 0} class BaseField(object): REAL = True COMPILERS = [AllExpression, RangeExpression] def __init__(self, name, exprs, is_default=False): self.name = name self.is_default = is_default self.compile_expressions(exprs) def get_min(self, dateval): return MIN_VALUES[self.name] def get_max(self, dateval): return MAX_VALUES[self.name] def get_value(self, dateval): return getattr(dateval, self.name) def get_next_value(self, dateval): smallest = None for expr in self.expressions: value = expr.get_next_value(dateval, self) if smallest is None or (value is not None and value < smallest): smallest = value return smallest def compile_expressions(self, exprs): self.expressions = [] # Split a comma-separated expression list, if any exprs = str(exprs).strip() if ',' in exprs: for expr in exprs.split(','): self.compile_expression(expr) else: self.compile_expression(exprs) def compile_expression(self, expr): for compiler in self.COMPILERS: match = compiler.value_re.match(expr) if match: compiled_expr = compiler(**match.groupdict()) self.expressions.append(compiled_expr) return raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name)) def __eq__(self, other): return isinstance(self, self.__class__) and 
self.expressions == other.expressions def __str__(self): expr_strings = (str(e) for e in self.expressions) return ','.join(expr_strings) def __repr__(self): return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self) class WeekField(BaseField): REAL = False def get_value(self, dateval): return dateval.isocalendar()[1] class DayOfMonthField(BaseField): COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression] def get_max(self, dateval): return monthrange(dateval.year, dateval.month)[1] class DayOfWeekField(BaseField): REAL = False COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression] def get_value(self, dateval): return dateval.weekday()
3,190
Python
.py
70
37.328571
97
0.628155
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,785
redis.py
evilhero_mylar/lib/apscheduler/jobstores/redis.py
from __future__ import absolute_import from datetime import datetime from pytz import utc import six from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from redis import StrictRedis except ImportError: # pragma: nocover raise ImportError('RedisJobStore requires redis installed') class RedisJobStore(BaseJobStore): """ Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's :class:`~redis.StrictRedis`. Plugin alias: ``redis`` :param int db: the database number to store jobs in :param str jobs_key: key to store jobs in :param str run_times_key: key to store the jobs' run times in :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available """ def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times', pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): super(RedisJobStore, self).__init__() if db is None: raise ValueError('The "db" parameter must not be empty') if not jobs_key: raise ValueError('The "jobs_key" parameter must not be empty') if not run_times_key: raise ValueError('The "run_times_key" parameter must not be empty') self.pickle_protocol = pickle_protocol self.jobs_key = jobs_key self.run_times_key = run_times_key self.redis = StrictRedis(db=int(db), **connect_args) def lookup_job(self, job_id): job_state = self.redis.hget(self.jobs_key, job_id) return self._reconstitute_job(job_state) if job_state else None def get_due_jobs(self, now): timestamp = datetime_to_utc_timestamp(now) job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp) if job_ids: job_states = self.redis.hmget(self.jobs_key, *job_ids) return self._reconstitute_jobs(six.moves.zip(job_ids, job_states)) return [] def 
get_next_run_time(self): next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True) if next_run_time: return utc_timestamp_to_datetime(next_run_time[0][1]) def get_all_jobs(self): job_states = self.redis.hgetall(self.jobs_key) jobs = self._reconstitute_jobs(six.iteritems(job_states)) paused_sort_key = datetime(9999, 12, 31, tzinfo=utc) return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key) def add_job(self, job): if self.redis.hexists(self.jobs_key, job.id): raise ConflictingIdError(job.id) with self.redis.pipeline() as pipe: pipe.multi() pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol)) if job.next_run_time: pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id) pipe.execute() def update_job(self, job): if not self.redis.hexists(self.jobs_key, job.id): raise JobLookupError(job.id) with self.redis.pipeline() as pipe: pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol)) if job.next_run_time: pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id) else: pipe.zrem(self.run_times_key, job.id) pipe.execute() def remove_job(self, job_id): if not self.redis.hexists(self.jobs_key, job_id): raise JobLookupError(job_id) with self.redis.pipeline() as pipe: pipe.hdel(self.jobs_key, job_id) pipe.zrem(self.run_times_key, job_id) pipe.execute() def remove_all_jobs(self): with self.redis.pipeline() as pipe: pipe.delete(self.jobs_key) pipe.delete(self.run_times_key) pipe.execute() def shutdown(self): self.redis.connection_pool.disconnect() def _reconstitute_job(self, job_state): job_state = pickle.loads(job_state) job = Job.__new__(Job) job.__setstate__(job_state) job._scheduler = self._scheduler job._jobstore_alias = self._alias return job def _reconstitute_jobs(self, job_states): jobs = [] failed_job_ids = [] for job_id, job_state in job_states: try: jobs.append(self._reconstitute_job(job_state)) except: 
self._logger.exception('Unable to restore job "%s" -- removing it', job_id) failed_job_ids.append(job_id) # Remove all the jobs we failed to restore if failed_job_ids: with self.redis.pipeline() as pipe: pipe.hdel(self.jobs_key, *failed_job_ids) pipe.zrem(self.run_times_key, *failed_job_ids) pipe.execute() return jobs def __repr__(self): return '<%s>' % self.__class__.__name__
5,423
Python
.py
118
36
99
0.623081
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,786
memory.py
evilhero_mylar/lib/apscheduler/jobstores/memory.py
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp


class MemoryJobStore(BaseJobStore):
    """
    Stores jobs in an array in RAM. Provides no persistence support.

    Plugin alias: ``memory``
    """

    def __init__(self):
        super(MemoryJobStore, self).__init__()
        # list of (job, timestamp), sorted by next_run_time and job id (ascending)
        self._jobs = []
        self._jobs_index = {}  # id -> (job, timestamp) lookup table

    def lookup_job(self, job_id):
        return self._jobs_index.get(job_id, (None, None))[0]

    def get_due_jobs(self, now):
        now_timestamp = datetime_to_utc_timestamp(now)
        due = []
        for job, timestamp in self._jobs:
            # The list is sorted by timestamp, so the first non-due entry ends the scan
            # (a None timestamp marks a paused job, which sorts after everything).
            if timestamp is None or timestamp > now_timestamp:
                break
            due.append(job)

        return due

    def get_next_run_time(self):
        return self._jobs[0][0].next_run_time if self._jobs else None

    def get_all_jobs(self):
        return [entry[0] for entry in self._jobs]

    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)

    def update_job(self, job):
        old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
        if old_job is None:
            raise JobLookupError(job.id)

        # If the next run time has not changed, simply replace the job in its present index.
        # Otherwise, reinsert the job to the list to preserve the ordering.
        old_index = self._get_job_index(old_timestamp, old_job.id)
        new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
        if old_timestamp == new_timestamp:
            self._jobs[old_index] = (job, new_timestamp)
        else:
            del self._jobs[old_index]
            new_index = self._get_job_index(new_timestamp, job.id)
            self._jobs.insert(new_index, (job, new_timestamp))

        self._jobs_index[old_job.id] = (job, new_timestamp)

    def remove_job(self, job_id):
        job, timestamp = self._jobs_index.get(job_id, (None, None))
        if job is None:
            raise JobLookupError(job_id)

        index = self._get_job_index(timestamp, job_id)
        del self._jobs[index]
        del self._jobs_index[job.id]

    def remove_all_jobs(self):
        self._jobs = []
        self._jobs_index = {}

    def shutdown(self):
        self.remove_all_jobs()

    def _get_job_index(self, timestamp, job_id):
        """
        Returns the index of the given job, or if it's not found, the index where the job should
        be inserted based on the given timestamp.

        :type timestamp: int
        :type job_id: str
        """
        low, high = 0, len(self._jobs)
        # None (paused) sorts after every concrete timestamp.
        timestamp = float('inf') if timestamp is None else timestamp
        while low < high:
            middle = (low + high) // 2
            mid_job, mid_timestamp = self._jobs[middle]
            mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
            if mid_timestamp > timestamp:
                high = middle
            elif mid_timestamp < timestamp:
                low = middle + 1
            elif mid_job.id > job_id:
                high = middle
            elif mid_job.id < job_id:
                low = middle + 1
            else:
                return middle

        return low
3,655
Python
.py
85
33.447059
99
0.598534
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,787
rethinkdb.py
evilhero_mylar/lib/apscheduler/jobstores/rethinkdb.py
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    import rethinkdb as r
except ImportError:  # pragma: nocover
    raise ImportError('RethinkDBJobStore requires rethinkdb installed')


class RethinkDBJobStore(BaseJobStore):
    """
    Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
    rethinkdb's `RethinkdbClient <http://www.rethinkdb.com/api/#connect>`_.

    Plugin alias: ``rethinkdb``

    :param str database: database to store jobs in
    :param str table: table to store jobs in
    :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
        connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, database='apscheduler', table='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RethinkDBJobStore, self).__init__()

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not table:
            raise ValueError('The "table" parameter must not be empty')

        self.database = database
        self.table = table
        self.client = client
        self.pickle_protocol = pickle_protocol
        self.connect_args = connect_args
        self.conn = None

    def start(self, scheduler, alias):
        """Connect (or adopt the provided client) and lazily create the
        database, table and ``next_run_time`` index if they do not exist."""
        super(RethinkDBJobStore, self).start(scheduler, alias)

        if self.client:
            self.conn = maybe_ref(self.client)
        else:
            self.conn = r.connect(db=self.database, **self.connect_args)

        if self.database not in r.db_list().run(self.conn):
            r.db_create(self.database).run(self.conn)

        if self.table not in r.table_list().run(self.conn):
            r.table_create(self.table).run(self.conn)

        if 'next_run_time' not in r.table(self.table).index_list().run(self.conn):
            r.table(self.table).index_create('next_run_time').run(self.conn)

        # NOTE(review): self.table is rebound here from the table *name* (str)
        # to the rethinkdb table object; every method below relies on that.
        self.table = r.db(self.database).table(self.table)

    def lookup_job(self, job_id):
        results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn))
        return self._reconstitute_job(results[0]['job_state']) if results else None

    def get_due_jobs(self, now):
        return self._get_jobs(r.row['next_run_time'] <= datetime_to_utc_timestamp(now))

    def get_next_run_time(self):
        # Paused jobs (next_run_time None) are filtered out before taking the minimum.
        results = list(
            self.table
            .filter(r.row['next_run_time'] != None)  # flake8: noqa
            .order_by(r.asc('next_run_time'))
            .map(lambda x: x['next_run_time'])
            .limit(1)
            .run(self.conn)
        )
        return utc_timestamp_to_datetime(results[0]) if results else None

    def get_all_jobs(self):
        jobs = self._get_jobs()
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        """Insert *job*; raises :exc:`ConflictingIdError` on a duplicate id."""
        job_dict = {
            'id': job.id,
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
        }
        results = self.table.insert(job_dict).run(self.conn)
        if results['errors'] > 0:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        """Replace the stored job; raises :exc:`JobLookupError` if absent."""
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
        }
        results = self.table.get_all(job.id).update(changes).run(self.conn)
        # skipped is True when at least one counter in the result dict is
        # non-zero (i.e. the update replaced, skipped or errored on something).
        skipped = False in map(lambda x: results[x] == 0, results.keys())
        if results['skipped'] > 0 or results['errors'] > 0 or not skipped:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        results = self.table.get_all(job_id).delete().run(self.conn)
        if results['deleted'] + results['skipped'] != 1:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.table.delete().run(self.conn)

    def shutdown(self):
        self.conn.close()

    def _reconstitute_job(self, job_state):
        # Rebuild a Job from its pickled __getstate__ dict, bypassing __init__.
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, predicate=None):
        """Fetch jobs (optionally filtered), dropping any that fail to unpickle."""
        jobs = []
        failed_job_ids = []
        query = (self.table.filter(r.row['next_run_time'] != None).filter(predicate)
                 if predicate else self.table)
        query = query.order_by('next_run_time', 'id').pluck('id', 'job_state')

        for document in query.run(self.conn):
            try:
                jobs.append(self._reconstitute_job(document['job_state']))
            except:
                # Deliberate best-effort: a corrupt/unloadable job is logged and removed.
                self._logger.exception('Unable to restore job "%s" -- removing it',
                                       document['id'])
                failed_job_ids.append(document['id'])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            r.expr(failed_job_ids).for_each(
                lambda job_id: self.table.get_all(job_id).delete()).run(self.conn)

        return jobs

    def __repr__(self):
        connection = self.conn
        return '<%s (connection=%s)>' % (self.__class__.__name__, connection)
5,683
Python
.py
122
37.606557
99
0.629656
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,788
sqlalchemy.py
evilhero_mylar/lib/apscheduler/jobstores/sqlalchemy.py
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from sqlalchemy import (
        create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select)
    from sqlalchemy.exc import IntegrityError
    from sqlalchemy.sql.expression import null
except ImportError:  # pragma: nocover
    raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')


class SQLAlchemyJobStore(BaseJobStore):
    """
    Stores jobs in a database table using SQLAlchemy.
    The table will be created if it doesn't exist in the database.

    Plugin alias: ``sqlalchemy``

    :param str url: connection string (see `SQLAlchemy documentation
        <http://docs.sqlalchemy.org/en/latest/core/engines.html?highlight=create_engine#database-urls>`_
        on this)
    :param engine: an SQLAlchemy Engine to use instead of creating a new one based on ``url``
    :param str tablename: name of the table to store jobs in
    :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of creating a new one
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL):
        super(SQLAlchemyJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
        # 25 = precision that translates to an 8-byte float
        self.jobs_t = Table(
            tablename, metadata,
            Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
            Column('next_run_time', Float(25), index=True),
            Column('job_state', LargeBinary, nullable=False)
        )

    def start(self, scheduler, alias):
        """Create the jobs table if it does not exist yet (checkfirst=True)."""
        super(SQLAlchemyJobStore, self).start(scheduler, alias)
        self.jobs_t.create(self.engine, True)

    def lookup_job(self, job_id):
        """Return the job with the given id, or ``None`` if not found."""
        selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id)
        job_state = self.engine.execute(selectable).scalar()
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)

    def get_next_run_time(self):
        # Paused jobs (NULL next_run_time) are excluded from the minimum.
        selectable = select([self.jobs_t.c.next_run_time]).\
            where(self.jobs_t.c.next_run_time != null()).\
            order_by(self.jobs_t.c.next_run_time).limit(1)
        next_run_time = self.engine.execute(selectable).scalar()
        return utc_timestamp_to_datetime(next_run_time)

    def get_all_jobs(self):
        jobs = self._get_jobs()
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        """Insert *job*; raises :exc:`ConflictingIdError` on a duplicate id."""
        insert = self.jobs_t.insert().values(**{
            'id': job.id,
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
        })
        try:
            self.engine.execute(insert)
        except IntegrityError:
            # The primary key constraint rejected the duplicate id.
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        """Replace the stored job; raises :exc:`JobLookupError` if absent."""
        update = self.jobs_t.update().values(**{
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
        }).where(self.jobs_t.c.id == job.id)
        result = self.engine.execute(update)
        if result.rowcount == 0:
            # BUG FIX: the original raised JobLookupError(id) -- the *builtin*
            # id function -- instead of the job's identifier.
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
        result = self.engine.execute(delete)
        if result.rowcount == 0:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        delete = self.jobs_t.delete()
        self.engine.execute(delete)

    def shutdown(self):
        self.engine.dispose()

    def _reconstitute_job(self, job_state):
        # Rebuild a Job from its pickled state, bypassing __init__.
        job_state = pickle.loads(job_state)
        job_state['jobstore'] = self
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, *conditions):
        """Fetch jobs matching *conditions*, dropping any that fail to unpickle."""
        jobs = []
        selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\
            order_by(self.jobs_t.c.next_run_time)
        selectable = selectable.where(*conditions) if conditions else selectable
        failed_job_ids = set()
        for row in self.engine.execute(selectable):
            try:
                jobs.append(self._reconstitute_job(row.job_state))
            except:
                # Deliberate best-effort: corrupt rows are logged and removed.
                self._logger.exception('Unable to restore job "%s" -- removing it', row.id)
                failed_job_ids.add(row.id)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
            self.engine.execute(delete)

        return jobs

    def __repr__(self):
        return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
5,785
Python
.py
123
38.373984
104
0.645201
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,789
base.py
evilhero_mylar/lib/apscheduler/jobstores/base.py
from abc import ABCMeta, abstractmethod
import logging

import six


class JobLookupError(KeyError):
    """Raised when the job store cannot find a job for update or removal."""

    def __init__(self, job_id):
        super(JobLookupError, self).__init__(u'No job by the id of %s was found' % job_id)


class ConflictingIdError(KeyError):
    """Raised when the uniqueness of job IDs is being violated."""

    def __init__(self, job_id):
        super(ConflictingIdError, self).__init__(
            u'Job identifier (%s) conflicts with an existing job' % job_id)


class TransientJobError(ValueError):
    """
    Raised when an attempt to add transient (with no func_ref) job to a persistent job store is
    detected.
    """

    def __init__(self, job_id):
        super(TransientJobError, self).__init__(
            u'Job (%s) cannot be added to this job store because a reference to the callable '
            u'could not be determined.' % job_id)


class BaseJobStore(six.with_metaclass(ABCMeta)):
    """Abstract base class that defines the interface that every job store must implement."""

    _scheduler = None
    _alias = None
    _logger = logging.getLogger('apscheduler.jobstores')

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the job store is being
        added to an already running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
            this job store
        :param str|unicode alias: alias of this job store as it was assigned to the scheduler
        """
        self._scheduler = scheduler
        self._alias = alias
        self._logger = logging.getLogger('apscheduler.jobstores.%s' % alias)

    def shutdown(self):
        """Frees any resources still bound to this job store."""

    def _fix_paused_jobs_sorting(self, jobs):
        # Jobs come back sorted with paused ones (next_run_time is None) first
        # (None/NULL sorts lowest in most backends); move that leading run of
        # paused jobs to the end so active jobs come first.
        for i, job in enumerate(jobs):
            if job.next_run_time is not None:
                if i > 0:
                    paused_jobs = jobs[:i]
                    del jobs[:i]
                    jobs.extend(paused_jobs)
                break

    @abstractmethod
    def lookup_job(self, job_id):
        """
        Returns a specific job, or ``None`` if it isn't found.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
        the returned job to point to the scheduler and itself, respectively.

        :param str|unicode job_id: identifier of the job
        :rtype: Job
        """

    @abstractmethod
    def get_due_jobs(self, now):
        """
        Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``.
        The returned jobs must be sorted by next run time (ascending).

        :param datetime.datetime now: the current (timezone aware) datetime
        :rtype: list[Job]
        """

    @abstractmethod
    def get_next_run_time(self):
        """
        Returns the earliest run time of all the jobs stored in this job store, or ``None`` if
        there are no active jobs.

        :rtype: datetime.datetime
        """

    @abstractmethod
    def get_all_jobs(self):
        """
        Returns a list of all jobs in this job store.
        The returned jobs should be sorted by next run time (ascending).
        Paused jobs (next_run_time == None) should be sorted last.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
        the returned jobs to point to the scheduler and itself, respectively.

        :rtype: list[Job]
        """

    @abstractmethod
    def add_job(self, job):
        """
        Adds the given job to this store.

        :param Job job: the job to add
        :raises ConflictingIdError: if there is another job in this store with the same ID
        """

    @abstractmethod
    def update_job(self, job):
        """
        Replaces the job in the store with the given newer version.

        :param Job job: the job to update
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_job(self, job_id):
        """
        Removes the given job from this store.

        :param str|unicode job_id: identifier of the job
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_all_jobs(self):
        """Removes all jobs from this store."""

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
4,523
Python
.py
107
34.018692
98
0.634703
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,790
mongodb.py
evilhero_mylar/lib/apscheduler/jobstores/mongodb.py
from __future__ import absolute_import

import warnings

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from bson.binary import Binary
    from pymongo.errors import DuplicateKeyError
    from pymongo import MongoClient, ASCENDING
except ImportError:  # pragma: nocover
    raise ImportError('MongoDBJobStore requires PyMongo installed')


class MongoDBJobStore(BaseJobStore):
    """
    Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
    pymongo's `MongoClient
    <http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.

    Plugin alias: ``mongodb``

    :param str database: database to store jobs in
    :param str collection: collection to store jobs in
    :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, database='apscheduler', collection='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(MongoDBJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')

        if client:
            self.client = maybe_ref(client)
        else:
            # w=1: acknowledged writes by default.
            connect_args.setdefault('w', 1)
            self.client = MongoClient(**connect_args)

        self.collection = self.client[database][collection]

    def start(self, scheduler, alias):
        super(MongoDBJobStore, self).start(scheduler, alias)
        # NOTE(review): ensure_index/insert/update/remove are legacy pymongo 2.x
        # APIs, removed in pymongo 4 -- this module assumes an old pymongo.
        self.collection.ensure_index('next_run_time', sparse=True)

    @property
    def connection(self):
        warnings.warn('The "connection" member is deprecated -- use "client" instead',
                      DeprecationWarning)
        return self.client

    def lookup_job(self, job_id):
        """Return the job with the given id, or ``None`` if not found."""
        document = self.collection.find_one(job_id, ['job_state'])
        return self._reconstitute_job(document['job_state']) if document else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs({'next_run_time': {'$lte': timestamp}})

    def get_next_run_time(self):
        # Paused jobs (next_run_time None) are excluded from the minimum.
        document = self.collection.find_one({'next_run_time': {'$ne': None}},
                                            projection=['next_run_time'],
                                            sort=[('next_run_time', ASCENDING)])
        return utc_timestamp_to_datetime(document['next_run_time']) if document else None

    def get_all_jobs(self):
        jobs = self._get_jobs({})
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        """Insert *job*; raises :exc:`ConflictingIdError` on a duplicate id."""
        try:
            self.collection.insert({
                '_id': job.id,
                'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
                'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
            })
        except DuplicateKeyError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        """Replace the stored job; raises :exc:`JobLookupError` if absent."""
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
        }
        result = self.collection.update({'_id': job.id}, {'$set': changes})
        if result and result['n'] == 0:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        result = self.collection.remove(job_id)
        if result and result['n'] == 0:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.collection.remove()

    def shutdown(self):
        self.client.close()

    def _reconstitute_job(self, job_state):
        # Rebuild a Job from its pickled __getstate__ dict, bypassing __init__.
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, conditions):
        """Fetch jobs matching *conditions*, dropping any that fail to unpickle."""
        jobs = []
        failed_job_ids = []
        for document in self.collection.find(conditions, ['_id', 'job_state'],
                                             sort=[('next_run_time', ASCENDING)]):
            try:
                jobs.append(self._reconstitute_job(document['job_state']))
            except:
                # Deliberate best-effort: corrupt documents are logged and removed.
                self._logger.exception('Unable to restore job "%s" -- removing it',
                                       document['_id'])
                failed_job_ids.append(document['_id'])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.collection.remove({'_id': {'$in': failed_job_ids}})

        return jobs

    def __repr__(self):
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
5,282
Python
.py
114
36.54386
109
0.624587
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,791
zookeeper.py
evilhero_mylar/lib/apscheduler/jobstores/zookeeper.py
from __future__ import absolute_import

import os
from datetime import datetime

from pytz import utc
from kazoo.exceptions import NoNodeError, NodeExistsError

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from kazoo.client import KazooClient
except ImportError:  # pragma: nocover
    raise ImportError('ZooKeeperJobStore requires Kazoo installed')


class ZooKeeperJobStore(BaseJobStore):
    """
    Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
    kazoo's `KazooClient <http://kazoo.readthedocs.io/en/latest/api/client.html>`_.

    Plugin alias: ``zookeeper``

    :param str path: path to store jobs in
    :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of providing
        connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(ZooKeeperJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        self.path = path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = KazooClient(**connect_args)
        self._ensured_path = False

    def _ensure_paths(self):
        # Create the base znode on first use only.
        if not self._ensured_path:
            self.client.ensure_path(self.path)
        self._ensured_path = True

    def start(self, scheduler, alias):
        super(ZooKeeperJobStore, self).start(scheduler, alias)
        if not self.client.connected:
            self.client.start()

    def lookup_job(self, job_id):
        """Return the job with the given id, or ``None`` on any failure."""
        self._ensure_paths()
        node_path = os.path.join(self.path, job_id)
        try:
            content, _ = self.client.get(node_path)
            doc = pickle.loads(content)
            job = self._reconstitute_job(doc['job_state'])
            return job
        except:
            # Deliberate best-effort: a missing or unloadable node yields None.
            return None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        jobs = [job_def['job'] for job_def in self._get_jobs()
                if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp]
        return jobs

    def get_next_run_time(self):
        next_runs = [job_def['next_run_time'] for job_def in self._get_jobs()
                     if job_def['next_run_time'] is not None]
        return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None

    def get_all_jobs(self):
        jobs = [job_def['job'] for job_def in self._get_jobs()]
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        """Insert *job*; raises :exc:`ConflictingIdError` on a duplicate id."""
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job.id))
        value = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(value, self.pickle_protocol)
        try:
            self.client.create(node_path, value=data)
        except NodeExistsError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        """Replace the stored job; raises :exc:`JobLookupError` if absent."""
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job.id))
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(changes, self.pickle_protocol)
        try:
            self.client.set(node_path, value=data)
        except NoNodeError:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job_id))
        try:
            self.client.delete(node_path)
        except NoNodeError:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        try:
            self.client.delete(self.path, recursive=True)
        except NoNodeError:
            pass
        self._ensured_path = False

    def shutdown(self):
        if self.close_connection_on_exit:
            self.client.stop()
            self.client.close()

    def _reconstitute_job(self, job_state):
        # Rebuild a Job from its __getstate__ dict, bypassing __init__.
        # (Removed the no-op "job_state = job_state" statement present before.)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self):
        """Load every job znode, dropping (and deleting) any that fail to load."""
        self._ensure_paths()
        jobs = []
        failed_job_ids = []
        all_ids = self.client.get_children(self.path)
        for node_name in all_ids:
            try:
                node_path = os.path.join(self.path, node_name)
                content, stat = self.client.get(node_path)
                doc = pickle.loads(content)
                job_def = {
                    'job_id': node_name,
                    'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None,
                    'job_state': doc['job_state'],
                    'job': self._reconstitute_job(doc['job_state']),
                    # znode creation time is used as a stable tie-breaker when sorting
                    'creation_time': stat.ctime
                }
                jobs.append(job_def)
            except:
                # Deliberate best-effort: corrupt nodes are logged and removed.
                self._logger.exception('Unable to restore job "%s" -- removing it' % node_name)
                failed_job_ids.append(node_name)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            for failed_id in failed_job_ids:
                self.remove_job(failed_id)
        # Paused jobs (no next_run_time) sort last via a far-future sentinel.
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key,
                                                 job_def['creation_time']))

    def __repr__(self):
        # BUG FIX: the original also called self._logger.exception() here,
        # logging a bogus traceback as a side effect of every repr().
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
6,378
Python
.py
151
32.423841
98
0.603162
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,792
gevent.py
evilhero_mylar/lib/apscheduler/schedulers/gevent.py
from __future__ import absolute_import

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.base import BaseScheduler

try:
    from gevent.event import Event
    from gevent.lock import RLock
    import gevent
except ImportError:  # pragma: nocover
    raise ImportError('GeventScheduler requires gevent installed')


class GeventScheduler(BlockingScheduler):
    """A scheduler that runs as a Gevent greenlet."""

    _greenlet = None

    def start(self, *args, **kwargs):
        """Start the scheduler and return the main-loop greenlet (non-blocking)."""
        # presumably _event is the wakeup event the inherited main loop waits
        # on -- confirm against BlockingScheduler
        self._event = Event()
        # Deliberately calls BaseScheduler.start (not super()): the blocking
        # parent's start would run the main loop inline instead of spawning it.
        BaseScheduler.start(self, *args, **kwargs)
        self._greenlet = gevent.spawn(self._main_loop)
        return self._greenlet

    def shutdown(self, *args, **kwargs):
        """Shut down and wait for the main-loop greenlet to finish."""
        super(GeventScheduler, self).shutdown(*args, **kwargs)
        self._greenlet.join()
        del self._greenlet

    def _create_lock(self):
        # Use a gevent-aware reentrant lock.
        return RLock()

    def _create_default_executor(self):
        from apscheduler.executors.gevent import GeventExecutor
        return GeventExecutor()
1,031
Python
.py
26
33.692308
66
0.715863
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,793
twisted.py
evilhero_mylar/lib/apscheduler/schedulers/twisted.py
from __future__ import absolute_import

from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from twisted.internet import reactor as default_reactor
except ImportError:  # pragma: nocover
    raise ImportError('TwistedScheduler requires Twisted installed')


def run_in_reactor(func):
    """Decorator that defers the wrapped method to the reactor thread.

    The call is asynchronous: the wrapper returns ``None`` immediately and the
    wrapped function's return value is discarded.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._reactor.callFromThread(func, self, *args, **kwargs)

    return wrapper


class TwistedScheduler(BaseScheduler):
    """
    A scheduler that runs on a Twisted reactor.

    Extra options:

    =========== ========================================================
    ``reactor`` Reactor instance to use (defaults to the global reactor)
    =========== ========================================================
    """

    _reactor = None
    _delayedcall = None

    def _configure(self, config):
        # 'reactor' may be an object or an importable reference string (maybe_ref).
        self._reactor = maybe_ref(config.pop('reactor', default_reactor))
        super(TwistedScheduler, self)._configure(config)

    @run_in_reactor
    def shutdown(self, wait=True):
        # Runs in the reactor thread; see run_in_reactor.
        super(TwistedScheduler, self).shutdown(wait)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        """(Re)arm the wakeup timer; a None wait leaves it disarmed."""
        self._stop_timer()
        if wait_seconds is not None:
            self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)

    def _stop_timer(self):
        """Cancel a pending wakeup call, if any."""
        if self._delayedcall and self._delayedcall.active():
            self._delayedcall.cancel()
            # After del, attribute lookup falls back to the class-level None.
            del self._delayedcall

    @run_in_reactor
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.twisted import TwistedExecutor
        return TwistedExecutor()
1,844
Python
.py
46
33.782609
82
0.640292
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,794
asyncio.py
evilhero_mylar/lib/apscheduler/schedulers/asyncio.py
from __future__ import absolute_import

from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    import asyncio
except ImportError:  # pragma: nocover
    try:
        import trollius as asyncio
    except ImportError:
        raise ImportError(
            'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')


def run_in_event_loop(func):
    """Decorator that defers the wrapped method to the event loop thread.

    The call is asynchronous: the wrapper returns ``None`` immediately and the
    wrapped function's return value is discarded. Keyword arguments are not
    forwarded (only positional args).
    """
    @wraps(func)
    def wrapper(self, *args):
        self._eventloop.call_soon_threadsafe(func, self, *args)

    return wrapper


class AsyncIOScheduler(BaseScheduler):
    """
    A scheduler that runs on an asyncio (:pep:`3156`) event loop.

    The default executor can run jobs based on native coroutines (``async def``).

    Extra options:

    ============== =============================================================
    ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
    ============== =============================================================
    """

    _eventloop = None
    _timeout = None

    @run_in_event_loop
    def shutdown(self, wait=True):
        # Runs in the event loop thread; see run_in_event_loop.
        super(AsyncIOScheduler, self).shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        # 'event_loop' may be an object or an importable reference string.
        self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
        super(AsyncIOScheduler, self)._configure(config)

    def _start_timer(self, wait_seconds):
        """(Re)arm the wakeup timer; a None wait leaves it disarmed."""
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)

    def _stop_timer(self):
        """Cancel a pending wakeup callback, if any."""
        if self._timeout:
            self._timeout.cancel()
            # After del, attribute lookup falls back to the class-level None.
            del self._timeout

    @run_in_event_loop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.asyncio import AsyncIOExecutor
        return AsyncIOExecutor()
2,018
Python
.py
51
33.078431
95
0.627371
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,795
tornado.py
evilhero_mylar/lib/apscheduler/schedulers/tornado.py
from __future__ import absolute_import

from datetime import timedelta
from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from tornado.ioloop import IOLoop
except ImportError:  # pragma: nocover
    raise ImportError('TornadoScheduler requires tornado installed')


def run_in_ioloop(func):
    """Decorator that defers the wrapped method to the IOLoop.

    The call is asynchronous: the wrapper returns ``None`` immediately and the
    wrapped function's return value is discarded.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._ioloop.add_callback(func, self, *args, **kwargs)

    return wrapper


class TornadoScheduler(BaseScheduler):
    """
    A scheduler that runs on a Tornado IOLoop.

    The default executor can run jobs based on native coroutines (``async def``).

    =========== ===============================================================
    ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
    =========== ===============================================================
    """

    _ioloop = None
    _timeout = None

    @run_in_ioloop
    def shutdown(self, wait=True):
        # Runs on the IOLoop; see run_in_ioloop.
        super(TornadoScheduler, self).shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        # 'io_loop' may be an object or an importable reference string (maybe_ref).
        self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current()
        super(TornadoScheduler, self)._configure(config)

    def _start_timer(self, wait_seconds):
        """(Re)arm the wakeup timeout; a None wait leaves it disarmed."""
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup)

    def _stop_timer(self):
        """Cancel a pending wakeup timeout, if any."""
        if self._timeout:
            self._ioloop.remove_timeout(self._timeout)
            # After del, attribute lookup falls back to the class-level None.
            del self._timeout

    def _create_default_executor(self):
        from apscheduler.executors.tornado import TornadoExecutor
        return TornadoExecutor()

    @run_in_ioloop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)
1,926
Python
.py
47
34.787234
98
0.634997
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,796
qt.py
evilhero_mylar/lib/apscheduler/schedulers/qt.py
from __future__ import absolute_import

from apscheduler.schedulers.base import BaseScheduler

try:
    from PyQt5.QtCore import QObject, QTimer
except ImportError:  # pragma: nocover
    try:
        from PyQt4.QtCore import QObject, QTimer
    except ImportError:
        try:
            from PySide.QtCore import QObject, QTimer  # flake8: noqa
        except ImportError:
            raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed')


class QtScheduler(BaseScheduler):
    """A scheduler that runs in a Qt event loop."""

    _timer = None

    def shutdown(self, *args, **kwargs):
        super(QtScheduler, self).shutdown(*args, **kwargs)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        """(Re)arm the job-processing timer; a None wait leaves it disarmed.

        BUG FIX: the original did ``self._timer = QTimer.singleShot(...)``.
        The static ``QTimer.singleShot()`` returns None, so ``self._timer``
        stayed None and ``_stop_timer()`` could never cancel a pending
        timeout. A real single-shot QTimer instance is used instead.
        """
        self._stop_timer()
        if wait_seconds is not None:
            self._timer = QTimer()
            self._timer.setSingleShot(True)
            self._timer.timeout.connect(self._process_jobs)
            # QTimer.start takes milliseconds as an int.
            self._timer.start(int(wait_seconds * 1000))

    def _stop_timer(self):
        """Cancel a pending timeout, if any."""
        if self._timer:
            if self._timer.isActive():
                self._timer.stop()
            # After del, attribute lookup falls back to the class-level None.
            del self._timer

    def wakeup(self):
        self._start_timer(0)

    def _process_jobs(self):
        wait_seconds = super(QtScheduler, self)._process_jobs()
        self._start_timer(wait_seconds)
1,238
Python
.py
32
31
93
0.652174
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,797
background.py
evilhero_mylar/lib/apscheduler/schedulers/background.py
from __future__ import absolute_import

from threading import Thread, Event

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool


class BackgroundScheduler(BlockingScheduler):
    """
    A scheduler that runs in the background using a separate thread
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).

    Extra options:

    ========== =============================================================================
    ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
               `the documentation
               <https://docs.python.org/3.4/library/threading.html#thread-objects>`_
               for further details)
    ========== =============================================================================
    """

    _thread = None

    def _configure(self, config):
        # Consume our own option before handing the rest to the base class.
        self._daemon = asbool(config.pop('daemon', True))
        super(BackgroundScheduler, self)._configure(config)

    def start(self, *args, **kwargs):
        self._event = Event()
        # Deliberately bypass BlockingScheduler.start(), which would block in
        # _main_loop(); run that loop in a dedicated thread instead.
        BaseScheduler.start(self, *args, **kwargs)
        worker = Thread(target=self._main_loop, name='APScheduler')
        worker.daemon = self._daemon
        self._thread = worker
        worker.start()

    def shutdown(self, *args, **kwargs):
        super(BackgroundScheduler, self).shutdown(*args, **kwargs)
        # Wait for the worker thread to finish, then drop the reference.
        self._thread.join()
        del self._thread
1,505
Python
.py
31
41.645161
92
0.603142
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,798
__init__.py
evilhero_mylar/lib/apscheduler/schedulers/__init__.py
# Scheduler lifecycle errors: both carry no state and render a fixed message.


class SchedulerAlreadyRunningError(Exception):
    """Raised when attempting to start or configure the scheduler when it's already running."""

    def __str__(self):
        return 'Scheduler is already running'


class SchedulerNotRunningError(Exception):
    """Raised when attempting to shutdown the scheduler when it's not running."""

    def __str__(self):
        return 'Scheduler is not running'
406
Python
.py
8
45.25
95
0.730964
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,799
blocking.py
evilhero_mylar/lib/apscheduler/schedulers/blocking.py
from __future__ import absolute_import

from threading import Event

from apscheduler.schedulers.base import BaseScheduler, STATE_STOPPED
from apscheduler.util import TIMEOUT_MAX


class BlockingScheduler(BaseScheduler):
    """
    A scheduler that runs in the foreground
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
    """

    _event = None

    def start(self, *args, **kwargs):
        self._event = Event()
        super(BlockingScheduler, self).start(*args, **kwargs)
        self._main_loop()

    def shutdown(self, wait=True):
        super(BlockingScheduler, self).shutdown(wait)
        # Wake the loop so it observes the stopped state and exits.
        self._event.set()

    def _main_loop(self):
        # Sleep at most TIMEOUT_MAX per iteration; wakeup()/shutdown() set the
        # event to cut the wait short.
        timeout = TIMEOUT_MAX
        while self.state != STATE_STOPPED:
            self._event.wait(timeout)
            self._event.clear()
            timeout = self._process_jobs()

    def wakeup(self):
        self._event.set()
924
Python
.py
25
30.2
74
0.667789
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)